From b3e44f0bdf4caff4107826c63b9d281c4d9ff509 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:30:23 +0000 Subject: [PATCH 001/382] make FederationHandler.on_query_auth async --- synapse/handlers/federation.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c86d3177e9..61df39d4bf 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2120,15 +2120,14 @@ class FederationHandler(BaseHandler): logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True - @defer.inlineCallbacks - def on_query_auth( + async def on_query_auth( self, origin, event_id, room_id, remote_auth_chain, rejects, missing ): - in_room = yield self.auth.check_host_in_room(room_id, origin) + in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") - event = yield self.store.get_event( + event = await self.store.get_event( event_id, allow_none=False, check_room_id=room_id ) @@ -2136,19 +2135,19 @@ class FederationHandler(BaseHandler): # don't want to fall into the trap of `missing` being wrong. for e in remote_auth_chain: try: - yield self._handle_new_event(origin, e) + await self._handle_new_event(origin, e) except AuthError: pass # Now get the current auth_chain for the event. - local_auth_chain = yield self.store.get_auth_chain( + local_auth_chain = await self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) # TODO: Check if we would now reject event_id. If so we need to tell # everyone. - ret = yield self.construct_auth_difference(local_auth_chain, remote_auth_chain) + ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain) logger.debug("on_query_auth returning: %s", ret) From 7571bf86f0399b2376427f3a6d91b8850e45b8f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:32:48 +0000 Subject: [PATCH 002/382] make FederationHandler.on_send_join_request async --- synapse/handlers/federation.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 61df39d4bf..7d6db77ae5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1411,9 +1411,7 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - @log_function - def on_send_join_request(self, origin, pdu): + async def on_send_join_request(self, origin, pdu): """ We have received a join event for a room. Fully process it and respond with the current state and auth chains. """ @@ -1450,9 +1448,9 @@ class FederationHandler(BaseHandler): # would introduce the danger of backwards-compatibility problems. 
event.internal_metadata.send_on_behalf_of = origin - context = yield self._handle_new_event(origin, event) + context = await self._handle_new_event(origin, event) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1470,14 +1468,14 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.Member: if event.content["membership"] == Membership.JOIN: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, event.room_id) + await self.user_joined_room(user, event.room_id) - prev_state_ids = yield context.get_prev_state_ids() + prev_state_ids = await context.get_prev_state_ids() state_ids = list(prev_state_ids.values()) - auth_chain = yield self.store.get_auth_chain(state_ids) + auth_chain = await self.store.get_auth_chain(state_ids) - state = yield self.store.get_events(list(prev_state_ids.values())) + state = await self.store.get_events(list(prev_state_ids.values())) return {"state": list(state.values()), "auth_chain": auth_chain} From af8ba6b52502dda2c0b50a023f4fd0ef63b67237 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:33:42 +0000 Subject: [PATCH 003/382] make FederationHandler.on_invite_request async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7d6db77ae5..b924c72c77 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1479,8 +1479,7 @@ class FederationHandler(BaseHandler): return {"state": list(state.values()), "auth_chain": auth_chain} - @defer.inlineCallbacks - def on_invite_request( + async def on_invite_request( self, origin: str, event: EventBase, room_version: RoomVersion ): """ We've got an invite event. Process and persist it. Sign it. 
@@ -1490,7 +1489,7 @@ class FederationHandler(BaseHandler): if event.state_key is None: raise SynapseError(400, "The invite event did not have a state key") - is_blocked = yield self.store.is_room_blocked(event.room_id) + is_blocked = await self.store.is_room_blocked(event.room_id) if is_blocked: raise SynapseError(403, "This room has been blocked on this server") @@ -1533,8 +1532,8 @@ class FederationHandler(BaseHandler): ) ) - context = yield self.state_handler.compute_event_context(event) - yield self.persist_events_and_notify([(event, context)]) + context = await self.state_handler.compute_event_context(event) + await self.persist_events_and_notify([(event, context)]) return event From 98681f90cbe70c317556ec1596df256f49d4d38a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:35:30 +0000 Subject: [PATCH 004/382] make FederationHandler.on_make_join_request async --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b924c72c77..e75ebb168e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1348,20 +1348,17 @@ class FederationHandler(BaseHandler): "Error handling queued PDU %s from %s: %s", p.event_id, origin, e ) - @defer.inlineCallbacks - @log_function - def on_make_join_request(self, origin, room_id, user_id): + async def on_make_join_request( + self, origin: str, room_id: str, user_id: str + ) -> EventBase: """ We've received a /make_join/ request, so we create a partial join event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: - origin (str): The (verified) server name of the requesting server. - room_id (str): Room to create join event in - user_id (str): The user to create the join for - - Returns: - Deferred[FrozenEvent] + origin: The (verified) server name of the requesting server. + room_id: Room to create join event in + user_id: The user to create the join for """ if get_domain_from_id(user_id) != origin: logger.info( @@ -1373,7 +1370,7 @@ class FederationHandler(BaseHandler): event_content = {"membership": Membership.JOIN} - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, @@ -1387,14 +1384,14 @@ class FederationHandler(BaseHandler): ) try: - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) except AuthError as e: logger.warning("Failed to create join to %s because %s", room_id, e) raise e - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1405,7 +1402,7 @@ class FederationHandler(BaseHandler): # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_join_request` - yield self.auth.check_from_context( + await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) From d184cbc0319cbff17d04028c265cd774356f6e54 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:39:24 +0000 Subject: [PATCH 005/382] make FederationHandler.on_send_leave_request async --- synapse/handlers/federation.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e75ebb168e..cdde10c8b9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1641,9 +1641,7 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - @log_function - def on_send_leave_request(self, origin, pdu): + async def on_send_leave_request(self, origin, pdu): """ We have received a leave event for a room. Fully process it.""" event = pdu @@ -1663,9 +1661,9 @@ class FederationHandler(BaseHandler): event.internal_metadata.outlier = False - context = yield self._handle_new_event(origin, event) + context = await self._handle_new_event(origin, event) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: From 6e89ec5e32907a5e6b036526e3eb6cb4e76d843f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:40:41 +0000 Subject: [PATCH 006/382] make FederationHandler.on_make_leave_request async --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cdde10c8b9..3813680c34 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1581,20 +1581,17 @@ class FederationHandler(BaseHandler): assert event.room_id == room_id return origin, event, room_version - @defer.inlineCallbacks - @log_function - def on_make_leave_request(self, origin, room_id, user_id): + async def on_make_leave_request( + self, origin: str, room_id: str, user_id: str + ) -> EventBase: """ We've received a /make_leave/ request, so we create a partial leave event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: - origin (str): The (verified) server name of the requesting server. - room_id (str): Room to create leave event in - user_id (str): The user to create the leave for - - Returns: - Deferred[FrozenEvent] + origin: The (verified) server name of the requesting server. 
+ room_id: Room to create leave event in + user_id: The user to create the leave for """ if get_domain_from_id(user_id) != origin: logger.info( @@ -1604,7 +1601,7 @@ class FederationHandler(BaseHandler): ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, { @@ -1616,11 +1613,11 @@ class FederationHandler(BaseHandler): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1632,7 +1629,7 @@ class FederationHandler(BaseHandler): try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - yield self.auth.check_from_context( + await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) except AuthError as e: From c556ed9e15640da5b8aff15c4547609202eab6f1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:43:51 +0000 Subject: [PATCH 007/382] make FederationHandler._handle_new_events async --- synapse/handlers/federation.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3813680c34..9c16f54304 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1815,13 +1815,12 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _handle_new_events( + async def _handle_new_events( self, origin: str, event_infos: Iterable[_NewEventInfo], backfilled: bool = False, - ): + ) -> None: """Creates the appropriate contexts and persists events. The events should not depend on one another, e.g. this should be used to persist a bunch of outliers, but not a chunk of individual events that depend @@ -1830,11 +1829,10 @@ class FederationHandler(BaseHandler): Notifies about the events where appropriate. 
""" - @defer.inlineCallbacks - def prep(ev_info: _NewEventInfo): + async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = yield self._prep_event( + res = await self._prep_event( origin, event, state=ev_info.state, @@ -1843,14 +1841,14 @@ class FederationHandler(BaseHandler): ) return res - contexts = yield make_deferred_yieldable( + contexts = await make_deferred_yieldable( defer.gatherResults( [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [ (ev_info.event, context) for ev_info, context in zip(event_infos, contexts) From 1cdc253e0a965515823bc34c6223dbeda0c55669 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:48:33 +0000 Subject: [PATCH 008/382] make FederationHandler._handle_new_event async --- synapse/handlers/federation.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9c16f54304..81eb7eecbd 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1782,11 +1782,10 @@ class FederationHandler(BaseHandler): def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) - @defer.inlineCallbacks - def _handle_new_event( + async def _handle_new_event( self, origin, event, state=None, auth_events=None, backfilled=False ): - context = yield self._prep_event( + context = await self._prep_event( origin, event, state=state, auth_events=auth_events, backfilled=backfilled ) @@ -1799,11 +1798,11 @@ class FederationHandler(BaseHandler): and not backfilled and not context.rejected ): - yield self.action_generator.handle_push_actions_for_event( + await self.action_generator.handle_push_actions_for_event( event, context ) - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [(event, context)], backfilled=backfilled ) success = True @@ -2296,7 +2295,9 @@ class FederationHandler(BaseHandler): logger.debug( "do_auth %s missing_auth: %s", event.event_id, e.event_id ) - yield self._handle_new_event(origin, e, auth_events=auth) + yield defer.ensureDeferred( + self._handle_new_event(origin, e, auth_events=auth) + ) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e From 8033b257a7f4f8e21d5b52228234f429f1552dd2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:49:32 +0000 Subject: [PATCH 009/382] make FederationHandler._prep_event async --- synapse/handlers/federation.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 81eb7eecbd..8cb3a505f8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1966,54 +1966,41 @@ class FederationHandler(BaseHandler): yield self.persist_events_and_notify([(event, new_event_context)]) - @defer.inlineCallbacks - def _prep_event( + async def _prep_event( self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], auth_events: Optional[StateMap[EventBase]], backfilled: bool, - ): - """ - - Args: - origin: - event: - state: - auth_events: - backfilled: - - Returns: - Deferred, which resolves to synapse.events.snapshot.EventContext - """ - context = yield self.state_handler.compute_event_context(event, old_state=state) + ) -> EventContext: + context = await 
self.state_handler.compute_event_context(event, old_state=state) if not auth_events: - prev_state_ids = yield context.get_prev_state_ids() - auth_events_ids = yield self.auth.compute_auth_events( + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = await self.auth.compute_auth_events( event, prev_state_ids, for_verification=True ) - auth_events = yield self.store.get_events(auth_events_ids) + auth_events = await self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events.values()} # This is a hack to fix some old rooms where the initial join event # didn't reference the create event in its auth events. if event.type == EventTypes.Member and not event.auth_event_ids(): if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = yield self.store.get_event( + c = await self.store.get_event( event.prev_event_ids()[0], allow_none=True ) if c and c.type == EventTypes.Create: auth_events[(c.type, c.state_key)] = c - context = yield self.do_auth(origin, event, context, auth_events=auth_events) + context = await self.do_auth(origin, event, context, auth_events=auth_events) if not context.rejected: - yield self._check_for_soft_fail(event, state, backfilled) + await self._check_for_soft_fail(event, state, backfilled) if event.type == EventTypes.GuestAccess and not context.rejected: - yield self.maybe_kick_guest_users(event) + await self.maybe_kick_guest_users(event) return context From bc9b75c6f08db3672d6620a91e91be2e0188c1a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:51:24 +0000 Subject: [PATCH 010/382] make FederationHandler.do_auth async --- synapse/handlers/federation.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8cb3a505f8..780029547d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2149,16 +2149,20 @@ class FederationHandler(BaseHandler): return missing_events - @defer.inlineCallbacks - @log_function - def do_auth(self, origin, event, context, auth_events): + async def do_auth( + self, + origin: str, + event: EventBase, + context: EventContext, + auth_events: StateMap[EventBase], + ) -> EventContext: """ Args: - origin (str): - event (synapse.events.EventBase): - context (synapse.events.snapshot.EventContext): - auth_events (dict[(str, str)->synapse.events.EventBase]): + origin: + event: + context: + auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room @@ -2168,13 +2172,13 @@ class FederationHandler(BaseHandler): Also NB that this function adds entries to it. 
Returns: - defer.Deferred[EventContext]: updated context object + updated context object """ - room_version = yield self.store.get_room_version_id(event.room_id) + room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: - context = yield self._update_auth_events_and_context_for_auth( + context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events ) except Exception: From a25ddf26a35f2bd25b5284736b39de70a37f8570 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:53:30 +0000 Subject: [PATCH 011/382] make FederationHandler._update_auth_events_and_context_for_auth async --- synapse/handlers/federation.py | 41 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 780029547d..8dd2e81fd4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2200,10 +2200,13 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _update_auth_events_and_context_for_auth( - self, origin, event, context, auth_events - ): + async def _update_auth_events_and_context_for_auth( + self, + origin: str, + event: EventBase, + context: EventContext, + auth_events: StateMap[EventBase], + ) -> EventContext: """Helper for do_auth. See there for docs. Checks whether a given event has the expected auth events. If it @@ -2211,16 +2214,16 @@ class FederationHandler(BaseHandler): we can come to a consensus (e.g. if one server missed some valid state). - This attempts to resovle any potential divergence of state between + This attempts to resolve any potential divergence of state between servers, but is not essential and so failures should not block further processing of the event. Args: - origin (str): - event (synapse.events.EventBase): - context (synapse.events.snapshot.EventContext): + origin: + event: + context: - auth_events (dict[(str, str)->synapse.events.EventBase]): + auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room @@ -2231,7 +2234,7 @@ class FederationHandler(BaseHandler): Also NB that this function adds entries to it. Returns: - defer.Deferred[EventContext]: updated context + updated context """ event_auth_events = set(event.auth_event_ids()) @@ -2245,7 +2248,7 @@ class FederationHandler(BaseHandler): # # we start by checking if they are in the store, and then try calling /event_auth/. 
if missing_auth: - have_events = yield self.store.have_seen_events(missing_auth) + have_events = await self.store.have_seen_events(missing_auth) logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) @@ -2254,7 +2257,7 @@ class FederationHandler(BaseHandler): logger.info("auth_events contains unknown events: %s", missing_auth) try: try: - remote_auth_chain = yield self.federation_client.get_event_auth( + remote_auth_chain = await self.federation_client.get_event_auth( origin, event.room_id, event.event_id ) except RequestSendFailed as e: @@ -2263,7 +2266,7 @@ class FederationHandler(BaseHandler): logger.info("Failed to get event auth from remote: %s", e) return context - seen_remotes = yield self.store.have_seen_events( + seen_remotes = await self.store.have_seen_events( [e.event_id for e in remote_auth_chain] ) @@ -2286,9 +2289,7 @@ class FederationHandler(BaseHandler): logger.debug( "do_auth %s missing_auth: %s", event.event_id, e.event_id ) - yield defer.ensureDeferred( - self._handle_new_event(origin, e, auth_events=auth) - ) + await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e @@ -2322,7 +2323,7 @@ class FederationHandler(BaseHandler): # XXX: currently this checks for redactions but I'm not convinced that is # necessary? - different_events = yield self.store.get_events_as_list(different_auth) + different_events = await self.store.get_events_as_list(different_auth) for d in different_events: if d.room_id != event.room_id: @@ -2348,8 +2349,8 @@ class FederationHandler(BaseHandler): remote_auth_events.update({(d.type, d.state_key): d for d in different_events}) remote_state = remote_auth_events.values() - room_version = yield self.store.get_room_version_id(event.room_id) - new_state = yield self.state_handler.resolve_events( + room_version = await self.store.get_room_version_id(event.room_id) + new_state = await self.state_handler.resolve_events( room_version, (local_state, remote_state), event ) @@ -2364,7 +2365,7 @@ class FederationHandler(BaseHandler): auth_events.update(new_state) - context = yield self._update_context_for_auth_events( + context = await self._update_context_for_auth_events( event, context, auth_events ) From 0d5f2f4bb0a1153d7d8b7d66d5c06268b2f2ea18 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:55:35 +0000 Subject: [PATCH 012/382] make FederationHandler._update_context_for_auth_events async --- synapse/handlers/federation.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8dd2e81fd4..ea49f928e5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2371,21 +2371,21 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _update_context_for_auth_events(self, event, context, auth_events): + async def _update_context_for_auth_events( + self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase] + ) -> EventContext: """Update the state_ids in an event context after auth event resolution, storing the changes as a new state group. Args: - event (Event): The event we're handling the context for + event: The event we're handling the context for - context (synapse.events.snapshot.EventContext): initial event context + context: initial event context - auth_events (dict[(str, str)->EventBase]): Events to update in the event - context. 
+ auth_events: Events to update in the event context. Returns: - Deferred[EventContext]: new event context + new event context """ # exclude the state key of the new event from the current_state in the context. if event.is_state(): @@ -2396,19 +2396,19 @@ class FederationHandler(BaseHandler): k: a.event_id for k, a in iteritems(auth_events) if k != event_key } - current_state_ids = yield context.get_current_state_ids() + current_state_ids = await context.get_current_state_ids() current_state_ids = dict(current_state_ids) current_state_ids.update(state_updates) - prev_state_ids = yield context.get_prev_state_ids() + prev_state_ids = await context.get_prev_state_ids() prev_state_ids = dict(prev_state_ids) prev_state_ids.update({k: a.event_id for k, a in iteritems(auth_events)}) # create a new state group as a delta from the existing one. prev_group = context.state_group - state_group = yield self.state_store.store_state_group( + state_group = await self.state_store.store_state_group( event.event_id, event.room_id, prev_group=prev_group, From 957129f4a7febacba64bc68fbcd7375db5156186 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:00:46 +0000 Subject: [PATCH 013/382] make FederationHandler.construct_auth_difference async --- synapse/handlers/federation.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea49f928e5..1d9084b326 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2425,8 +2425,9 @@ class FederationHandler(BaseHandler): delta_ids=state_updates, ) - @defer.inlineCallbacks - def construct_auth_difference(self, local_auth, remote_auth): + async def construct_auth_difference( + self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase] + ) -> Dict: """ Given a local and remote auth chain, find the differences. This assumes that we have already processed all events in remote_auth @@ -2535,7 +2536,7 @@ class FederationHandler(BaseHandler): reason_map = {} for e in base_remote_rejected: - reason = yield self.store.get_rejection_reason(e.event_id) + reason = await self.store.get_rejection_reason(e.event_id) if reason is None: # TODO: e is not in the current state, so we should # construct some proof of that. 
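
The transformation these patches apply is mechanical: drop the @defer.inlineCallbacks (and, where present, @log_function) decorators, declare the method with `async def`, replace each `yield` on a Deferred with `await`, and fold the old "Returns: Deferred[...]" docstring text into a return type annotation. Twisted's inlineCallbacks generators cannot `yield` a coroutine directly, which is why callers that have not yet been ported wrap the coroutine in defer.ensureDeferred (as patches 008, 016 and 019 do). A minimal, self-contained sketch of the pattern follows; the handler and method names are invented for illustration and are not Synapse code.

    from twisted.internet import defer
    from twisted.internet.task import react


    class ExampleHandler:
        """Illustrative stand-in for a Synapse handler; not real Synapse code."""

        async def _get_event(self, event_id: str) -> dict:
            # Stand-in for a storage lookup that the real code would await.
            return {"event_id": event_id}

        # After the port: a native coroutine. The pre-port version would be
        # an @defer.inlineCallbacks generator using `yield` instead of
        # `await`, documented as returning a Deferred.
        async def on_example_request(self, event_id: str) -> dict:
            event = await self._get_event(event_id)
            return event

        # A caller that is still an inlineCallbacks generator cannot `yield`
        # a coroutine directly, so it bridges with defer.ensureDeferred,
        # mirroring the calls added in patches 008, 016 and 019.
        @defer.inlineCallbacks
        def legacy_caller(self, event_id):
            event = yield defer.ensureDeferred(self.on_example_request(event_id))
            return event


    def main(reactor):
        handler = ExampleHandler()
        d = handler.legacy_caller("$someevent:example.org")
        d.addCallback(print)  # prints {'event_id': '$someevent:example.org'}
        return d


    if __name__ == "__main__":
        react(main)
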
From 863087d18643d224f3b8eb4a511429cc8669a135 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:02:50 +0000 Subject: [PATCH 014/382] make FederationHandler.on_exchange_third_party_invite_request async --- synapse/handlers/federation.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1d9084b326..1eafc1bf1a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -65,7 +65,7 @@ from synapse.replication.http.federation import ( from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour -from synapse.types import StateMap, UserID, get_domain_from_id +from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination @@ -2621,33 +2621,31 @@ class FederationHandler(BaseHandler): destinations, room_id, event_dict ) - @defer.inlineCallbacks - @log_function - def on_exchange_third_party_invite_request(self, room_id, event_dict): + async def on_exchange_third_party_invite_request( + self, room_id: str, event_dict: JsonDict + ) -> None: """Handle an exchange_third_party_invite request from a remote server The remote server will call this when it wants to turn a 3pid invite into a normal m.room.member invite. Args: - room_id (str): The ID of the room. + room_id: The ID of the room. event_dict (dict[str, Any]): Dictionary containing the event body. - Returns: - Deferred: resolves (to None) """ - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) # NB: event_dict has a particular specced format we might need to fudge # if we change event formats too much. builder = self.event_builder_factory.new(room_version, event_dict) - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -2658,16 +2656,16 @@ class FederationHandler(BaseHandler): 403, "This event is not allowed in this context", Codes.FORBIDDEN ) - event, context = yield self.add_display_name_to_third_party_invite( + event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) try: - yield self.auth.check_from_context(room_version, event, context) + await self.auth.check_from_context(room_version, event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e - yield self._check_signature(event, context) + await self._check_signature(event, context) # We need to tell the transaction queue to send this out, even # though the sender isn't a local user. 
@@ -2675,7 +2673,7 @@ class FederationHandler(BaseHandler): # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() - yield member_handler.send_membership_event(None, event, context) + await member_handler.send_membership_event(None, event, context) @defer.inlineCallbacks def add_display_name_to_third_party_invite( From 94f7b4cd54612f9f1a67b3090f7b249ac20a0c76 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:06:46 +0000 Subject: [PATCH 015/382] make FederationHandler.on_event_auth async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1eafc1bf1a..4e9f240e14 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1199,13 +1199,12 @@ class FederationHandler(BaseHandler): return pdu - @defer.inlineCallbacks - def on_event_auth(self, event_id): - event = yield self.store.get_event(event_id) - auth = yield self.store.get_auth_chain( + async def on_event_auth(self, event_id: str) -> List[EventBase]: + event = await self.store.get_event(event_id) + auth = await self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) - return [e for e in auth] + return list(auth) @log_function @defer.inlineCallbacks From ebd6a15af3994d803b8deb2172dbd0deaeb59915 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:13:13 +0000 Subject: [PATCH 016/382] make FederationHandler.do_invite_join async --- synapse/handlers/federation.py | 30 ++++++++++++++---------------- synapse/handlers/room_member.py | 6 ++++-- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4e9f240e14..127dc0fc02 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1206,9 +1206,9 @@ class FederationHandler(BaseHandler): ) return list(auth) - @log_function - @defer.inlineCallbacks - def do_invite_join(self, target_hosts, room_id, joinee, content): + async def do_invite_join( + self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict + ) -> None: """ Attempts to join the `joinee` to the room `room_id` via the servers contained in `target_hosts`. @@ -1221,17 +1221,17 @@ class FederationHandler(BaseHandler): have finished processing the join. Args: - target_hosts (Iterable[str]): List of servers to attempt to join the room with. + target_hosts: List of servers to attempt to join the room with. - room_id (str): The ID of the room to join. + room_id: The ID of the room to join. - joinee (str): The User ID of the joining user. + joinee: The User ID of the joining user. - content (dict): The event content to use for the join event. + content: The event content to use for the join event. 
""" logger.debug("Joining %s to %s", joinee, room_id) - origin, event, room_version_obj = yield self._make_and_verify_event( + origin, event, room_version_obj = await self._make_and_verify_event( target_hosts, room_id, joinee, @@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler): self.room_queues[room_id] = [] - yield self._clean_room_for_join(room_id) + await self._clean_room_for_join(room_id) handled_events = set() @@ -1261,7 +1261,7 @@ class FederationHandler(BaseHandler): pass event_format_version = room_version_obj.event_format - ret = yield self.federation_client.send_join( + ret = await self.federation_client.send_join( target_hosts, event, event_format_version ) @@ -1280,7 +1280,7 @@ class FederationHandler(BaseHandler): logger.debug("do_invite_join event: %s", event) try: - yield self.store.store_room( + await self.store.store_room( room_id=room_id, room_creator_user_id="", is_public=False, @@ -1290,13 +1290,13 @@ class FederationHandler(BaseHandler): # FIXME pass - yield self._persist_auth_tree( + await self._persist_auth_tree( origin, auth_chain, state, event, room_version_obj ) # Check whether this room is the result of an upgrade of a room we already know # about. If so, migrate over user information - predecessor = yield self.store.get_room_predecessor(room_id) + predecessor = await self.store.get_room_predecessor(room_id) if not predecessor or not isinstance(predecessor.get("room_id"), str): return old_room_id = predecessor["room_id"] @@ -1306,7 +1306,7 @@ class FederationHandler(BaseHandler): # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() - yield member_handler.transfer_room_state_on_room_upgrade( + await member_handler.transfer_room_state_on_room_upgrade( old_room_id, room_id ) @@ -1323,8 +1323,6 @@ class FederationHandler(BaseHandler): run_in_background(self._handle_queued_pdus, room_queue) - return True - async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 15e8aa5249..ce8150db6e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -944,8 +944,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): # join dance for now, since we're kinda implicitly checking # that we are allowed to join when we decide whether or not we # need to do the invite/join dance. 
- yield self.federation_handler.do_invite_join( - remote_room_hosts, room_id, user.to_string(), content + yield defer.ensureDeferred( + self.federation_handler.do_invite_join( + remote_room_hosts, room_id, user.to_string(), content + ) ) yield self._user_joined_room(user, room_id) From dbdf843012847536f5e87f66efd48174ad348be3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:14:58 +0000 Subject: [PATCH 017/382] make FederationHandler._persist_auth_tree async --- synapse/handlers/federation.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 127dc0fc02..7bc5632f4d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1852,15 +1852,14 @@ class FederationHandler(BaseHandler): backfilled=backfilled, ) - @defer.inlineCallbacks - def _persist_auth_tree( + async def _persist_auth_tree( self, origin: str, auth_events: List[EventBase], state: List[EventBase], event: EventBase, room_version: RoomVersion, - ): + ) -> None: """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event separately. Notifies about the persisted events @@ -1875,14 +1874,11 @@ class FederationHandler(BaseHandler): event room_version: The room version we expect this room to have, and will raise if it doesn't match the version in the create event. - - Returns: - Deferred """ events_to_context = {} for e in itertools.chain(auth_events, state): e.internal_metadata.outlier = True - ctx = yield self.state_handler.compute_event_context(e) + ctx = await self.state_handler.compute_event_context(e) events_to_context[e.event_id] = ctx event_map = { @@ -1914,7 +1910,7 @@ class FederationHandler(BaseHandler): missing_auth_events.add(e_id) for e_id in missing_auth_events: - m_ev = yield self.federation_client.get_pdu( + m_ev = await self.federation_client.get_pdu( [origin], e_id, room_version=room_version.identifier, @@ -1950,18 +1946,18 @@ class FederationHandler(BaseHandler): raise events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [ (e, events_to_context[e.event_id]) for e in itertools.chain(auth_events, state) ] ) - new_event_context = yield self.state_handler.compute_event_context( + new_event_context = await self.state_handler.compute_event_context( event, old_state=state ) - yield self.persist_events_and_notify([(event, new_event_context)]) + await self.persist_events_and_notify([(event, new_event_context)]) async def _prep_event( self, From c3f296af32d0da0dc9e8797949ed95d9f4ca1c7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:16:31 +0000 Subject: [PATCH 018/382] make FederationHandler._check_for_soft_fail async --- synapse/handlers/federation.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7bc5632f4d..c4a291d1c0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1997,27 +1997,23 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _check_for_soft_fail( + async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool - ): - """Checks if we should soft fail the event, if so marks the event as + ) -> None: + """Checks if we 
should soft fail the event; if so, marks the event as such. Args: event state: The state at the event if we don't have all the event's prev events backfilled: Whether the event is from backfill - - Returns: - Deferred """ # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier() if do_soft_fail_check: - extrem_ids = yield self.store.get_latest_event_ids_in_room(event.room_id) + extrem_ids = await self.store.get_latest_event_ids_in_room(event.room_id) extrem_ids = set(extrem_ids) prev_event_ids = set(event.prev_event_ids()) @@ -2028,7 +2024,7 @@ class FederationHandler(BaseHandler): do_soft_fail_check = False if do_soft_fail_check: - room_version = yield self.store.get_room_version_id(event.room_id) + room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] # Calculate the "current state". @@ -2045,19 +2041,19 @@ class FederationHandler(BaseHandler): # given state at the event. This should correctly handle cases # like bans, especially with state res v2. - state_sets = yield self.state_store.get_state_groups( + state_sets = await self.state_store.get_state_groups( event.room_id, extrem_ids ) state_sets = list(state_sets.values()) state_sets.append(state) - current_state_ids = yield self.state_handler.resolve_events( + current_state_ids = await self.state_handler.resolve_events( room_version, state_sets, event ) current_state_ids = { k: e.event_id for k, e in iteritems(current_state_ids) } else: - current_state_ids = yield self.state_handler.get_current_state_ids( + current_state_ids = await self.state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids ) @@ -2073,7 +2069,7 @@ class FederationHandler(BaseHandler): e for k, e in iteritems(current_state_ids) if k in auth_types ] - current_auth_events = yield self.store.get_events(current_state_ids) + current_auth_events = await self.store.get_events(current_state_ids) current_auth_events = { (e.type, e.state_key): e for e in current_auth_events.values() } From 4286e429a7359cdeb670fdd63839d0b73836026b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:19:18 +0000 Subject: [PATCH 019/382] make FederationHandler.do_remotely_reject_invite async --- synapse/handlers/federation.py | 13 +++++++------ synapse/handlers/room_member.py | 6 ++++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c4a291d1c0..73ef130ace 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1531,9 +1531,10 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content): - origin, event, room_version = yield self._make_and_verify_event( + async def do_remotely_reject_invite( + self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict + ) -> EventBase: + origin, event, room_version = await self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not @@ -1549,10 +1550,10 @@ class FederationHandler(BaseHandler): except ValueError: pass - yield self.federation_client.send_leave(target_hosts, event) + await self.federation_client.send_leave(target_hosts, event) - 
context = yield self.state_handler.compute_event_context(event) - yield self.persist_events_and_notify([(event, context)]) + context = await self.state_handler.compute_event_context(event) + await self.persist_events_and_notify([(event, context)]) return event diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ce8150db6e..4260426369 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -984,8 +984,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): """ fed_handler = self.federation_handler try: - ret = yield fed_handler.do_remotely_reject_invite( - remote_room_hosts, room_id, target.to_string(), content=content, + ret = yield defer.ensureDeferred( + fed_handler.do_remotely_reject_invite( + remote_room_hosts, room_id, target.to_string(), content=content, + ) ) return ret except Exception as e: From 3b7e0e002bad4ba39e9d4b4188fae0132571bded Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:22:30 +0000 Subject: [PATCH 020/382] make FederationHandler._make_and_verify_event async --- synapse/handlers/federation.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 73ef130ace..e5fa55b973 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1557,15 +1557,20 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - def _make_and_verify_event( - self, target_hosts, room_id, user_id, membership, content={}, params=None - ): + async def _make_and_verify_event( + self, + target_hosts: Iterable[str], + room_id: str, + user_id: str, + membership: str, + content: JsonDict = {}, + params: Optional[Dict[str, str]] = None, + ) -> Tuple[str, EventBase, RoomVersion]: ( origin, event, room_version, - ) = yield self.federation_client.make_membership_event( + ) = await self.federation_client.make_membership_event( target_hosts, room_id, user_id, membership, content, params=params ) From 05299599b61bd19805a5b7843b907b1b5954e1de Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:24:07 +0000 Subject: [PATCH 021/382] make FederationHandler.persist_events_and_notify async --- synapse/handlers/federation.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e5fa55b973..ec010c1f9f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2816,27 +2816,27 @@ class FederationHandler(BaseHandler): if "valid" not in response or not response["valid"]: raise AuthError(403, "Third party certificate was invalid") - @defer.inlineCallbacks - def persist_events_and_notify(self, event_and_contexts, backfilled=False): + async def persist_events_and_notify( + self, + event_and_contexts: Sequence[Tuple[EventBase, EventContext]], + backfilled: bool = False, + ) -> None: """Persists events and tells the notifier/pushers about them, if necessary. 
Args: - event_and_contexts(list[tuple[FrozenEvent, EventContext]]) - backfilled (bool): Whether these events are a result of + event_and_contexts: + backfilled: Whether these events are a result of backfilling or not - - Returns: - Deferred """ if self.config.worker_app: - yield self._send_events_to_master( + await self._send_events_to_master( store=self.store, event_and_contexts=event_and_contexts, backfilled=backfilled, ) else: - max_stream_id = yield self.storage.persistence.persist_events( + max_stream_id = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) @@ -2847,7 +2847,7 @@ class FederationHandler(BaseHandler): if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - yield self._notify_persisted_event(event, max_stream_id) + await self._notify_persisted_event(event, max_stream_id) def _notify_persisted_event(self, event, max_stream_id): """Checks to see if notifier/pushers should be notified about the From 814cc00cb96168479a5c462e9078f3b60901589d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:25:41 +0000 Subject: [PATCH 022/382] make FederationHandler._notify_persisted_event async --- synapse/handlers/federation.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ec010c1f9f..19cd55b8cf 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2849,13 +2849,15 @@ class FederationHandler(BaseHandler): for event, _ in event_and_contexts: await self._notify_persisted_event(event, max_stream_id) - def _notify_persisted_event(self, event, max_stream_id): + async def _notify_persisted_event( + self, event: EventBase, max_stream_id: int + ) -> None: """Checks to see if notifier/pushers should be notified about the event or not. Args: - event (FrozenEvent) - max_stream_id (int): The max_stream_id returned by persist_events + event: + max_stream_id: The max_stream_id returned by persist_events """ extra_users = [] @@ -2879,7 +2881,7 @@ class FederationHandler(BaseHandler): event, event_stream_id, max_stream_id, extra_users=extra_users ) - return self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) def _clean_room_for_join(self, room_id): """Called to clean up any data in DB for a given room, ready for the From 52642860dabac96467d7a7947976c53eb5dc4c82 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:27:05 +0000 Subject: [PATCH 023/382] make FederationHandler._clean_room_for_join async --- synapse/handlers/federation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 19cd55b8cf..e252e69888 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2883,17 +2883,17 @@ class FederationHandler(BaseHandler): await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) - def _clean_room_for_join(self, room_id): + async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the server to join the room. 
Args: - room_id (str) + room_id """ if self.config.worker_app: - return self._clean_room_for_join_client(room_id) + await self._clean_room_for_join_client(room_id) else: - return self.store.clean_room_for_join(room_id) + await self.store.clean_room_for_join(room_id) def user_joined_room(self, user, room_id): """Called when a new user has joined the room From f64c96662ed794bbdd8960002658602a1b92eb2b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:28:31 +0000 Subject: [PATCH 024/382] make FederationHandler.user_joined_room async --- synapse/handlers/federation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e252e69888..c94573b547 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2895,15 +2895,15 @@ class FederationHandler(BaseHandler): else: await self.store.clean_room_for_join(room_id) - def user_joined_room(self, user, room_id): + async def user_joined_room(self, user: UserID, room_id: str) -> None: """Called when a new user has joined the room """ if self.config.worker_app: - return self._notify_user_membership_change( + await self._notify_user_membership_change( room_id=room_id, user_id=user.to_string(), change="joined" ) else: - return defer.succeed(user_joined_room(self.distributor, user, room_id)) + user_joined_room(self.distributor, user, room_id) @defer.inlineCallbacks def get_room_complexity(self, remote_room_hosts, room_id): From e49eb1a886c6f139887b1e71f8234e02e738a84a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:30:21 +0000 Subject: [PATCH 025/382] changelog --- changelog.d/6837.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6837.misc diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc new file mode 100644 index 0000000000..0496f12de8 --- /dev/null +++ b/changelog.d/6837.misc @@ -0,0 +1 @@ +Port much of `synapse.handlers.federation` to async/await. From ae5b3104f0023171b2bb89f08a066e5974ee7666 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Feb 2020 17:10:54 +0000 Subject: [PATCH 026/382] Fix stacktraces when using ObservableDeferred and async/await (#6836) --- changelog.d/6836.misc | 1 + synapse/util/async_helpers.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/6836.misc diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc new file mode 100644 index 0000000000..232488e1e5 --- /dev/null +++ b/changelog.d/6836.misc @@ -0,0 +1 @@ +Fix stacktraces when using `ObservableDeferred` and async/await. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 04b6abdc24..581dffd8a0 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -73,6 +73,10 @@ class ObservableDeferred(object): def errback(f): object.__setattr__(self, "_result", (False, f)) while self._observers: + # This is a little bit of magic to correctly propagate stack + # traces when we `await` on one of the observer deferreds. + f.value.__failure__ = f + try: # TODO: Handle errors here. 
self._observers.pop().errback(f) From b9391c957572224c3a7c22870102fcbd24dea4e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Feb 2020 18:05:44 +0000 Subject: [PATCH 027/382] Add typing to SyncHandler (#6821) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/6821.misc | 1 + synapse/events/__init__.py | 18 +- synapse/handlers/sync.py | 697 +++++++++++++++++--------------- tests/storage/test_redaction.py | 5 +- tox.ini | 1 + 5 files changed, 377 insertions(+), 345 deletions(-) create mode 100644 changelog.d/6821.misc diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc new file mode 100644 index 0000000000..1d5265d5e2 --- /dev/null +++ b/changelog.d/6821.misc @@ -0,0 +1 @@ +Add type hints to `SyncHandler`. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index f813fa2fe7..92f76703b3 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -189,8 +189,14 @@ class EventBase(object): redacts = _event_dict_property("redacts", None) room_id = _event_dict_property("room_id") sender = _event_dict_property("sender") + state_key = _event_dict_property("state_key") + type = _event_dict_property("type") user_id = _event_dict_property("sender") + @property + def event_id(self) -> str: + raise NotImplementedError() + @property def membership(self): return self.content["membership"] @@ -281,10 +287,7 @@ class FrozenEvent(EventBase): else: frozen_dict = event_dict - self.event_id = event_dict["event_id"] - self.type = event_dict["type"] - if "state_key" in event_dict: - self.state_key = event_dict["state_key"] + self._event_id = event_dict["event_id"] super(FrozenEvent, self).__init__( frozen_dict, @@ -294,6 +297,10 @@ class FrozenEvent(EventBase): rejected_reason=rejected_reason, ) + @property + def event_id(self) -> str: + return self._event_id + def __str__(self): return self.__repr__() @@ -332,9 +339,6 @@ class FrozenEventV2(EventBase): frozen_dict = event_dict self._event_id = None - self.type = event_dict["type"] - if "state_key" in event_dict: - self.state_key = event_dict["state_key"] super(FrozenEventV2, self).__init__( frozen_dict, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd95f85e3f..5f060241b4 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -14,20 +14,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import collections import itertools import logging +from typing import Any, Dict, FrozenSet, List, Optional, Set, Tuple from six import iteritems, itervalues +import attr from prometheus_client import Counter from synapse.api.constants import EventTypes, Membership +from synapse.api.filtering import FilterCollection +from synapse.events import EventBase from synapse.logging.context import LoggingContext from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter -from synapse.types import RoomStreamToken +from synapse.types import ( + Collection, + JsonDict, + RoomStreamToken, + StateMap, + StreamToken, + UserID, +) from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache @@ -62,17 +72,22 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000 LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100 -SyncConfig = collections.namedtuple( - "SyncConfig", ["user", "filter_collection", "is_guest", "request_key", "device_id"] -) +@attr.s(slots=True, frozen=True) +class SyncConfig: + user = attr.ib(type=UserID) + filter_collection = attr.ib(type=FilterCollection) + is_guest = attr.ib(type=bool) + request_key = attr.ib(type=Tuple[Any, ...]) + device_id = attr.ib(type=str) -class TimelineBatch( - collections.namedtuple("TimelineBatch", ["prev_batch", "events", "limited"]) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class TimelineBatch: + prev_batch = attr.ib(type=StreamToken) + events = attr.ib(type=List[EventBase]) + limited = attr.ib(bool) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ @@ -81,23 +96,17 @@ class TimelineBatch( __bool__ = __nonzero__ # python3 -class JoinedSyncResult( - collections.namedtuple( - "JoinedSyncResult", - [ - "room_id", # str - "timeline", # TimelineBatch - "state", # dict[(str, str), FrozenEvent] - "ephemeral", - "account_data", - "unread_notifications", - "summary", - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class JoinedSyncResult: + room_id = attr.ib(type=str) + timeline = attr.ib(type=TimelineBatch) + state = attr.ib(type=StateMap[EventBase]) + ephemeral = attr.ib(type=List[JsonDict]) + account_data = attr.ib(type=List[JsonDict]) + unread_notifications = attr.ib(type=JsonDict) + summary = attr.ib(type=Optional[JsonDict]) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ @@ -113,20 +122,14 @@ class JoinedSyncResult( __bool__ = __nonzero__ # python3 -class ArchivedSyncResult( - collections.namedtuple( - "ArchivedSyncResult", - [ - "room_id", # str - "timeline", # TimelineBatch - "state", # dict[(str, str), FrozenEvent] - "account_data", - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class ArchivedSyncResult: + room_id = attr.ib(type=str) + timeline = attr.ib(type=TimelineBatch) + state = attr.ib(type=StateMap[EventBase]) + account_data = attr.ib(type=List[JsonDict]) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. 
""" @@ -135,70 +138,88 @@ class ArchivedSyncResult( __bool__ = __nonzero__ # python3 -class InvitedSyncResult( - collections.namedtuple( - "InvitedSyncResult", - ["room_id", "invite"], # str # FrozenEvent: the invite event - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class InvitedSyncResult: + room_id = attr.ib(type=str) + invite = attr.ib(type=EventBase) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Invited rooms should always be reported to the client""" return True __bool__ = __nonzero__ # python3 -class GroupsSyncResult( - collections.namedtuple("GroupsSyncResult", ["join", "invite", "leave"]) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class GroupsSyncResult: + join = attr.ib(type=JsonDict) + invite = attr.ib(type=JsonDict) + leave = attr.ib(type=JsonDict) - def __nonzero__(self): + def __nonzero__(self) -> bool: return bool(self.join or self.invite or self.leave) __bool__ = __nonzero__ # python3 -class DeviceLists( - collections.namedtuple( - "DeviceLists", - [ - "changed", # list of user_ids whose devices may have changed - "left", # list of user_ids whose devices we no longer track - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class DeviceLists: + """ + Attributes: + changed: List of user_ids whose devices may have changed + left: List of user_ids whose devices we no longer track + """ - def __nonzero__(self): + changed = attr.ib(type=Collection[str]) + left = attr.ib(type=Collection[str]) + + def __nonzero__(self) -> bool: return bool(self.changed or self.left) __bool__ = __nonzero__ # python3 -class SyncResult( - collections.namedtuple( - "SyncResult", - [ - "next_batch", # Token for the next sync - "presence", # List of presence events for the user. - "account_data", # List of account_data events for the user. - "joined", # JoinedSyncResult for each joined room. - "invited", # InvitedSyncResult for each invited room. - "archived", # ArchivedSyncResult for each archived room. - "to_device", # List of direct messages for the device. - "device_lists", # List of user_ids whose devices have changed - "device_one_time_keys_count", # Dict of algorithm to count for one time keys - # for this device - "groups", - ], - ) -): - __slots__ = [] +@attr.s +class _RoomChanges: + """The set of room entries to include in the sync, plus the set of joined + and left room IDs since last sync. + """ - def __nonzero__(self): + room_entries = attr.ib(type=List["RoomSyncResultBuilder"]) + invited = attr.ib(type=List[InvitedSyncResult]) + newly_joined_rooms = attr.ib(type=List[str]) + newly_left_rooms = attr.ib(type=List[str]) + + +@attr.s(slots=True, frozen=True) +class SyncResult: + """ + Attributes: + next_batch: Token for the next sync + presence: List of presence events for the user. + account_data: List of account_data events for the user. + joined: JoinedSyncResult for each joined room. + invited: InvitedSyncResult for each invited room. + archived: ArchivedSyncResult for each archived room. + to_device: List of direct messages for the device. 
+ device_lists: List of user_ids whose devices have changed + device_one_time_keys_count: Dict of algorithm to count for one time keys + for this device + groups: Group updates, if any + """ + + next_batch = attr.ib(type=StreamToken) + presence = attr.ib(type=List[JsonDict]) + account_data = attr.ib(type=List[JsonDict]) + joined = attr.ib(type=List[JoinedSyncResult]) + invited = attr.ib(type=List[InvitedSyncResult]) + archived = attr.ib(type=List[ArchivedSyncResult]) + to_device = attr.ib(type=List[JsonDict]) + device_lists = attr.ib(type=DeviceLists) + device_one_time_keys_count = attr.ib(type=JsonDict) + groups = attr.ib(type=Optional[GroupsSyncResult]) + + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if the notifier needs to wait for more events when polling for events. @@ -240,13 +261,15 @@ class SyncHandler(object): ) async def wait_for_sync_for_user( - self, sync_config, since_token=None, timeout=0, full_state=False - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + timeout: int = 0, + full_state: bool = False, + ) -> SyncResult: """Get the sync for a client if we have new data for it now. Otherwise wait for new data to arrive on the server. If the timeout expires, then return an empty sync result. - Returns: - Deferred[SyncResult] """ # If the user is not part of the mau group, then check that limits have # not been exceeded (if not part of the group by this point, almost certain @@ -265,8 +288,12 @@ class SyncHandler(object): return res async def _wait_for_sync_for_user( - self, sync_config, since_token, timeout, full_state - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + timeout: int = 0, + full_state: bool = False, + ) -> SyncResult: if since_token is None: sync_type = "initial_sync" elif full_state: @@ -305,25 +332,33 @@ class SyncHandler(object): return result - def current_sync_for_user(self, sync_config, since_token=None, full_state=False): + async def current_sync_for_user( + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + full_state: bool = False, + ) -> SyncResult: """Get the sync for client needed to match what the server has now. - Returns: - A Deferred SyncResult. """ - return self.generate_sync_result(sync_config, since_token, full_state) + return await self.generate_sync_result(sync_config, since_token, full_state) - async def push_rules_for_user(self, user): + async def push_rules_for_user(self, user: UserID) -> JsonDict: user_id = user.to_string() rules = await self.store.get_push_rules_for_user(user_id) rules = format_push_rules_for_user(user, rules) return rules - async def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): + async def ephemeral_by_room( + self, + sync_result_builder: "SyncResultBuilder", + now_token: StreamToken, + since_token: Optional[StreamToken] = None, + ) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]: """Get the ephemeral events for each room the user is in Args: - sync_result_builder(SyncResultBuilder) - now_token (StreamToken): Where the server is currently up to. - since_token (StreamToken): Where the server was when the client + sync_result_builder + now_token: Where the server is currently up to. + since_token: Where the server was when the client last synced. 
Returns: A tuple of the now StreamToken, updated to reflect the which typing @@ -348,7 +383,7 @@ class SyncHandler(object): ) now_token = now_token.copy_and_replace("typing_key", typing_key) - ephemeral_by_room = {} + ephemeral_by_room = {} # type: JsonDict for event in typing: # we want to exclude the room_id from the event, but modifying the @@ -380,13 +415,13 @@ class SyncHandler(object): async def _load_filtered_recents( self, - room_id, - sync_config, - now_token, - since_token=None, - recents=None, - newly_joined_room=False, - ): + room_id: str, + sync_config: SyncConfig, + now_token: StreamToken, + since_token: Optional[StreamToken] = None, + potential_recents: Optional[List[EventBase]] = None, + newly_joined_room: bool = False, + ) -> TimelineBatch: """ Returns: a Deferred TimelineBatch @@ -397,21 +432,29 @@ class SyncHandler(object): sync_config.filter_collection.blocks_all_room_timeline() ) - if recents is None or newly_joined_room or timeline_limit < len(recents): + if ( + potential_recents is None + or newly_joined_room + or timeline_limit < len(potential_recents) + ): limited = True else: limited = False - if recents: - recents = sync_config.filter_collection.filter_room_timeline(recents) + if potential_recents: + recents = sync_config.filter_collection.filter_room_timeline( + potential_recents + ) # We check if there are any state events, if there are then we pass # all current state events to the filter_events function. This is to # ensure that we always include current state in the timeline - current_state_ids = frozenset() + current_state_ids = frozenset() # type: FrozenSet[str] if any(e.is_state() for e in recents): - current_state_ids = await self.state.get_current_state_ids(room_id) - current_state_ids = frozenset(itervalues(current_state_ids)) + current_state_ids_map = await self.state.get_current_state_ids( + room_id + ) + current_state_ids = frozenset(itervalues(current_state_ids_map)) recents = await filter_events_for_client( self.storage, @@ -463,8 +506,10 @@ class SyncHandler(object): # ensure that we always include current state in the timeline current_state_ids = frozenset() if any(e.is_state() for e in loaded_recents): - current_state_ids = await self.state.get_current_state_ids(room_id) - current_state_ids = frozenset(itervalues(current_state_ids)) + current_state_ids_map = await self.state.get_current_state_ids( + room_id + ) + current_state_ids = frozenset(itervalues(current_state_ids_map)) loaded_recents = await filter_events_for_client( self.storage, @@ -493,17 +538,15 @@ class SyncHandler(object): limited=limited or newly_joined_room, ) - async def get_state_after_event(self, event, state_filter=StateFilter.all()): + async def get_state_after_event( + self, event: EventBase, state_filter: StateFilter = StateFilter.all() + ) -> StateMap[str]: """ Get the room state after the given event Args: - event(synapse.events.EventBase): event of interest - state_filter (StateFilter): The state filter used to fetch state - from the database. - - Returns: - A Deferred map from ((type, state_key)->Event) + event: event of interest + state_filter: The state filter used to fetch state from the database. 
""" state_ids = await self.state_store.get_state_ids_for_event( event.event_id, state_filter=state_filter @@ -514,18 +557,17 @@ class SyncHandler(object): return state_ids async def get_state_at( - self, room_id, stream_position, state_filter=StateFilter.all() - ): + self, + room_id: str, + stream_position: StreamToken, + state_filter: StateFilter = StateFilter.all(), + ) -> StateMap[str]: """ Get the room state at a particular stream position Args: - room_id(str): room for which to get state - stream_position(StreamToken): point at which to get state - state_filter (StateFilter): The state filter used to fetch state - from the database. - - Returns: - A Deferred map from ((type, state_key)->Event) + room_id: room for which to get state + stream_position: point at which to get state + state_filter: The state filter used to fetch state from the database. """ # FIXME this claims to get the state at a stream position, but # get_recent_events_for_room operates by topo ordering. This therefore @@ -546,23 +588,25 @@ class SyncHandler(object): state = {} return state - async def compute_summary(self, room_id, sync_config, batch, state, now_token): + async def compute_summary( + self, + room_id: str, + sync_config: SyncConfig, + batch: TimelineBatch, + state: StateMap[EventBase], + now_token: StreamToken, + ) -> Optional[JsonDict]: """ Works out a room summary block for this room, summarising the number of joined members in the room, and providing the 'hero' members if the room has no name so clients can consistently name rooms. Also adds state events to 'state' if needed to describe the heroes. - Args: - room_id(str): - sync_config(synapse.handlers.sync.SyncConfig): - batch(synapse.handlers.sync.TimelineBatch): The timeline batch for - the room that will be sent to the user. - state(dict): dict of (type, state_key) -> Event as returned by - compute_state_delta - now_token(str): Token of the end of the current batch. - - Returns: - A deferred dict describing the room summary + Args + room_id + sync_config + batch: The timeline batch for the room that will be sent to the user. + state: State as returned by compute_state_delta + now_token: Token of the end of the current batch. """ # FIXME: we could/should get this from room_stats when matthew/stats lands @@ -681,7 +725,7 @@ class SyncHandler(object): return summary - def get_lazy_loaded_members_cache(self, cache_key): + def get_lazy_loaded_members_cache(self, cache_key: Tuple[str, str]) -> LruCache: cache = self.lazy_loaded_members_cache.get(cache_key) if cache is None: logger.debug("creating LruCache for %r", cache_key) @@ -692,23 +736,24 @@ class SyncHandler(object): return cache async def compute_state_delta( - self, room_id, batch, sync_config, since_token, now_token, full_state - ): + self, + room_id: str, + batch: TimelineBatch, + sync_config: SyncConfig, + since_token: Optional[StreamToken], + now_token: StreamToken, + full_state: bool, + ) -> StateMap[EventBase]: """ Works out the difference in state between the start of the timeline and the previous sync. Args: - room_id(str): - batch(synapse.handlers.sync.TimelineBatch): The timeline batch for - the room that will be sent to the user. - sync_config(synapse.handlers.sync.SyncConfig): - since_token(str|None): Token of the end of the previous batch. May - be None. - now_token(str): Token of the end of the current batch. - full_state(bool): Whether to force returning the full state. 
- - Returns: - A deferred dict of (type, state_key) -> Event + room_id: + batch: The timeline batch for the room that will be sent to the user. + sync_config: + since_token: Token of the end of the previous batch. May be None. + now_token: Token of the end of the current batch. + full_state: Whether to force returning the full state. """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state @@ -800,6 +845,10 @@ class SyncHandler(object): # about them). state_filter = StateFilter.all() + # If this is an initial sync then full_state should be set, and + # that case is handled above. We assert here to ensure that this + # is indeed the case. + assert since_token is not None state_at_previous_sync = await self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) @@ -874,7 +923,7 @@ class SyncHandler(object): if t[0] == EventTypes.Member: cache.set(t[1], event_id) - state = {} + state = {} # type: Dict[str, EventBase] if state_ids: state = await self.store.get_events(list(state_ids.values())) @@ -885,7 +934,9 @@ class SyncHandler(object): ) } - async def unread_notifs_for_room_id(self, room_id, sync_config): + async def unread_notifs_for_room_id( + self, room_id: str, sync_config: SyncConfig + ) -> Optional[Dict[str, str]]: with Measure(self.clock, "unread_notifs_for_room_id"): last_unread_event_id = await self.store.get_last_receipt_event_id_for_user( user_id=sync_config.user.to_string(), @@ -893,7 +944,6 @@ class SyncHandler(object): receipt_type="m.read", ) - notifs = [] if last_unread_event_id: notifs = await self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id @@ -905,17 +955,12 @@ class SyncHandler(object): return None async def generate_sync_result( - self, sync_config, since_token=None, full_state=False - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + full_state: bool = False, + ) -> SyncResult: """Generates a sync result. 
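The `assert since_token is not None` added above is for mypy as much as for safety: `since_token` is `Optional[StreamToken]`, the guarantee that it is set on this code path lives in an earlier branch, and the assert is what narrows the type for the rest of the function. The same narrowing in miniature (toy function, `str` standing in for `StreamToken`):

    from typing import Optional

    def state_description(since_token: Optional[str], full_state: bool) -> str:
        if full_state:
            return "full state"
        # The invariant "non-full_state syncs always have a since_token"
        # lives in the caller, so mypy cannot see it here. The assert
        # documents it and narrows Optional[str] to str.
        assert since_token is not None
        return "incremental since " + since_token
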
- - Args: - sync_config (SyncConfig) - since_token (StreamToken) - full_state (bool) - - Returns: - Deferred(SyncResult) """ # NB: The now_token gets changed by some of the generate_sync_* methods, # this is due to some of the underlying streams not supporting the ability @@ -977,7 +1022,7 @@ class SyncHandler(object): ) device_id = sync_config.device_id - one_time_key_counts = {} + one_time_key_counts = {} # type: JsonDict if device_id: one_time_key_counts = await self.store.count_e2e_one_time_keys( user_id, device_id @@ -1007,7 +1052,9 @@ class SyncHandler(object): ) @measure_func("_generate_sync_entry_for_groups") - async def _generate_sync_entry_for_groups(self, sync_result_builder): + async def _generate_sync_entry_for_groups( + self, sync_result_builder: "SyncResultBuilder" + ) -> None: user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token @@ -1052,27 +1099,22 @@ class SyncHandler(object): @measure_func("_generate_sync_entry_for_device_list") async def _generate_sync_entry_for_device_list( self, - sync_result_builder, - newly_joined_rooms, - newly_joined_or_invited_users, - newly_left_rooms, - newly_left_users, - ): + sync_result_builder: "SyncResultBuilder", + newly_joined_rooms: Set[str], + newly_joined_or_invited_users: Set[str], + newly_left_rooms: Set[str], + newly_left_users: Set[str], + ) -> DeviceLists: """Generate the DeviceLists section of sync Args: - sync_result_builder (SyncResultBuilder) - newly_joined_rooms (set[str]): Set of rooms user has joined since + sync_result_builder + newly_joined_rooms: Set of rooms user has joined since previous sync + newly_joined_or_invited_users: Set of users that have joined or + been invited to a room since previous sync. + newly_left_rooms: Set of rooms user has left since previous sync + newly_left_users: Set of users that have left a room we're in since previous sync - newly_joined_or_invited_users (set[str]): Set of users that have - joined or been invited to a room since previous sync. - newly_left_rooms (set[str]): Set of rooms user has left since - previous sync - newly_left_users (set[str]): Set of users that have left a room - we're in since previous sync - - Returns: - Deferred[DeviceLists] """ user_id = sync_result_builder.sync_config.user.to_string() @@ -1133,15 +1175,11 @@ class SyncHandler(object): else: return DeviceLists(changed=[], left=[]) - async def _generate_sync_entry_for_to_device(self, sync_result_builder): + async def _generate_sync_entry_for_to_device( + self, sync_result_builder: "SyncResultBuilder" + ) -> None: """Generates the portion of the sync response. Populates `sync_result_builder` with the result. - - Args: - sync_result_builder(SyncResultBuilder) - - Returns: - Deferred(dict): A dictionary containing the per room account data. """ user_id = sync_result_builder.sync_config.user.to_string() device_id = sync_result_builder.sync_config.device_id @@ -1179,15 +1217,17 @@ class SyncHandler(object): else: sync_result_builder.to_device = [] - async def _generate_sync_entry_for_account_data(self, sync_result_builder): + async def _generate_sync_entry_for_account_data( + self, sync_result_builder: "SyncResultBuilder" + ) -> Dict[str, Dict[str, JsonDict]]: """Generates the account data portion of the sync response. Populates `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) + sync_result_builder Returns: - Deferred(dict): A dictionary containing the per room account data. 
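Annotations such as `one_time_key_counts = {}  # type: JsonDict` above use the comment form rather than a variable annotation, presumably because this code still ran on interpreters without PEP 526 syntax; mypy treats the two spellings the same. Side by side, with `JsonDict` re-declared locally as a stand-in for the Synapse alias:

    from typing import Any, Dict

    JsonDict = Dict[str, Any]  # local stand-in for synapse.types.JsonDict

    # Comment form: readable by mypy on any supported interpreter.
    counts = {}  # type: JsonDict

    # PEP 526 variable-annotation form, equivalent for mypy on Python 3.6+.
    counts_annotated: JsonDict = {}
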
+ A dictionary containing the per room account data. """ sync_config = sync_result_builder.sync_config user_id = sync_result_builder.sync_config.user.to_string() @@ -1231,18 +1271,21 @@ class SyncHandler(object): return account_data_by_room async def _generate_sync_entry_for_presence( - self, sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users - ): + self, + sync_result_builder: "SyncResultBuilder", + newly_joined_rooms: Set[str], + newly_joined_or_invited_users: Set[str], + ) -> None: """Generates the presence portion of the sync response. Populates the `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) - newly_joined_rooms(list): List of rooms that the user has joined - since the last sync (or empty if an initial sync) - newly_joined_or_invited_users(list): List of users that have joined - or been invited to rooms since the last sync (or empty if an initial - sync) + sync_result_builder + newly_joined_rooms: Set of rooms that the user has joined since + the last sync (or empty if an initial sync) + newly_joined_or_invited_users: Set of users that have joined or + been invited to rooms since the last sync (or empty if an + initial sync) """ now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config @@ -1286,17 +1329,19 @@ class SyncHandler(object): sync_result_builder.presence = presence async def _generate_sync_entry_for_rooms( - self, sync_result_builder, account_data_by_room - ): + self, + sync_result_builder: "SyncResultBuilder", + account_data_by_room: Dict[str, Dict[str, JsonDict]], + ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) - account_data_by_room(dict): Dictionary of per room account data + sync_result_builder + account_data_by_room: Dictionary of per room account data Returns: - Deferred(tuple): Returns a 4-tuple of + Returns a 4-tuple of `(newly_joined_rooms, newly_joined_or_invited_users, newly_left_rooms, newly_left_users)` """ @@ -1307,7 +1352,7 @@ class SyncHandler(object): ) if block_all_room_ephemeral: - ephemeral_by_room = {} + ephemeral_by_room = {} # type: Dict[str, List[JsonDict]] else: now_token, ephemeral_by_room = await self.ephemeral_by_room( sync_result_builder, @@ -1328,7 +1373,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - return [], [], [], [] + return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1340,19 +1385,22 @@ class SyncHandler(object): ignored_users = frozenset() if since_token: - res = await self._get_rooms_changed(sync_result_builder, ignored_users) - room_entries, invited, newly_joined_rooms, newly_left_rooms = res - + room_changes = await self._get_rooms_changed( + sync_result_builder, ignored_users + ) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: - res = await self._get_all_rooms(sync_result_builder, ignored_users) - room_entries, invited, newly_joined_rooms = res - newly_left_rooms = [] + room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) tags_by_room = await self.store.get_tags_for_user(user_id) + room_entries = room_changes.room_entries + invited = room_changes.invited + newly_joined_rooms = room_changes.newly_joined_rooms + newly_left_rooms = room_changes.newly_left_rooms + def 
handle_room_entries(room_entry):
            return self._generate_room_entry(
                sync_result_builder,
@@ -1392,13 +1440,15 @@ class SyncHandler(object):
             newly_left_users -= newly_joined_or_invited_users
 
         return (
-            newly_joined_rooms,
+            set(newly_joined_rooms),
             newly_joined_or_invited_users,
-            newly_left_rooms,
+            set(newly_left_rooms),
             newly_left_users,
         )
 
-    async def _have_rooms_changed(self, sync_result_builder):
+    async def _have_rooms_changed(
+        self, sync_result_builder: "SyncResultBuilder"
+    ) -> bool:
         """Returns whether there may be any new events that should be sent down
         the sync. Returns True if there are.
         """
@@ -1422,22 +1472,10 @@ class SyncHandler(object):
                 return True
         return False
 
-    async def _get_rooms_changed(self, sync_result_builder, ignored_users):
+    async def _get_rooms_changed(
+        self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+    ) -> _RoomChanges:
         """Gets the changes that have happened since the last sync.
-
-        Args:
-            sync_result_builder(SyncResultBuilder)
-            ignored_users(set(str)): Set of users ignored by user.
-
-        Returns:
-            Deferred(tuple): Returns a tuple of the form:
-            `(room_entries, invited_rooms, newly_joined_rooms, newly_left_rooms)`
-
-            where:
-                room_entries is a list [RoomSyncResultBuilder]
-                invited_rooms is a list [InvitedSyncResult]
-                newly_joined_rooms is a list[str] of room ids
-                newly_left_rooms is a list[str] of room ids
         """
         user_id = sync_result_builder.sync_config.user.to_string()
         since_token = sync_result_builder.since_token
@@ -1451,7 +1489,7 @@ class SyncHandler(object):
             user_id, since_token.room_key, now_token.room_key
         )
 
-        mem_change_events_by_room_id = {}
+        mem_change_events_by_room_id = {}  # type: Dict[str, List[EventBase]]
         for event in rooms_changed:
             mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
 
@@ -1570,7 +1608,7 @@ class SyncHandler(object):
                 # This is all screaming out for a refactor, as the logic here is
                 # subtle and the moving parts numerous.
                 if leave_event.internal_metadata.is_out_of_band_membership():
-                    batch_events = [leave_event]
+                    batch_events = [leave_event]  # type: Optional[List[EventBase]]
                 else:
                     batch_events = None
 
@@ -1636,18 +1674,17 @@ class SyncHandler(object):
             )
             room_entries.append(entry)
 
-        return room_entries, invited, newly_joined_rooms, newly_left_rooms
+        return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms)
 
-    async def _get_all_rooms(self, sync_result_builder, ignored_users):
+    async def _get_all_rooms(
+        self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+    ) -> _RoomChanges:
         """Returns entries for all rooms for the user.
 
         Args:
-            sync_result_builder(SyncResultBuilder)
-            ignored_users(set(str)): Set of users ignored by user.
+            sync_result_builder
+            ignored_users: Set of users ignored by user.
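The payoff of `_RoomChanges` above is that `_get_rooms_changed` and `_get_all_rooms` now return one named shape instead of tuples of different lengths, so callers unpack by attribute rather than by position. A before/after sketch with toy types, assuming the `attrs` package:

    from typing import List, Tuple

    import attr

    # Before: differently-shaped tuples from each code path, unpacked by
    # position at the call site.
    def changed_rooms_old() -> Tuple[List[str], List[str], List[str], List[str]]:
        return ["entry"], [], ["!new:example.com"], []

    # After: one named, typed container shared by both code paths.
    @attr.s
    class RoomChanges:
        room_entries = attr.ib(type=List[str])
        invited = attr.ib(type=List[str])
        newly_joined_rooms = attr.ib(type=List[str])
        newly_left_rooms = attr.ib(type=List[str])

    def changed_rooms_new() -> RoomChanges:
        return RoomChanges(["entry"], [], ["!new:example.com"], [])

    assert changed_rooms_new().newly_joined_rooms == ["!new:example.com"]
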
- Returns: - Deferred(tuple): Returns a tuple of the form: - `([RoomSyncResultBuilder], [InvitedSyncResult], [])` """ user_id = sync_result_builder.sync_config.user.to_string() @@ -1709,30 +1746,30 @@ class SyncHandler(object): ) ) - return room_entries, invited, [] + return _RoomChanges(room_entries, invited, [], []) async def _generate_room_entry( self, - sync_result_builder, - ignored_users, - room_builder, - ephemeral, - tags, - account_data, - always_include=False, + sync_result_builder: "SyncResultBuilder", + ignored_users: Set[str], + room_builder: "RoomSyncResultBuilder", + ephemeral: List[JsonDict], + tags: Optional[List[JsonDict]], + account_data: Dict[str, JsonDict], + always_include: bool = False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: - sync_result_builder(SyncResultBuilder) - ignored_users(set(str)): Set of users ignored by user. - room_builder(RoomSyncResultBuilder) - ephemeral(list): List of new ephemeral events for room - tags(list): List of *all* tags for room, or None if there has been + sync_result_builder + ignored_users: Set of users ignored by user. + room_builder + ephemeral: List of new ephemeral events for room + tags: List of *all* tags for room, or None if there has been no change. - account_data(list): List of new account data for room - always_include(bool): Always include this room in the sync response, + account_data: List of new account data for room + always_include: Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined @@ -1758,7 +1795,7 @@ class SyncHandler(object): sync_config, now_token=upto_token, since_token=since_token, - recents=events, + potential_recents=events, newly_joined_room=newly_joined, ) @@ -1809,7 +1846,7 @@ class SyncHandler(object): room_id, batch, sync_config, since_token, now_token, full_state=full_state ) - summary = {} + summary = {} # type: Optional[JsonDict] # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form @@ -1833,7 +1870,7 @@ class SyncHandler(object): ) if room_builder.rtype == "joined": - unread_notifications = {} + unread_notifications = {} # type: Dict[str, str] room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -1860,18 +1897,20 @@ class SyncHandler(object): % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": - room_sync = ArchivedSyncResult( + archived_room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) - if room_sync or always_include: - sync_result_builder.archived.append(room_sync) + if archived_room_sync or always_include: + sync_result_builder.archived.append(archived_room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype) - async def get_rooms_for_user_at(self, user_id, stream_ordering): + async def get_rooms_for_user_at( + self, user_id: str, stream_ordering: int + ) -> FrozenSet[str]: """Get set of joined rooms for a user at the given stream ordering. The stream ordering *must* be recent, otherwise this may throw an @@ -1879,12 +1918,11 @@ class SyncHandler(object): current token, which should be perfectly fine). Args: - user_id (str) - stream_ordering (int) + user_id + stream_ordering ReturnValue: - Deferred[frozenset[str]]: Set of room_ids the user is in at given - stream_ordering. + Set of room_ids the user is in at given stream_ordering. 
""" joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id) @@ -1911,11 +1949,10 @@ class SyncHandler(object): if user_id in users_in_room: joined_room_ids.add(room_id) - joined_room_ids = frozenset(joined_room_ids) - return joined_room_ids + return frozenset(joined_room_ids) -def _action_has_highlight(actions): +def _action_has_highlight(actions: List[JsonDict]) -> bool: for action in actions: try: if action.get("set_tweak", None) == "highlight": @@ -1927,22 +1964,23 @@ def _action_has_highlight(actions): def _calculate_state( - timeline_contains, timeline_start, previous, current, lazy_load_members -): + timeline_contains: StateMap[str], + timeline_start: StateMap[str], + previous: StateMap[str], + current: StateMap[str], + lazy_load_members: bool, +) -> StateMap[str]: """Works out what state to include in a sync response. Args: - timeline_contains (dict): state in the timeline - timeline_start (dict): state at the start of the timeline - previous (dict): state at the end of the previous sync (or empty dict + timeline_contains: state in the timeline + timeline_start: state at the start of the timeline + previous: state at the end of the previous sync (or empty dict if this is an initial sync) - current (dict): state at the end of the timeline - lazy_load_members (bool): whether to return members from timeline_start + current: state at the end of the timeline + lazy_load_members: whether to return members from timeline_start or not. assumes that timeline_start has already been filtered to include only the members the client needs to know about. - - Returns: - dict """ event_id_to_key = { e: key @@ -1979,15 +2017,16 @@ def _calculate_state( return {event_id_to_key[e]: e for e in state_ids} -class SyncResultBuilder(object): +@attr.s +class SyncResultBuilder: """Used to help build up a new SyncResult for a user Attributes: - sync_config (SyncConfig) - full_state (bool) - since_token (StreamToken) - now_token (StreamToken) - joined_room_ids (list[str]) + sync_config + full_state: The full_state flag as specified by user + since_token: The token supplied by user, or None. + now_token: The token to sync up to. + joined_room_ids: List of rooms the user is joined to # The following mirror the fields in a sync response presence (list) @@ -1995,61 +2034,45 @@ class SyncResultBuilder(object): joined (list[JoinedSyncResult]) invited (list[InvitedSyncResult]) archived (list[ArchivedSyncResult]) - device (list) groups (GroupsSyncResult|None) to_device (list) """ - def __init__( - self, sync_config, full_state, since_token, now_token, joined_room_ids - ): - """ - Args: - sync_config (SyncConfig) - full_state (bool): The full_state flag as specified by user - since_token (StreamToken): The token supplied by user, or None. - now_token (StreamToken): The token to sync up to. 
- joined_room_ids (list[str]): List of rooms the user is joined to - """ - self.sync_config = sync_config - self.full_state = full_state - self.since_token = since_token - self.now_token = now_token - self.joined_room_ids = joined_room_ids + sync_config = attr.ib(type=SyncConfig) + full_state = attr.ib(type=bool) + since_token = attr.ib(type=Optional[StreamToken]) + now_token = attr.ib(type=StreamToken) + joined_room_ids = attr.ib(type=FrozenSet[str]) - self.presence = [] - self.account_data = [] - self.joined = [] - self.invited = [] - self.archived = [] - self.device = [] - self.groups = None - self.to_device = [] + presence = attr.ib(type=List[JsonDict], default=attr.Factory(list)) + account_data = attr.ib(type=List[JsonDict], default=attr.Factory(list)) + joined = attr.ib(type=List[JoinedSyncResult], default=attr.Factory(list)) + invited = attr.ib(type=List[InvitedSyncResult], default=attr.Factory(list)) + archived = attr.ib(type=List[ArchivedSyncResult], default=attr.Factory(list)) + groups = attr.ib(type=Optional[GroupsSyncResult], default=None) + to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list)) +@attr.s class RoomSyncResultBuilder(object): """Stores information needed to create either a `JoinedSyncResult` or `ArchivedSyncResult`. + + Attributes: + room_id + rtype: One of `"joined"` or `"archived"` + events: List of events to include in the room (more events may be added + when generating result). + newly_joined: If the user has newly joined the room + full_state: Whether the full state should be sent in result + since_token: Earliest point to return events from, or None + upto_token: Latest point to return events from. """ - def __init__( - self, room_id, rtype, events, newly_joined, full_state, since_token, upto_token - ): - """ - Args: - room_id(str) - rtype(str): One of `"joined"` or `"archived"` - events(list[FrozenEvent]): List of events to include in the room - (more events may be added when generating result). - newly_joined(bool): If the user has newly joined the room - full_state(bool): Whether the full state should be sent in result - since_token(StreamToken): Earliest point to return events from, or None - upto_token(StreamToken): Latest point to return events from. 
- """ - self.room_id = room_id - self.rtype = rtype - self.events = events - self.newly_joined = newly_joined - self.full_state = full_state - self.since_token = since_token - self.upto_token = upto_token + room_id = attr.ib(type=str) + rtype = attr.ib(type=str) + events = attr.ib(type=Optional[List[EventBase]]) + newly_joined = attr.ib(type=bool) + full_state = attr.ib(type=bool) + since_token = attr.ib(type=Optional[StreamToken]) + upto_token = attr.ib(type=StreamToken) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index feb1c07cb2..b9ee6ec1ec 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -238,8 +238,11 @@ class RedactionTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def build(self, prev_event_ids): built_event = yield self._base_builder.build(prev_event_ids) - built_event.event_id = self._event_id + + built_event._event_id = self._event_id built_event._event_dict["event_id"] = self._event_id + assert built_event.event_id == self._event_id + return built_event @property diff --git a/tox.ini b/tox.ini index 88ef12bebd..ef22368cf1 100644 --- a/tox.ini +++ b/tox.ini @@ -180,6 +180,7 @@ commands = mypy \ synapse/api \ synapse/config/ \ synapse/federation/transport \ + synapse/handlers/sync.py \ synapse/handlers/ui_auth \ synapse/logging/ \ synapse/module_api \ From e81c093974faeb8d39dcedcbda7c2e4f5683091f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 19:15:08 +0000 Subject: [PATCH 028/382] make FederationHandler.on_get_missing_events async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c94573b547..ea2f6a91d7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2121,24 +2121,23 @@ class FederationHandler(BaseHandler): return ret - @defer.inlineCallbacks - def on_get_missing_events( + async def on_get_missing_events( self, origin, room_id, earliest_events, latest_events, limit ): - in_room = yield self.auth.check_host_in_room(room_id, origin) + in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") limit = min(limit, 20) - missing_events = yield self.store.get_missing_events( + missing_events = await self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, ) - missing_events = yield filter_events_for_server( + missing_events = await filter_events_for_server( self.storage, origin, missing_events ) From 5d17c3159618b6e75bb58ba68a77a73572a85688 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 22:28:11 +0000 Subject: [PATCH 029/382] make FederationHandler.send_invite async --- synapse/handlers/federation.py | 5 ++--- synapse/handlers/message.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea2f6a91d7..5728ea2ee7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1184,13 +1184,12 @@ class FederationHandler(BaseHandler): ) raise SynapseError(http_client.BAD_REQUEST, "Too many auth_events") - @defer.inlineCallbacks - def send_invite(self, target_host, event): + async def send_invite(self, target_host, event): """ Sends the invite to the remote server for signing. Invites must be signed by the invitee's server before distribution. 
""" - pdu = yield self.federation_client.send_invite( + pdu = await self.federation_client.send_invite( destination=target_host, room_id=event.room_id, event_id=event.event_id, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index bdf16c84d3..be6ae18a92 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -932,10 +932,9 @@ class EventCreationHandler(object): # way? If we have been invited by a remote server, we need # to get them to sign the event. - returned_invite = yield federation_handler.send_invite( - invitee.domain, event + returned_invite = yield defer.ensureDeferred( + federation_handler.send_invite(invitee.domain, event) ) - event.unsigned.pop("room_state", None) # TODO: Make sure the signatures actually are correct. From 0536d0c9beb15fce5f2ca24b4565f79f3140943f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:35:40 +0000 Subject: [PATCH 030/382] make FederationClient.backfill async --- synapse/federation/federation_client.py | 26 +++++++++++-------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f99d17a7de..297292f389 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,7 @@ import copy import itertools import logging -from typing import Dict, Iterable +from typing import Dict, Iterable, List, Tuple from prometheus_client import Counter @@ -37,7 +37,7 @@ from synapse.api.room_versions import ( EventFormatVersions, RoomVersions, ) -from synapse.events import builder, room_version_to_event_format +from synapse.events import EventBase, builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function @@ -170,21 +170,17 @@ class FederationClient(FederationBase): sent_queries_counter.labels("client_one_time_keys").inc() return self.transport_layer.claim_client_keys(destination, content, timeout) - @defer.inlineCallbacks - @log_function - def backfill(self, dest, room_id, limit, extremities): - """Requests some more historic PDUs for the given context from the + async def backfill( + self, dest: str, room_id: str, limit: int, extremities: Iterable[str] + ) -> List[EventBase]: + """Requests some more historic PDUs for the given room from the given destination server. Args: dest (str): The remote homeserver to ask. room_id (str): The room_id to backfill. - limit (int): The maximum number of PDUs to return. - extremities (list): List of PDU id and origins of the first pdus - we have seen from the context - - Returns: - Deferred: Results in the received PDUs. + limit (int): The maximum number of events to return. 
+            extremities (list): our current backwards extremities, to backfill from
         """
         logger.debug("backfill extrem=%s", extremities)
 
         # If there are no extremities then we've (probably) reached the start.
         if not extremities:
             return
 
-        transaction_data = yield self.transport_layer.backfill(
+        transaction_data = await self.transport_layer.backfill(
             dest, room_id, extremities, limit
         )
 
         logger.debug("backfill transaction_data=%r", transaction_data)
 
-        room_version = yield self.store.get_room_version_id(room_id)
+        room_version = await self.store.get_room_version_id(room_id)
         format_ver = room_version_to_event_format(room_version)
 
         pdus = [
@@ -207,7 +203,7 @@ class FederationClient(FederationBase):
         ]
 
         # FIXME: We should handle signature failures more gracefully.
-        pdus[:] = yield make_deferred_yieldable(
+        pdus[:] = await make_deferred_yieldable(
             defer.gatherResults(
                 self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
             ).addErrback(unwrapFirstError)

From 0cb0c7bcd529b0dda92aff0dae6277dfcb6f6e29 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 3 Feb 2020 20:41:54 +0000
Subject: [PATCH 031/382] make FederationClient.get_pdu async

---
 synapse/federation/federation_client.py | 32 +++++++++++++------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 297292f389..b9c8d8e325 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -17,7 +17,7 @@
 import copy
 import itertools
 import logging
-from typing import Dict, Iterable, List, Tuple
+from typing import Dict, Iterable, List, Optional, Tuple
 
 from prometheus_client import Counter
 
@@ -211,11 +211,14 @@ class FederationClient(FederationBase):
 
         return pdus
 
-    @defer.inlineCallbacks
-    @log_function
-    def get_pdu(
-        self, destinations, event_id, room_version, outlier=False, timeout=None
-    ):
+    async def get_pdu(
+        self,
+        destinations: Iterable[str],
+        event_id: str,
+        room_version: str,
+        outlier: bool = False,
+        timeout: Optional[int] = None,
+    ) -> Optional[EventBase]:
         """Requests the PDU with given origin and ID from the remote home
         servers.
 
@@ -223,18 +226,17 @@ class FederationClient(FederationBase):
         one succeeds.
 
         Args:
-            destinations (list): Which homeservers to query
-            event_id (str): event to fetch
-            room_version (str): version of the room
-            outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
+            destinations: Which homeservers to query
+            event_id: event to fetch
+            room_version: version of the room
+            outlier: Indicates whether the PDU is an `outlier`, i.e. if
                 it's from an arbitrary point in the context as opposed to part
                 of the current block of PDUs. Defaults to `False`
-            timeout (int): How long to try (in ms) each destination for before
+            timeout: How long to try (in ms) each destination for before
                 moving to the next destination. None indicates no timeout.
 
         Returns:
-            Deferred: Results in the requested PDU, or None if we were unable to find
-               it.
+            The requested PDU, or None if we were unable to find it.
         """
 
         # TODO: Rate limit the number of times we try and get the same event.
@@ -255,7 +257,7 @@ class FederationClient(FederationBase):
                 continue
 
             try:
-                transaction_data = yield self.transport_layer.get_event(
+                transaction_data = await self.transport_layer.get_event(
                     destination, event_id, timeout=timeout
                 )
 
@@ -275,7 +277,7 @@ class FederationClient(FederationBase):
                     pdu = pdu_list[0]
 
                     # Check signatures are correct.
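The conversions in these patches are mechanical: drop `@defer.inlineCallbacks` (and `@log_function`, which cannot wrap a coroutine as-is), declare the function `async def`, and turn each `yield` of a Deferred into an `await`. In miniature, assuming Twisted is available and `fetch` stands in for a transport call:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_thing_old(fetch):
        thing = yield fetch()  # old style: generator driven by Deferreds
        return thing

    async def get_thing(fetch):
        thing = await fetch()  # new style: Deferreds are awaitable
        return thing

    # Callers that are still on inlineCallbacks wrap the coroutine, as the
    # handlers/message.py hunk earlier does with defer.ensureDeferred(...).
    d = defer.ensureDeferred(get_thing(lambda: defer.succeed(42)))
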
- signed_pdu = yield self._check_sigs_and_hash(room_version, pdu) + signed_pdu = await self._check_sigs_and_hash(room_version, pdu) break From d73683c363a9177951b8078dd1628c1ade8de508 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:42:52 +0000 Subject: [PATCH 032/382] make FederationClient.get_room_state_ids async --- synapse/federation/federation_client.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b9c8d8e325..c3e556e7d9 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -307,15 +307,16 @@ class FederationClient(FederationBase): return signed_pdu - @defer.inlineCallbacks - def get_room_state_ids(self, destination: str, room_id: str, event_id: str): + async def get_room_state_ids( + self, destination: str, room_id: str, event_id: str + ) -> Tuple[List[str], List[str]]: """Calls the /state_ids endpoint to fetch the state at a particular point in the room, and the auth events for the given event Returns: - Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids) + a tuple of (state event_ids, auth event_ids) """ - result = yield self.transport_layer.get_room_state_ids( + result = await self.transport_layer.get_room_state_ids( destination, room_id, event_id=event_id ) From 24d814ca238c40fef61c7826d1a9d9f9dd3a144d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:43:40 +0000 Subject: [PATCH 033/382] make FederationClient.get_event_auth async --- synapse/federation/federation_client.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c3e556e7d9..1da33a7a64 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -330,19 +330,17 @@ class FederationClient(FederationBase): return state_event_ids, auth_event_ids - @defer.inlineCallbacks - @log_function - def get_event_auth(self, destination, room_id, event_id): - res = yield self.transport_layer.get_event_auth(destination, room_id, event_id) + async def get_event_auth(self, destination, room_id, event_id): + res = await self.transport_layer.get_event_auth(destination, room_id, event_id) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) auth_chain = [ event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"] ] - signed_auth = yield self._check_sigs_and_hash_and_fetch( + signed_auth = await self._check_sigs_and_hash_and_fetch( destination, auth_chain, outlier=True, room_version=room_version ) From 3f11cbb40494f0ac9622a5b686d3cf8379a44b5d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:51:26 +0000 Subject: [PATCH 034/382] make FederationClient.make_membership_event async --- synapse/federation/federation_client.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 1da33a7a64..b69aac9041 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, + RoomVersion, RoomVersions, ) from 
synapse.events import EventBase, builder, room_version_to_event_format
@@ -404,7 +405,7 @@ class FederationClient(FederationBase):
 
         raise SynapseError(502, "Failed to %s via any server" % (description,))
 
-    def make_membership_event(
+    async def make_membership_event(
         self,
         destinations: Iterable[str],
         room_id: str,
@@ -412,7 +413,7 @@ class FederationClient(FederationBase):
         membership: str,
         content: dict,
         params: Dict[str, str],
-    ):
+    ) -> Tuple[str, EventBase, RoomVersion]:
         """
         Creates an m.room.member event, with context, without participating in the room.
 
@@ -433,19 +434,19 @@ class FederationClient(FederationBase):
             content: Any additional data to put into the content field of the
                 event.
             params: Query parameters to include in the request.
 
-        Return:
-            Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of
+
+        Returns:
             `(origin, event, room_version)` where origin is the remote
             homeserver which generated the event, and room_version is the
             version of the room.
 
-            Fails with a `UnsupportedRoomVersionError` if remote responds with
-            a room version we don't understand.
+        Raises:
+            UnsupportedRoomVersionError: if remote responds with
+                a room version we don't understand.
 
-            Fails with a ``SynapseError`` if the chosen remote server
-            returns a 300/400 code.
+            SynapseError: if the chosen remote server returns a 300/400 code.
 
-            Fails with a ``RuntimeError`` if no servers were reachable.
+            RuntimeError: if no servers were reachable.
         """
         valid_memberships = {Membership.JOIN, Membership.LEAVE}
         if membership not in valid_memberships:
@@ -491,7 +492,7 @@ class FederationClient(FederationBase):
 
             return (destination, ev, room_version)
 
-        return self._try_destination_list(
+        return await self._try_destination_list(
             "make_" + membership, destinations, send_request
         )

From 8af9f11bea8d36188d7f8ff65f227b3cfdf9fa17 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 3 Feb 2020 20:55:00 +0000
Subject: [PATCH 035/382] make FederationClient.send_join async

---
 synapse/federation/federation_client.py | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index b69aac9041..c98b276805 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -17,7 +17,7 @@
 import copy
 import itertools
 import logging
-from typing import Dict, Iterable, List, Optional, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Tuple
 
 from prometheus_client import Counter
 
@@ -496,27 +496,29 @@ class FederationClient(FederationBase):
             "make_" + membership, destinations, send_request
         )
 
-    def send_join(self, destinations, pdu, event_format_version):
+    async def send_join(
+        self, destinations: Iterable[str], pdu: EventBase, event_format_version: int
+    ) -> Dict[str, Any]:
         """Sends a join event to one of a list of homeservers.
 
         Doing so will cause the remote server to add the event to the graph,
         and send the event out to the rest of the federation.
 
         Args:
-            destinations (str): Candidate homeservers which are probably
+            destinations: Candidate homeservers which are probably
                 participating in the room.
-            pdu (BaseEvent): event to be sent
-            event_format_version (int): The event format version
+            pdu: event to be sent
+            event_format_version: The event format version
 
-        Return:
-            Deferred: resolves to a dict with members ``origin`` (a string
+        Returns:
+            a dict with members ``origin`` (a string
             giving the server the event was sent to, ``state`` (?)
and ``auth_chain``. - Fails with a ``SynapseError`` if the chosen remote server - returns a 300/400 code. + Raises: + SynapseError: if the chosen remote server returns a 300/400 code. - Fails with a ``RuntimeError`` if no servers were reachable. + RuntimeError: if no servers were reachable. """ def check_authchain_validity(signed_auth_chain): @@ -603,7 +605,7 @@ class FederationClient(FederationBase): "origin": destination, } - return self._try_destination_list("send_join", destinations, send_request) + return await self._try_destination_list("send_join", destinations, send_request) @defer.inlineCallbacks def _do_send_join(self, destination, pdu): From a46fabf17bca51eb3a73feb5b3de4072030033e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:55:11 +0000 Subject: [PATCH 036/382] make FederationClient.send_leave async --- synapse/federation/federation_client.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c98b276805..8d33d27137 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -730,7 +730,7 @@ class FederationClient(FederationBase): ) return content - def send_leave(self, destinations, pdu): + async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None: """Sends a leave event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, @@ -739,17 +739,14 @@ class FederationClient(FederationBase): This is mostly useful to reject received invites. Args: - destinations (str): Candidate homeservers which are probably + destinations: Candidate homeservers which are probably participating in the room. - pdu (BaseEvent): event to be sent + pdu: event to be sent - Return: - Deferred: resolves to None. + Raises: + SynapseError if the chosen remote server returns a 300/400 code. - Fails with a ``SynapseError`` if the chosen remote server - returns a 300/400 code. - - Fails with a ``RuntimeError`` if no servers were reachable. + RuntimeError if no servers were reachable. 
""" @defer.inlineCallbacks @@ -759,7 +756,9 @@ class FederationClient(FederationBase): logger.debug("Got content: %s", content) return None - return self._try_destination_list("send_leave", destinations, send_request) + return await self._try_destination_list( + "send_leave", destinations, send_request + ) @defer.inlineCallbacks def _do_send_leave(self, destination, pdu): From 1330c311b79bc4a9b4d3349e72a2353bb54dcd90 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:59:10 +0000 Subject: [PATCH 037/382] make FederationClient._try_destination_list async --- synapse/federation/federation_client.py | 36 ++++++++++++++++++------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8d33d27137..11802dad0f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,17 @@ import copy import itertools import logging -from typing import Any, Dict, Iterable, List, Optional, Tuple +from typing import ( + Any, + Awaitable, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + TypeVar, +) from prometheus_client import Counter @@ -53,6 +63,8 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t PDU_RETRY_TIME_MS = 1 * 60 * 1000 +T = TypeVar("T") + class InvalidResponseError(RuntimeError): """Helper for _try_destination_list: indicates that the server returned a response @@ -349,17 +361,21 @@ class FederationClient(FederationBase): return signed_auth - @defer.inlineCallbacks - def _try_destination_list(self, description, destinations, callback): + async def _try_destination_list( + self, + description: str, + destinations: Iterable[str], + callback: Callable[[str], Awaitable[T]], + ) -> T: """Try an operation on a series of servers, until it succeeds Args: - description (unicode): description of the operation we're doing, for logging + description: description of the operation we're doing, for logging - destinations (Iterable[unicode]): list of server_names to try + destinations: list of server_names to try - callback (callable): Function to run for each server. Passed a single - argument: the server_name to try. May return a deferred. + callback: Function to run for each server. Passed a single + argument: the server_name to try. If the callback raises a CodeMessageException with a 300/400 code, attempts to perform the operation stop immediately and the exception is @@ -370,7 +386,7 @@ class FederationClient(FederationBase): suppressed if the exception is an InvalidResponseError. 
Returns: - The [Deferred] result of callback, if it succeeds + The result of callback, if it succeeds Raises: SynapseError if the chosen remote server returns a 300/400 code, or @@ -381,7 +397,7 @@ class FederationClient(FederationBase): continue try: - res = yield callback(destination) + res = await callback(destination) return res except InvalidResponseError as e: logger.warning("Failed to %s via %s: %s", description, destination, e) @@ -400,7 +416,7 @@ class FederationClient(FederationBase): ) except Exception: logger.warning( - "Failed to %s via %s", description, destination, exc_info=1 + "Failed to %s via %s", description, destination, exc_info=True ) raise SynapseError(502, "Failed to %s via any server" % (description,)) From ad09ee92622e0d37502fb49336f1e1474af458df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:07:13 +0000 Subject: [PATCH 038/382] make FederationClient.make_membership_event.send_request async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 11802dad0f..b59d08c4ae 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -471,9 +471,8 @@ class FederationClient(FederationBase): % (membership, ",".join(valid_memberships)) ) - @defer.inlineCallbacks - def send_request(destination): - ret = yield self.transport_layer.make_membership_event( + async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: + ret = await self.transport_layer.make_membership_event( destination, room_id, user_id, membership, params ) @@ -506,7 +505,7 @@ class FederationClient(FederationBase): event_dict=pdu_dict, ) - return (destination, ev, room_version) + return destination, ev, room_version return await self._try_destination_list( "make_" + membership, destinations, send_request From 3960527c2e1d9bbeb2f0f7b6218de06cd32bdcb4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:07:38 +0000 Subject: [PATCH 039/382] make FederationClient.send_join.send_request async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b59d08c4ae..8ca36c4d32 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -553,9 +553,8 @@ class FederationClient(FederationBase): "room appears to have unsupported version %s" % (room_version,) ) - @defer.inlineCallbacks - def send_request(destination): - content = yield self._do_send_join(destination, pdu) + async def send_request(destination) -> Dict[str, Any]: + content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) @@ -584,7 +583,7 @@ class FederationClient(FederationBase): # invalid, and it would fail auth checks anyway. 
raise SynapseError(400, "No create event in state") - valid_pdus = yield self._check_sigs_and_hash_and_fetch( + valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, From 638001116de3e472d916f8e809e15c658bab282a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:08:24 +0000 Subject: [PATCH 040/382] make FederationClient._do_send_join async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8ca36c4d32..b4609b78cb 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -621,12 +621,11 @@ class FederationClient(FederationBase): return await self._try_destination_list("send_join", destinations, send_request) - @defer.inlineCallbacks - def _do_send_join(self, destination, pdu): + async def _do_send_join(self, destination: str, pdu: EventBase): time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_join_v2( + content = await self.transport_layer.send_join_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -648,7 +647,7 @@ class FederationClient(FederationBase): logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") - resp = yield self.transport_layer.send_join_v1( + resp = await self.transport_layer.send_join_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From e88b90aaebd19822e310b708696036dc0b1f17f6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:08:51 +0000 Subject: [PATCH 041/382] make FederationClient.send_leave.send_request async --- synapse/federation/federation_client.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b4609b78cb..f98c36039d 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -762,12 +762,9 @@ class FederationClient(FederationBase): RuntimeError if no servers were reachable. 
""" - @defer.inlineCallbacks - def send_request(destination): - content = yield self._do_send_leave(destination, pdu) - + async def send_request(destination: str) -> None: + content = await self._do_send_leave(destination, pdu) logger.debug("Got content: %s", content) - return None return await self._try_destination_list( "send_leave", destinations, send_request From abadf44eb2c32c9a6f1da84239043755f854b327 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:09:07 +0000 Subject: [PATCH 042/382] make FederationClient._do_send_leave async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f98c36039d..5043220d14 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -770,12 +770,11 @@ class FederationClient(FederationBase): "send_leave", destinations, send_request ) - @defer.inlineCallbacks - def _do_send_leave(self, destination, pdu): + async def _do_send_leave(self, destination, pdu): time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_leave_v2( + content = await self.transport_layer.send_leave_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -797,7 +796,7 @@ class FederationClient(FederationBase): logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") - resp = yield self.transport_layer.send_leave_v1( + resp = await self.transport_layer.send_leave_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From 6deeefb68cbd2bc54d8e108b16e6427454644019 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:14:30 +0000 Subject: [PATCH 043/382] make FederationClient.get_missing_events async --- synapse/federation/federation_client.py | 40 ++++++++++++------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5043220d14..1e9b207a24 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -25,6 +25,7 @@ from typing import ( Iterable, List, Optional, + Sequence, Tuple, TypeVar, ) @@ -828,34 +829,33 @@ class FederationClient(FederationBase): third_party_instance_id=third_party_instance_id, ) - @defer.inlineCallbacks - def get_missing_events( + async def get_missing_events( self, - destination, - room_id, - earliest_events_ids, - latest_events, - limit, - min_depth, - timeout, - ): + destination: str, + room_id: str, + earliest_events_ids: Sequence[str], + latest_events: Iterable[EventBase], + limit: int, + min_depth: int, + timeout: int, + ) -> List[EventBase]: """Tries to fetch events we are missing. This is called when we receive an event without having received all of its ancestors. Args: - destination (str) - room_id (str) - earliest_events_ids (list): List of event ids. Effectively the + destination + room_id + earliest_events_ids: List of event ids. Effectively the events we expected to receive, but haven't. `get_missing_events` should only return events that didn't happen before these. - latest_events (list): List of events we have received that we don't + latest_events: List of events we have received that we don't have all previous events for. - limit (int): Maximum number of events to return. - min_depth (int): Minimum depth of events tor return. 
- timeout (int): Max time to wait in ms + limit: Maximum number of events to return. + min_depth: Minimum depth of events to return. + timeout: Max time to wait in ms """ try: - content = yield self.transport_layer.get_missing_events( + content = await self.transport_layer.get_missing_events( destination=destination, room_id=room_id, earliest_events=earliest_events_ids, @@ -865,14 +865,14 @@ class FederationClient(FederationBase): timeout=timeout, ) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) events = [ event_from_pdu_json(e, format_ver) for e in content.get("events", []) ] - signed_events = yield self._check_sigs_and_hash_and_fetch( + signed_events = await self._check_sigs_and_hash_and_fetch( destination, events, outlier=False, room_version=room_version ) except HttpResponseException as e: From 4b4536dd02cc1128335383b6cc36afc1f0f6d71c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:15:08 +0000 Subject: [PATCH 044/382] newsfile --- changelog.d/6840.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6840.misc diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc new file mode 100644 index 0000000000..0496f12de8 --- /dev/null +++ b/changelog.d/6840.misc @@ -0,0 +1 @@ +Port much of `synapse.handlers.federation` to async/await. From ea23210b2dcc75489d9369b13636a56b7449d765 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 22:29:49 +0000 Subject: [PATCH 045/382] make FederationClient.send_invite async --- synapse/federation/federation_client.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 1e9b207a24..51f9b1d8d7 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -659,23 +659,22 @@ class FederationClient(FederationBase): # content. return resp[1] - @defer.inlineCallbacks - def send_invite(self, destination, room_id, event_id, pdu): - room_version = yield self.store.get_room_version_id(room_id) + async def send_invite(self, destination, room_id, event_id, pdu): + room_version = await self.store.get_room_version_id(room_id) - content = yield self._do_send_invite(destination, pdu, room_version) + content = await self._do_send_invite(destination, pdu, room_version) pdu_dict = content["event"] logger.debug("Got response to send_invite: %s", pdu_dict) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdu = event_from_pdu_json(pdu_dict, format_ver) # Check signatures are correct. - pdu = yield self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) # FIXME: We should handle signature failures more gracefully. 
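
Each of the conversions above follows the same mechanical recipe: drop the `@defer.inlineCallbacks` decorator, change `def` to `async def`, and turn each `yield` on a Deferred into an `await`. A minimal before/after sketch of the shape (the `store.get_profile` call is an illustrative stand-in, not code from these patches):

    from twisted.internet import defer

    # Before: a generator-based coroutine driven by inlineCallbacks.
    @defer.inlineCallbacks
    def get_displayname_old(store, user_id):
        profile = yield store.get_profile(user_id)  # yields a Deferred
        return profile["displayname"]

    # After: a native coroutine. Callers that still need a Deferred can
    # bridge with defer.ensureDeferred(get_displayname(store, user_id)).
    async def get_displayname(store, user_id):
        profile = await store.get_profile(user_id)
        return profile["displayname"]

Call sites change the same way: `yield self._do_send_leave(...)` becomes `await self._do_send_leave(...)`, and any caller that must keep returning a Deferred wraps the coroutine with `defer.ensureDeferred` before firing it.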
From 23d8a55c7af93d562b7e7f24b0d195fced692123 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 4 Feb 2020 00:10:21 -0500 Subject: [PATCH 046/382] add device signatures to device key query results --- synapse/storage/data_stores/main/devices.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index ea0503476f..b7617efb80 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -320,6 +320,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) else: result["deleted"] = True @@ -524,6 +529,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) results.append(result) From 245ee142209f634a51942681bb141e617e1ecd55 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 4 Feb 2020 00:21:07 -0500 Subject: [PATCH 047/382] add changelog --- changelog.d/6844.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6844.bugfix diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix new file mode 100644 index 0000000000..e84aa1029f --- /dev/null +++ b/changelog.d/6844.bugfix @@ -0,0 +1 @@ +Fix an issue with cross-signing where device signatures were not sent to remote servers. From c87572d6e426099fa36e2cd8260319531ec0fbb8 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 4 Feb 2020 16:21:09 +0000 Subject: [PATCH 048/382] Update CONTRIBUTING.md about merging PRs. (#6846) --- CONTRIBUTING.md | 14 ++++++++++++++ changelog.d/6846.doc | 1 + 2 files changed, 15 insertions(+) create mode 100644 changelog.d/6846.doc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5736ede6c4..4b01b6ac8c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs. +## Merge Strategy + +We use the commit history of develop/master extensively to identify +when regressions were introduced and what changes have been made. + +We aim to have a clean merge history, which means we normally squash-merge +changes into develop. For small changes this means there is no need to rebase +to clean up your PR before merging. Larger changes with an organised set of +commits may be merged as-is, if the history is judged to be useful. + +This use of squash-merging will mean PRs built on each other will be hard to +merge. We suggest avoiding these where possible, and if required, ensuring +each PR has a tidy set of commits to ease merging. + ## Conclusion That's it! 
Matrix is a very open and collaborative project as you might expect
diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc
new file mode 100644
index 0000000000..ad69d608c0
--- /dev/null
+++ b/changelog.d/6846.doc
@@ -0,0 +1 @@
+Add details of PR merge strategy to contributing docs.
\ No newline at end of file

From 6475382d807e1fed095d1e3fbd04884799ebd612 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 4 Feb 2020 17:25:54 +0000
Subject: [PATCH 049/382] Fix detecting unknown devices from remote encrypted events. (#6848)

We were looking at the wrong event type (`m.room.encryption` vs
`m.room.encrypted`).

Also fixup the duplicate `EventTypes` entries.

Introduced in #6776.
---
 changelog.d/6848.bugfix | 1 +
 synapse/api/constants.py | 3 +--
 synapse/handlers/federation.py | 2 +-
 synapse/handlers/room.py | 2 +-
 synapse/handlers/stats.py | 2 +-
 synapse/storage/data_stores/main/stats.py | 4 ++--
 6 files changed, 7 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/6848.bugfix

diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix
new file mode 100644
index 0000000000..65688e5d57
--- /dev/null
+++ b/changelog.d/6848.bugfix
@@ -0,0 +1 @@
+Fix detecting unknown devices from remote encrypted events.
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 0ade47e624..cc8577552b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -77,12 +77,11 @@ class EventTypes(object):
     Aliases = "m.room.aliases"
     Redaction = "m.room.redaction"
     ThirdPartyInvite = "m.room.third_party_invite"
-    Encryption = "m.room.encryption"
     RelatedGroups = "m.room.related_groups"
 
     RoomHistoryVisibility = "m.room.history_visibility"
     CanonicalAlias = "m.room.canonical_alias"
-    Encryption = "m.room.encryption"
+    Encrypted = "m.room.encrypted"
     RoomAvatar = "m.room.avatar"
     RoomEncryption = "m.room.encryption"
     GuestAccess = "m.room.guest_access"
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c86d3177e9..488200a2d1 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -752,7 +752,7 @@ class FederationHandler(BaseHandler):
 
         # For encrypted messages we check that we know about the sending device,
         # if we don't then we mark the device cache for that user as stale.
- if event.type == EventTypes.Encryption: + if event.type == EventTypes.Encrypted: device_id = event.content.get("device_id") if device_id is not None: cached_devices = await self.store.get_cached_devices_for_user( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1382399557..b609a65f47 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -360,7 +360,7 @@ class RoomCreationHandler(BaseHandler): (EventTypes.RoomHistoryVisibility, ""), (EventTypes.GuestAccess, ""), (EventTypes.RoomAvatar, ""), - (EventTypes.Encryption, ""), + (EventTypes.RoomEncryption, ""), (EventTypes.ServerACL, ""), (EventTypes.RelatedGroups, ""), (EventTypes.PowerLevels, ""), diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 7f7d56390e..68e6edace5 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -286,7 +286,7 @@ class StatsHandler(StateDeltasHandler): room_state["history_visibility"] = event_content.get( "history_visibility" ) - elif typ == EventTypes.Encryption: + elif typ == EventTypes.RoomEncryption: room_state["encryption"] = event_content.get("algorithm") elif typ == EventTypes.Name: room_state["name"] = event_content.get("name") diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py index 7bc186e9a1..7af1495e47 100644 --- a/synapse/storage/data_stores/main/stats.py +++ b/synapse/storage/data_stores/main/stats.py @@ -744,7 +744,7 @@ class StatsStore(StateDeltasStore): EventTypes.Create, EventTypes.JoinRules, EventTypes.RoomHistoryVisibility, - EventTypes.Encryption, + EventTypes.RoomEncryption, EventTypes.Name, EventTypes.Topic, EventTypes.RoomAvatar, @@ -816,7 +816,7 @@ class StatsStore(StateDeltasStore): room_state["history_visibility"] = event.content.get( "history_visibility" ) - elif event.type == EventTypes.Encryption: + elif event.type == EventTypes.RoomEncryption: room_state["encryption"] = event.content.get("algorithm") elif event.type == EventTypes.Name: room_state["name"] = event.content.get("name") From d88e0ec0802b3b0a49853fb6b777a35b7c195ea6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Feb 2020 21:31:08 +0000 Subject: [PATCH 050/382] Database updates to populate rooms.room_version (#6847) We're going to need this so that we can figure out how to handle redactions when fetching events from the database. --- changelog.d/6847.misc | 1 + .../57/rooms_version_column_2.sql.postgres | 35 +++++++++++++++++++ .../57/rooms_version_column_2.sql.sqlite | 22 ++++++++++++ 3 files changed, 58 insertions(+) create mode 100644 changelog.d/6847.misc create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc new file mode 100644 index 0000000000..094e911adb --- /dev/null +++ b/changelog.d/6847.misc @@ -0,0 +1 @@ +Populate `rooms.room_version` database column at startup, rather than in a background update. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres new file mode 100644 index 0000000000..c601cff6de --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres @@ -0,0 +1,35 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- when we first added the room_version column, it was populated via a background +-- update. We now need it to be populated before synapse starts, so we populate +-- any remaining rows with a NULL room version now. For servers which have completed +-- the background update, this will be pretty quick. + +-- the following query will set room_version to NULL if no create event is found for +-- the room in current_state_events, and will set it to '1' if a create event with no +-- room_version is found. + +UPDATE rooms SET room_version=( + SELECT COALESCE(json::json->'content'->>'room_version','1') + FROM current_state_events cse INNER JOIN event_json ej USING (event_id) + WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key='' +) WHERE rooms.room_version IS NULL; + +-- we still allow the background update to complete: it has the useful side-effect of +-- populating `rooms` with any missing rooms (based on the current_state_events table). + +-- see also rooms_version_column_2.sql.sqlite which has a copy of the above query, using +-- sqlite syntax for the json extraction. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite new file mode 100644 index 0000000000..335c6f2074 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite @@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- see rooms_version_column_2.sql.postgres for details of what's going on here. 
+ +UPDATE rooms SET room_version=( + SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1') + FROM current_state_events cse INNER JOIN event_json ej USING (event_id) + WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key='' +) WHERE rooms.room_version IS NULL; From a831d2e4e3c424fb54f186bfa7d83a17965f933e Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 5 Feb 2020 08:57:38 +0000 Subject: [PATCH 051/382] Reduce performance logging to DEBUG (#6833) * Reduce tnx performance logging to DEBUG * Changelog.d --- changelog.d/6833.misc | 1 + synapse/storage/database.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6833.misc diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc new file mode 100644 index 0000000000..8a0605f90b --- /dev/null +++ b/changelog.d/6833.misc @@ -0,0 +1 @@ +Reducing log level to DEBUG for synapse.storage.TIME. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 1003dd84a5..3eeb2f7c04 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -343,7 +343,7 @@ class Database(object): top_three_counters = self._txn_perf_counters.interval(duration, limit=3) - perf_logger.info( + perf_logger.debug( "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters ) From 60d06724268891ad3b1e9dc6fe7cd080f9ba21b7 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 4 Feb 2020 12:03:54 -0500 Subject: [PATCH 052/382] Merge pull request #6844 from matrix-org/uhoreg/cross_signing_fix_device_fed add device signatures to device key query results --- changelog.d/6844.bugfix | 1 + synapse/storage/data_stores/main/devices.py | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 changelog.d/6844.bugfix diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix new file mode 100644 index 0000000000..e84aa1029f --- /dev/null +++ b/changelog.d/6844.bugfix @@ -0,0 +1 @@ +Fix an issue with cross-signing where device signatures were not sent to remote servers. diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index ea0503476f..b7617efb80 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -320,6 +320,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) else: result["deleted"] = True @@ -524,6 +529,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) results.append(result) From a58860e4802c31680ba43e59ec537984af9f5637 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Feb 2020 14:02:39 +0000 Subject: [PATCH 053/382] Check sender_key matches on inbound encrypted events. (#6850) If they don't then the device lists are probably out of sync. 
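
In outline, the handler change below computes the set of sender keys we
would accept for the event's claimed device, and schedules a device-list
resync when the claimed `sender_key` is not among them. A simplified,
standalone sketch of that decision (the helper name and flat arguments are
illustrative; the real logic is inlined in the event handler):

    def should_resync(cached_devices, device_id, sender_key, algorithm):
        # cached_devices: dict of device_id -> device dict, as from the store.
        device = cached_devices.get(device_id) if device_id else None
        if device_id and device is None:
            return True  # the event names a device we have never seen
        if sender_key is None:
            return False  # nothing further we can verify
        if device is not None:
            keys = device.get("keys", {}).get("keys", {})
            if algorithm == "m.megolm.v1.aes-sha2":
                # Megolm sender keys are the device's curve25519 identity key.
                expected = [keys.get("curve25519:%s" % (device_id,))]
            else:
                # Unrecognised algorithm: accept any key held for the device.
                expected = list(keys.values())
        else:
            # No device_id in the event: accept any key of any cached device.
            expected = [
                key
                for dev in cached_devices.values()
                for key in dev.get("keys", {}).get("keys", {}).values()
            ]
        return sender_key not in expected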
---
 changelog.d/6850.misc | 1 +
 synapse/handlers/device.py | 8 +++-
 synapse/handlers/federation.py | 72 ++++++++++++++++++++++++++++------
 3 files changed, 67 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/6850.misc

diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc
new file mode 100644
index 0000000000..418569113f
--- /dev/null
+++ b/changelog.d/6850.misc
@@ -0,0 +1 @@
+Detect unexpected sender keys on inbound encrypted events and resync device lists.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 26ef5e150c..a9bd431486 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -598,7 +598,13 @@ class DeviceListUpdater(object):
             # happens if we've missed updates.
             resync = yield self._need_to_do_resync(user_id, pending_updates)
 
-            logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
+            if logger.isEnabledFor(logging.INFO):
+                logger.info(
+                    "Received device list update for %s, requiring resync: %s. Devices: %s",
+                    user_id,
+                    resync,
+                    ", ".join(u[0] for u in pending_updates),
+                )
 
             if resync:
                 yield self.user_device_resync(user_id)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 488200a2d1..e9441bbeff 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -754,27 +754,73 @@ class FederationHandler(BaseHandler):
         # if we don't then we mark the device cache for that user as stale.
         if event.type == EventTypes.Encrypted:
             device_id = event.content.get("device_id")
+            sender_key = event.content.get("sender_key")
+
+            cached_devices = await self.store.get_cached_devices_for_user(event.sender)
+
+            resync = False  # Whether we should resync device lists.
+
+            device = None
             if device_id is not None:
-                cached_devices = await self.store.get_cached_devices_for_user(
-                    event.sender
-                )
-                if device_id not in cached_devices:
+                device = cached_devices.get(device_id)
+                if device is None:
                     logger.info(
                         "Received event from remote device not in our cache: %s %s",
                         event.sender,
                         device_id,
                     )
-                    await self.store.mark_remote_user_device_cache_as_stale(
-                        event.sender
+                    resync = True
+
+            # We also check if the `sender_key` matches what we expect.
+            if sender_key is not None:
+                # Figure out what sender key we're expecting. If we know the
+                # device and recognize the algorithm then we can work out the
+                # exact key to expect. Otherwise check it matches any key we
+                # have for that device.
+                if device:
+                    keys = device.get("keys", {}).get("keys", {})
+
+                    if event.content.get("algorithm") == "m.megolm.v1.aes-sha2":
+                        # For this algorithm we expect a curve25519 key.
+                        key_name = "curve25519:%s" % (device_id,)
+                        current_keys = [keys.get(key_name)]
+                    else:
+                        # We don't understand the algorithm, so we just
+                        # check it matches a key for the device.
+                        current_keys = keys.values()
+                elif device_id:
+                    # We don't have any keys for the device ID.
+                    current_keys = []
+                else:
+                    # The event didn't include a device ID, so we just look for
+                    # keys across all devices.
+                    current_keys = (
+                        key
+                        for device in cached_devices.values()
+                        for key in device.get("keys", {}).get("keys", {}).values()
                     )
 
-                    # Immediately attempt a resync in the background
-                    if self.config.worker_app:
-                        return run_in_background(self._user_device_resync, event.sender)
-                    else:
-                        return run_in_background(
-                            self._device_list_updater.user_device_resync, event.sender
-                        )
+            # We now check that the sender key matches (one of) the expected
+            # keys.
+                if sender_key not in current_keys:
+                    logger.info(
+                        "Received event from remote device with unexpected sender key: %s %s: %s",
+                        event.sender,
+                        device_id or "<no device_id>",
+                        sender_key,
+                    )
+                    resync = True
+
+            if resync:
+                await self.store.mark_remote_user_device_cache_as_stale(event.sender)
+
+                # Immediately attempt a resync in the background
+                if self.config.worker_app:
+                    return run_in_background(self._user_device_resync, event.sender)
+                else:
+                    return run_in_background(
+                        self._device_list_updater.user_device_resync, event.sender
+                    )
 
     @log_function
     async def backfill(self, dest, room_id, limit, extremities):

From 146fec08208144c8566da5ab6c2683229d162c1a Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 5 Feb 2020 15:47:00 +0000
Subject: [PATCH 054/382] Apply suggestions from code review

Co-Authored-By: Erik Johnston
---
 synapse/federation/federation_client.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 51f9b1d8d7..b4525d28c2 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -528,7 +528,7 @@ class FederationClient(FederationBase):
 
         Returns:
             a dict with members ``origin`` (a string
-            giving the serer the event was sent to, ``state`` (?) and
+            giving the server the event was sent to, ``state`` (?) and
             ``auth_chain``.
 
         Raises:
@@ -659,7 +659,9 @@ class FederationClient(FederationBase):
             # content.
             return resp[1]
 
-    async def send_invite(self, destination, room_id, event_id, pdu):
+    async def send_invite(
+        self, destination: str, room_id: str, event_id: str, pdu: EventBase,
+    ) -> EventBase:
         room_version = await self.store.get_room_version_id(room_id)
 
         content = await self._do_send_invite(destination, pdu, room_version)

From 6bbd890f05b59ffa33769ae15ea73bdf3c8dc908 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 5 Feb 2020 15:49:42 +0000
Subject: [PATCH 055/382] make FederationClient._do_send_invite async

---
 synapse/federation/federation_client.py | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index b4525d28c2..3a840e068b 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -53,6 +53,7 @@ from synapse.events import EventBase, builder, room_version_to_event_format
 from synapse.federation.federation_base import FederationBase, event_from_pdu_json
 from synapse.logging.context import make_deferred_yieldable
 from synapse.logging.utils import log_function
+from synapse.types import JsonDict
 from synapse.util import unwrapFirstError
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.retryutils import NotRetryingDestination
@@ -682,23 +683,19 @@ class FederationClient(FederationBase):
 
         return pdu
 
-    @defer.inlineCallbacks
-    def _do_send_invite(self, destination, pdu, room_version):
+    async def _do_send_invite(
+        self, destination: str, pdu: EventBase, room_version: str
+    ) -> JsonDict:
         """Actually sends the invite, first trying v2 API and falling back to
         v1 API if necessary.
- Args: - destination (str): Target server - pdu (FrozenEvent) - room_version (str) - Returns: - dict: The event as a dict as returned by the remote server + The event as a dict as returned by the remote server """ time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_invite_v2( + content = await self.transport_layer.send_invite_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -737,7 +734,7 @@ class FederationClient(FederationBase): # Didn't work, try v1 API. # Note the v1 API returns a tuple of `(200, content)` - _, content = yield self.transport_layer.send_invite_v1( + _, content = await self.transport_layer.send_invite_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From f84700fba8cd9345d7a1a025462c7f8650f27386 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 14:07:31 +0000 Subject: [PATCH 056/382] Pass room version object into `FederationClient.get_pdu` --- synapse/federation/federation_client.py | 8 +++++--- synapse/handlers/federation.py | 8 ++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3a840e068b..d2d42e7009 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -230,7 +230,7 @@ class FederationClient(FederationBase): self, destinations: Iterable[str], event_id: str, - room_version: str, + room_version: RoomVersion, outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: @@ -262,7 +262,7 @@ class FederationClient(FederationBase): pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) - format_ver = room_version_to_event_format(room_version) + format_ver = room_version.event_format signed_pdu = None for destination in destinations: @@ -292,7 +292,9 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. - signed_pdu = await self._check_sigs_and_hash(room_version, pdu) + signed_pdu = await self._check_sigs_and_hash( + room_version.identifier, pdu + ) break diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5728ea2ee7..5ca410e2b3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1110,7 +1110,7 @@ class FederationHandler(BaseHandler): Logs a warning if we can't find the given event. 
""" - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) event_infos = [] @@ -1916,11 +1916,7 @@ class FederationHandler(BaseHandler): for e_id in missing_auth_events: m_ev = await self.federation_client.get_pdu( - [origin], - e_id, - room_version=room_version.identifier, - outlier=True, - timeout=10000, + [origin], e_id, room_version=room_version, outlier=True, timeout=10000, ) if m_ev and m_ev.event_id == e_id: event_map[e_id] = m_ev From ee0525b2b2245d66a3ce0c186cb1ac1725dd93e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Feb 2020 17:35:09 +0000 Subject: [PATCH 057/382] Simplify `room_version` handling in `FederationClient.send_invite` --- synapse/federation/federation_client.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d2d42e7009..110e42b9ed 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -665,7 +665,7 @@ class FederationClient(FederationBase): async def send_invite( self, destination: str, room_id: str, event_id: str, pdu: EventBase, ) -> EventBase: - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) content = await self._do_send_invite(destination, pdu, room_version) @@ -673,20 +673,17 @@ class FederationClient(FederationBase): logger.debug("Got response to send_invite: %s", pdu_dict) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - - pdu = event_from_pdu_json(pdu_dict, format_ver) + pdu = event_from_pdu_json(pdu_dict, room_version.event_format) # Check signatures are correct. - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) # FIXME: We should handle signature failures more gracefully. return pdu async def _do_send_invite( - self, destination: str, pdu: EventBase, room_version: str + self, destination: str, pdu: EventBase, room_version: RoomVersion ) -> JsonDict: """Actually sends the invite, first trying v2 API and falling back to v1 API if necessary. @@ -703,7 +700,7 @@ class FederationClient(FederationBase): event_id=pdu.event_id, content={ "event": pdu.get_pdu_json(time_now), - "room_version": room_version, + "room_version": room_version.identifier, "invite_room_state": pdu.unsigned.get("invite_room_state", []), }, ) @@ -721,8 +718,7 @@ class FederationClient(FederationBase): # Otherwise, we assume that the remote server doesn't understand # the v2 invite API. That's ok provided the room uses old-style event # IDs. - v = KNOWN_ROOM_VERSIONS.get(room_version) - if v.event_format != EventFormatVersions.V1: + if room_version.event_format != EventFormatVersions.V1: raise SynapseError( 400, "User's homeserver does not support this room version", From ff70ec0a00bd65d819ce42fdb46d67160c197202 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 15:30:02 +0000 Subject: [PATCH 058/382] Newsfile --- changelog.d/6823.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6823.misc diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6823.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. 
From 39c2d26e0b80ee0cd5589fc577327f2d3d80a446 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 5 Feb 2020 12:41:33 -0500
Subject: [PATCH 059/382] Add quotes around pip install target (my shell
 complained without them).

---
 README.rst | 2 +-
 changelog.d/6855.misc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6855.misc

diff --git a/README.rst b/README.rst
index 2691dfc23d..4db7d17e94 100644
--- a/README.rst
+++ b/README.rst
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
 
     virtualenv -p python3 env
     source env/bin/activate
-    python -m pip install --no-use-pep517 -e .[all]
+    python -m pip install --no-use-pep517 -e ".[all]"
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.
diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc
new file mode 100644
index 0000000000..904361ddfb
--- /dev/null
+++ b/changelog.d/6855.misc
@@ -0,0 +1 @@
+Update pip install directions in readme to avoid error when using zsh.

From f0561fcffd172cb0dfe035dcc78f51bdd451c010 Mon Sep 17 00:00:00 2001
From: Robin Vleij
Date: Wed, 5 Feb 2020 22:27:38 +0100
Subject: [PATCH 060/382] Update documentation (#6859)

Update documentation to reflect the correct format of user_id (fully qualified).
---
 docs/admin_api/user_admin_api.rst | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 0b3d09d694..ed6df61a26 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -2,7 +2,8 @@ Create or modify Account
 ========================
 
 This API allows an administrator to create or modify a user account with a
-specific ``user_id``.
+specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
+``@user:server.com``.
 
 This api is::

From 6a7e90ad782bddce95fa0c7d93e56291aa31c33d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 6 Feb 2020 10:40:08 +0000
Subject: [PATCH 061/382] 1.10.0rc2

---
 CHANGES.md | 16 ++++++++++++++++
 changelog.d/6844.bugfix | 1 -
 changelog.d/6848.bugfix | 1 -
 changelog.d/6850.misc | 1 -
 synapse/__init__.py | 2 +-
 5 files changed, 17 insertions(+), 4 deletions(-)
 delete mode 100644 changelog.d/6844.bugfix
 delete mode 100644 changelog.d/6848.bugfix
 delete mode 100644 changelog.d/6850.misc

diff --git a/CHANGES.md b/CHANGES.md
index ab6fce3e7d..ee0e5d25e4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,19 @@
+Synapse 1.10.0rc2 (2020-02-06)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844))
+- Fix detecting unknown devices from remote encrypted events. ([\#6848](https://github.com/matrix-org/synapse/issues/6848))
+
+
+Internal Changes
+----------------
+
+- Detect unexpected sender keys on inbound encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850))
+
+
 Synapse 1.10.0rc1 (2020-01-31)
 ==============================
 
diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
deleted file mode 100644
index e84aa1029f..0000000000
--- a/changelog.d/6844.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix deleted file mode 100644 index 65688e5d57..0000000000 --- a/changelog.d/6848.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix detecting unknown devices from remote encrypted events. diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc deleted file mode 100644 index 418569113f..0000000000 --- a/changelog.d/6850.misc +++ /dev/null @@ -1 +0,0 @@ -Detect unexpected sender keys on inbound encrypted events and resync device lists. diff --git a/synapse/__init__.py b/synapse/__init__.py index bd942d3e1c..4f1859bd57 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc1" +__version__ = "1.10.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 4a50b674f2fa730fd3e85fe5d59b84b00e34bfc7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:45:29 +0000 Subject: [PATCH 062/382] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ee0e5d25e4..e56f738481 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,7 +17,7 @@ Internal Changes Synapse 1.10.0rc1 (2020-01-31) ============================== -**WARNING**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. +**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. Features From b5176166b7b62f3c04c21f12a775982c99a90c9c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:51:02 +0000 Subject: [PATCH 063/382] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e56f738481..17c7c91c62 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,13 +5,13 @@ Bugfixes -------- - Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844)) -- Fix detecting unknown devices from remote encrypted events. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) +- Fix to the unknown remote device detection which was indroduced in 1.10.rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) Internal Changes ---------------- -- Detect unexpected sender keys on inbound encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850)) +- Detect unexpected sender keys on remote encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850)) Synapse 1.10.0rc1 (2020-01-31) From f663118155dbc242f99165f978817ef9dbeb9fd1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:52:25 +0000 Subject: [PATCH 064/382] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 17c7c91c62..c2aa735908 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844)) -- Fix to the unknown remote device detection which was indroduced in 1.10.rc1. 
([\#6848](https://github.com/matrix-org/synapse/issues/6848)) +- Fix to the unknown remote device detection which was introduced in 1.10.rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) Internal Changes From ed630ea17c40d328cc0796e35d37287768c7140d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 13:31:05 +0000 Subject: [PATCH 065/382] Reduce amount of logging at INFO level. (#6862) A lot of the things we log at INFO are now a bit superfluous, so lets make them DEBUG logs to reduce the amount we log by default. Co-Authored-By: Brendan Abolivier Co-authored-by: Brendan Abolivier --- changelog.d/6862.misc | 1 + synapse/federation/federation_server.py | 6 +++--- synapse/federation/transport/server.py | 2 +- synapse/handlers/room.py | 10 +++++----- synapse/handlers/stats.py | 2 +- synapse/handlers/sync.py | 6 +++--- synapse/handlers/user_directory.py | 4 ++-- synapse/http/site.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/storage/data_stores/main/user_directory.py | 4 ++-- synapse/storage/persist_events.py | 2 +- synapse/util/caches/response_cache.py | 2 +- 12 files changed, 22 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6862.misc diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc new file mode 100644 index 0000000000..83626d2939 --- /dev/null +++ b/changelog.d/6862.misc @@ -0,0 +1 @@ +Reduce amount we log at `INFO` level. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d92d5e8064..8e3933b6c5 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -573,7 +573,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - logger.info( + logger.debug( "on_get_missing_events: earliest_events: %r, latest_events: %r," " limit: %d", earliest_events, @@ -586,11 +586,11 @@ class FederationServer(FederationBase): ) if len(missing_events) < 5: - logger.info( + logger.debug( "Returning %d events: %r", len(missing_events), missing_events ) else: - logger.info("Returning %d events", len(missing_events)) + logger.debug("Returning %d events", len(missing_events)) time_now = self._clock.time_msec() diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ae48ba8157..92a9ae2320 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -158,7 +158,7 @@ class Authenticator(object): origin, json_request, now, "Incoming request" ) - logger.info("Request from %s", origin) + logger.debug("Request from %s", origin) request.authenticated_entity = origin # If we get a valid signed request from the other side, its probably diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b609a65f47..559e3399b8 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -259,7 +259,7 @@ class RoomCreationHandler(BaseHandler): for v in ("invite", "events_default"): current = int(pl_content.get(v, 0)) if current < restricted_level: - logger.info( + logger.debug( "Setting level for %s in %s to %i (was %i)", v, old_room_id, @@ -269,7 +269,7 @@ class RoomCreationHandler(BaseHandler): pl_content[v] = restricted_level updated = True else: - logger.info("Not setting level for %s (already %i)", v, current) + logger.debug("Not setting level for %s (already %i)", v, current) if updated: try: @@ -296,7 +296,7 @@ class RoomCreationHandler(BaseHandler): EventTypes.Aliases, events_default 
) - logger.info("Setting correct PLs in new room to %s", new_pl_content) + logger.debug("Setting correct PLs in new room to %s", new_pl_content) yield self.event_creation_handler.create_and_send_nonmember_event( requester, { @@ -782,7 +782,7 @@ class RoomCreationHandler(BaseHandler): @defer.inlineCallbacks def send(etype, content, **kwargs): event = create(etype, content, **kwargs) - logger.info("Sending %s in new room", etype) + logger.debug("Sending %s in new room", etype) yield self.event_creation_handler.create_and_send_nonmember_event( creator, event, ratelimit=False ) @@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler): creation_content.update({"creator": creator_id}) yield send(etype=EventTypes.Create, content=creation_content) - logger.info("Sending %s in new room", EventTypes.Member) + logger.debug("Sending %s in new room", EventTypes.Member) yield self.room_member_handler.update_membership( creator, creator.user, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 68e6edace5..d93a276693 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler): room_state["guest_access"] = event_content.get("guest_access") for room_id, state in room_to_state_updates.items(): - logger.info("Updating room_stats_state for %s: %s", room_id, state) + logger.debug("Updating room_stats_state for %s: %s", room_id, state) yield self.store.update_room_state(room_id, state) return room_to_stats_deltas, user_to_stats_deltas diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5f060241b4..f8d60d32ba 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -968,7 +968,7 @@ class SyncHandler(object): # Always use the `now_token` in `SyncResultBuilder` now_token = await self.event_sources.get_current_token() - logger.info( + logger.debug( "Calculating sync response for %r between %s and %s", sync_config.user, since_token, @@ -1498,7 +1498,7 @@ class SyncHandler(object): room_entries = [] invited = [] for room_id, events in iteritems(mem_change_events_by_room_id): - logger.info( + logger.debug( "Membership changes in %s: [%s]", room_id, ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)), @@ -1892,7 +1892,7 @@ class SyncHandler(object): if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() - logger.info( + logger.debug( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 624f05ab5b..81aa58dc8c 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.pos, room_max_stream_ordering ) - logger.info("Handling %d state deltas", len(deltas)) + logger.debug("Handling %d state deltas", len(deltas)) yield self._handle_deltas(deltas) self.pos = max_pos @@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler): room_id, self.server_name ) if not is_in_room: - logger.info("Server left room: %r", room_id) + logger.debug("Server left room: %r", room_id) # Fetch all the users that we marked as being in user # directory due to being in the room and then check if # need to remove those users or not diff --git a/synapse/http/site.py b/synapse/http/site.py index 911251c0bc..e092193c9c 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -225,7 +225,7 @@ class SynapseRequest(Request): 
self.start_time, name=servlet_name, method=self.get_method() ) - self.site.access_logger.info( + self.site.access_logger.debug( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index d0879b0490..5bb17d1228 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -398,7 +398,7 @@ class HttpPusher(object): Args: badge (int): number of unread messages """ - logger.info("Sending updated badge count %d to %s", badge, self.name) + logger.debug("Sending updated badge count %d to %s", badge, self.name) d = { "notification": { "id": "", diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py index 90c180ec6d..6b8130bf0f 100644 --- a/synapse/storage/data_stores/main/user_directory.py +++ b/synapse/storage/data_stores/main/user_directory.py @@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) return 1 - logger.info( + logger.debug( "Processing the next %d rooms of %d remaining" % (len(rooms_to_work_on), progress["remaining"]) ) @@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) return 1 - logger.info( + logger.debug( "Processing the next %d users of %d remaining" % (len(users_to_work_on), progress["remaining"]) ) diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index af3fd67ab9..a5370ed527 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -390,7 +390,7 @@ class EventsPersistenceStorage(object): state_delta_reuse_delta_counter.inc() break - logger.info("Calculating state delta for room %s", room_id) + logger.debug("Calculating state delta for room %s", room_id) with Measure( self._clock, "persist_events.get_new_state_after_events" ): diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 82d3eefe0e..b68f9fe0d4 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -144,7 +144,7 @@ class ResponseCache(object): """ result = self.get(key) if not result: - logger.info( + logger.debug( "[%s]: no cached result for [%s], calculating new one", self._name, key ) d = run_in_background(callback, *args, **kwargs) From 99fcc96289f673f96f2d180a84df84f6b8a85521 Mon Sep 17 00:00:00 2001 From: PeerD Date: Thu, 6 Feb 2020 15:15:29 +0100 Subject: [PATCH 066/382] Third party event rules Update (#6781) --- changelog.d/6781.bugfix | 1 + synapse/events/third_party_rules.py | 7 ++++--- synapse/handlers/room.py | 6 +++++- 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6781.bugfix diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix new file mode 100644 index 0000000000..47cd671bff --- /dev/null +++ b/changelog.d/6781.bugfix @@ -0,0 +1 @@ +Fixed third party event rules function `on_create_room`'s return value being ignored. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 86f7e5f8aa..459132d388 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -74,15 +74,16 @@ class ThirdPartyEventRules(object): is_requester_admin (bool): If the requester is an admin Returns: - defer.Deferred + defer.Deferred[bool]: Whether room creation is allowed or denied. 
""" if self.third_party_rules is None: - return + return True - yield self.third_party_rules.on_create_room( + ret = yield self.third_party_rules.on_create_room( requester, config, is_requester_admin ) + return ret @defer.inlineCallbacks def check_threepid_can_be_invited(self, medium, address, room_id): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 559e3399b8..ab07edd2fc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -579,9 +579,13 @@ class RoomCreationHandler(BaseHandler): # Check whether the third party rules allows/changes the room create # request. - yield self.third_party_event_rules.on_create_room( + event_allowed = yield self.third_party_event_rules.on_create_room( requester, config, is_requester_admin=is_requester_admin ) + if not event_allowed: + raise SynapseError( + 403, "You are not permitted to create rooms", Codes.FORBIDDEN + ) if not is_requester_admin and not self.spam_checker.user_may_create_room( user_id From bce557175bad82889d303b349e6575636c41b702 Mon Sep 17 00:00:00 2001 From: timfi Date: Thu, 6 Feb 2020 15:45:01 +0100 Subject: [PATCH 067/382] Allow empty federation_certificate_verification_whitelist (#6849) --- changelog.d/6849.bugfix | 1 + synapse/config/tls.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6849.bugfix diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix new file mode 100644 index 0000000000..d928a26ec6 --- /dev/null +++ b/changelog.d/6849.bugfix @@ -0,0 +1 @@ +Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 2e9e478a2a..2514b0713d 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -109,6 +109,8 @@ class TlsConfig(Config): fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) + if fed_whitelist_entries is None: + fed_whitelist_entries = [] # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] # type: List[str] From b0c8bdd49dd416ed066b12daee95cf5f4828f03b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Feb 2020 15:50:39 +0000 Subject: [PATCH 068/382] pass room version into FederationClient.send_join (#6854) ... which allows us to sanity-check the create event. --- changelog.d/6854.misc | 1 + synapse/federation/federation_client.py | 60 +++++++++++++------------ synapse/handlers/federation.py | 3 +- 3 files changed, 34 insertions(+), 30 deletions(-) create mode 100644 changelog.d/6854.misc diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6854.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 110e42b9ed..5fb4bd414c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -516,7 +516,7 @@ class FederationClient(FederationBase): ) async def send_join( - self, destinations: Iterable[str], pdu: EventBase, event_format_version: int + self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion ) -> Dict[str, Any]: """Sends a join event to one of a list of homeservers. @@ -527,7 +527,8 @@ class FederationClient(FederationBase): destinations: Candidate homeservers which are probably participating in the room. 
pdu: event to be sent - event_format_version: The event format version + room_version: the version of the room (according to the server that + did the make_join) Returns: a dict with members ``origin`` (a string @@ -540,58 +541,51 @@ class FederationClient(FederationBase): RuntimeError: if no servers were reachable. """ - def check_authchain_validity(signed_auth_chain): - for e in signed_auth_chain: - if e.type == EventTypes.Create: - create_event = e - break - else: - raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,)) - - # the room version should be sane. - room_version = create_event.content.get("room_version", "1") - if room_version not in KNOWN_ROOM_VERSIONS: - # This shouldn't be possible, because the remote server should have - # rejected the join attempt during make_join. - raise InvalidResponseError( - "room appears to have unsupported version %s" % (room_version,) - ) - async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ - event_from_pdu_json(p, event_format_version, outlier=True) + event_from_pdu_json(p, room_version.event_format, outlier=True) for p in content.get("state", []) ] auth_chain = [ - event_from_pdu_json(p, event_format_version, outlier=True) + event_from_pdu_json(p, room_version.event_format, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} - room_version = None + create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): - room_version = e.content.get( - "room_version", RoomVersions.V1.identifier - ) + create_event = e break - if room_version is None: + if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") + # the room version should be sane. + create_room_version = create_event.content.get( + "room_version", RoomVersions.V1.identifier + ) + if create_room_version != room_version.identifier: + # either the server that fulfilled the make_join, or the server that is + # handling the send_join, is lying. 
+                raise InvalidResponseError(
+                    "Unexpected room version %s in create event"
+                    % (create_room_version,)
+                )
+
             valid_pdus = await self._check_sigs_and_hash_and_fetch(
                 destination,
                 list(pdus.values()),
                 outlier=True,
-                room_version=room_version,
+                room_version=room_version.identifier,
             )
 
             valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -615,7 +609,17 @@ class FederationClient(FederationBase):
             for s in signed_state:
                 s.internal_metadata = copy.deepcopy(s.internal_metadata)
 
-            check_authchain_validity(signed_auth)
+            # double-check that the same create event has ended up in the auth chain
+            auth_chain_create_events = [
+                e.event_id
+                for e in signed_auth
+                if (e.type, e.state_key) == (EventTypes.Create, "")
+            ]
+            if auth_chain_create_events != [create_event.event_id]:
+                raise InvalidResponseError(
+                    "Unexpected create event(s) in auth chain: %s"
+                    % (auth_chain_create_events,)
+                )
 
             return {
                 "state": signed_state,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ef3cc264b7..10e8b6ea4c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1305,9 +1305,8 @@ class FederationHandler(BaseHandler):
         except ValueError:
             pass
 
-        event_format_version = room_version_obj.event_format
         ret = await self.federation_client.send_join(
-            target_hosts, event, event_format_version
+            target_hosts, event, room_version_obj
         )
         origin = ret["origin"]
 

From 928edef9793bf10fa6156a42c4babbfaaaa17f88 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 31 Jan 2020 16:50:13 +0000
Subject: [PATCH 069/382] Pass room_version into `event_from_pdu_json`

It's called from all over the shop, so this one's a bit messy.
---
 changelog.d/6856.misc                   |  1 +
 synapse/federation/federation_base.py   | 28 +++++++++--------
 synapse/federation/federation_client.py | 35 ++++++++++-----------
 synapse/federation/federation_server.py | 41 +++++++++----------------
 tests/handlers/test_federation.py       |  6 ++--
 5 files changed, 51 insertions(+), 60 deletions(-)
 create mode 100644 changelog.d/6856.misc

diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6856.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 0e22183280..ebe8b8e9fe 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersion, +) from synapse.crypto.event_signing import check_event_content_hash -from synapse.events import event_type_from_format_version +from synapse.events import EventBase, event_type_from_format_version from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( @@ -33,7 +38,7 @@ from synapse.logging.context import ( make_deferred_yieldable, preserve_fn, ) -from synapse.types import get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util import unwrapFirstError logger = logging.getLogger(__name__) @@ -342,16 +347,15 @@ def _is_invite_via_3pid(event): ) -def event_from_pdu_json(pdu_json, event_format_version, outlier=False): - """Construct a FrozenEvent from an event json received over federation +def event_from_pdu_json( + pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False +) -> EventBase: + """Construct an EventBase from an event json received over federation Args: - pdu_json (object): pdu as received over federation - event_format_version (int): The event format version - outlier (bool): True to mark this event as an outlier - - Returns: - FrozenEvent + pdu_json: pdu as received over federation + room_version: The version of the room this event belongs to + outlier: True to mark this event as an outlier Raises: SynapseError: if the pdu is missing required fields or is otherwise @@ -370,7 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False): elif depth > MAX_DEPTH: raise SynapseError(400, "Depth too large", Codes.BAD_JSON) - event = event_type_from_format_version(event_format_version)(pdu_json) + event = event_type_from_format_version(room_version.event_format)(pdu_json) event.internal_metadata.outlier = outlier diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5fb4bd414c..4870e39652 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -49,7 +49,7 @@ from synapse.api.room_versions import ( RoomVersion, RoomVersions, ) -from synapse.events import EventBase, builder, room_version_to_event_format +from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function @@ -209,18 +209,18 @@ class FederationClient(FederationBase): logger.debug("backfill transaction_data=%r", transaction_data) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) pdus = [ - event_from_pdu_json(p, format_ver, outlier=False) + event_from_pdu_json(p, room_version, outlier=False) for p in transaction_data["pdus"] ] # FIXME: We should handle signature failures more gracefully. 
pdus[:] = await make_deferred_yieldable( defer.gatherResults( - self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True + self._check_sigs_and_hashes(room_version.identifier, pdus), + consumeErrors=True, ).addErrback(unwrapFirstError) ) @@ -262,8 +262,6 @@ class FederationClient(FederationBase): pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) - format_ver = room_version.event_format - signed_pdu = None for destination in destinations: now = self._clock.time_msec() @@ -284,7 +282,7 @@ class FederationClient(FederationBase): ) pdu_list = [ - event_from_pdu_json(p, format_ver, outlier=outlier) + event_from_pdu_json(p, room_version, outlier=outlier) for p in transaction_data["pdus"] ] @@ -350,15 +348,15 @@ class FederationClient(FederationBase): async def get_event_auth(self, destination, room_id, event_id): res = await self.transport_layer.get_event_auth(destination, room_id, event_id) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) auth_chain = [ - event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"] + event_from_pdu_json(p, room_version, outlier=True) + for p in res["auth_chain"] ] signed_auth = await self._check_sigs_and_hash_and_fetch( - destination, auth_chain, outlier=True, room_version=room_version + destination, auth_chain, outlier=True, room_version=room_version.identifier ) signed_auth.sort(key=lambda e: e.depth) @@ -547,12 +545,12 @@ class FederationClient(FederationBase): logger.debug("Got content: %s", content) state = [ - event_from_pdu_json(p, room_version.event_format, outlier=True) + event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ - event_from_pdu_json(p, room_version.event_format, outlier=True) + event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] @@ -677,7 +675,7 @@ class FederationClient(FederationBase): logger.debug("Got response to send_invite: %s", pdu_dict) - pdu = event_from_pdu_json(pdu_dict, room_version.event_format) + pdu = event_from_pdu_json(pdu_dict, room_version) # Check signatures are correct. 
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) @@ -865,15 +863,14 @@ class FederationClient(FederationBase): timeout=timeout, ) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) events = [ - event_from_pdu_json(e, format_ver) for e in content.get("events", []) + event_from_pdu_json(e, room_version) for e in content.get("events", []) ] signed_events = await self._check_sigs_and_hash_and_fetch( - destination, events, outlier=False, room_version=room_version + destination, events, outlier=False, room_version=room_version.identifier ) except HttpResponseException as e: if not e.code == 400: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8e3933b6c5..2489832a11 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -38,7 +38,6 @@ from synapse.api.errors import ( UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events import room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction @@ -234,24 +233,17 @@ class FederationServer(FederationBase): continue try: - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) except NotFoundError: logger.info("Ignoring PDU for unknown room_id: %s", room_id) continue - - try: - format_ver = room_version_to_event_format(room_version) - except UnsupportedRoomVersionError: + except UnsupportedRoomVersionError as e: # this can happen if support for a given room version is withdrawn, # so that we still get events for said room. 
- logger.info( - "Ignoring PDU for room %s with unknown version %s", - room_id, - room_version, - ) + logger.info("Ignoring PDU: %s", e) continue - event = event_from_pdu_json(p, format_ver) + event = event_from_pdu_json(p, room_version) pdus_by_room.setdefault(room_id, []).append(event) pdu_results = {} @@ -407,9 +399,7 @@ class FederationServer(FederationBase): Codes.UNSUPPORTED_ROOM_VERSION, ) - format_ver = room_version.event_format - - pdu = event_from_pdu_json(content, format_ver) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) @@ -420,16 +410,15 @@ class FederationServer(FederationBase): async def on_send_join_request(self, origin, content, room_id): logger.debug("on_send_join_request: content: %s", content) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - pdu = event_from_pdu_json(content, format_ver) + room_version = await self.store.get_room_version(room_id) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) res_pdus = await self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() @@ -451,16 +440,15 @@ class FederationServer(FederationBase): async def on_send_leave_request(self, origin, content, room_id): logger.debug("on_send_leave_request: content: %s", content) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - pdu = event_from_pdu_json(content, format_ver) + room_version = await self.store.get_room_version(room_id) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) await self.handler.on_send_leave_request(origin, pdu) return {} @@ -498,15 +486,14 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) auth_chain = [ - event_from_pdu_json(e, format_ver) for e in content["auth_chain"] + event_from_pdu_json(e, room_version) for e in content["auth_chain"] ] signed_auth = await self._check_sigs_and_hash_and_fetch( - origin, auth_chain, outlier=True, room_version=room_version + origin, auth_chain, outlier=True, room_version=room_version.identifier ) ret = await self.handler.on_query_auth( diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index b4d92cf732..132e35651d 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -99,6 +99,7 @@ class FederationTestCase(unittest.HomeserverTestCase): user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") room_id = 
self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(self.store.get_room_version(room_id)) # pretend that another server has joined join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) @@ -120,7 +121,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, - join_event.format_version, + room_version, ) with LoggingContext(request="send_rejected"): @@ -149,6 +150,7 @@ class FederationTestCase(unittest.HomeserverTestCase): user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(self.store.get_room_version(room_id)) # pretend that another server has joined join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) @@ -171,7 +173,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, - join_event.format_version, + room_version, ) with LoggingContext(request="send_rejected"): From 7765bf398996002ee461904915de9d8bc2ea951a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 6 Feb 2020 13:25:24 -0500 Subject: [PATCH 070/382] Limit the number of events that can be requested when backfilling events (#6864) Limit the maximum number of events requested when backfilling events. --- changelog.d/6864.misc | 1 + synapse/handlers/federation.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/6864.misc diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc new file mode 100644 index 0000000000..d24eb68460 --- /dev/null +++ b/changelog.d/6864.misc @@ -0,0 +1 @@ +Limit the number of events that can be requested by the backfill federation API to 100. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 10e8b6ea4c..eb20ef4aec 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1788,6 +1788,9 @@ class FederationHandler(BaseHandler): if not in_room: raise AuthError(403, "Host not in room.") + # Synapse asks for 100 events per backfill request. Do not allow more. + limit = min(limit, 100) + events = yield self.store.get_backfill_events(room_id, pdu_list, limit) events = yield filter_events_for_server(self.storage, origin, events) @@ -2168,6 +2171,7 @@ class FederationHandler(BaseHandler): if not in_room: raise AuthError(403, "Host not in room.") + # Only allow up to 20 events to be retrieved per request. limit = min(limit, 20) missing_events = await self.store.get_missing_events( From f4884444c36d92659b9d7a2a90d42324ab786873 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 09:26:57 +0000 Subject: [PATCH 071/382] remove unused room_version_to_event_format (#6857) --- changelog.d/6857.misc | 1 + synapse/events/__init__.py | 24 +----------------------- 2 files changed, 2 insertions(+), 23 deletions(-) create mode 100644 changelog.d/6857.misc diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6857.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. 
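Taken together, patches 069-071 replace the old two-step lookup (fetch a room version identifier string, then convert it to an event format version) with a single `RoomVersion` object threaded all the way into `event_from_pdu_json`, which now reads the format off `room_version.event_format` itself. A minimal sketch of the resulting calling pattern, using only names that appear in these diffs (the wrapper function itself is hypothetical, not part of the patch):

    # Sketch only: parse_federation_pdus is a hypothetical wrapper; the
    # get_room_version and event_from_pdu_json calls are the ones being
    # changed in this patch series.
    from synapse.federation.federation_base import event_from_pdu_json

    async def parse_federation_pdus(store, room_id, pdu_jsons):
        # get_room_version now returns a full RoomVersion object rather
        # than a bare identifier string, so callers no longer need the
        # room_version_to_event_format helper removed below.
        room_version = await store.get_room_version(room_id)
        return [
            event_from_pdu_json(p, room_version, outlier=False)
            for p in pdu_jsons
        ]
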
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 92f76703b3..89d41d82b6 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -21,8 +21,7 @@ import six from unpaddedbase64 import encode_base64 -from synapse.api.errors import UnsupportedRoomVersionError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions +from synapse.api.room_versions import EventFormatVersions from synapse.types import JsonDict from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -408,27 +407,6 @@ class FrozenEventV3(FrozenEventV2): return self._event_id -def room_version_to_event_format(room_version): - """Converts a room version string to the event format - - Args: - room_version (str) - - Returns: - int - - Raises: - UnsupportedRoomVersionError if the room version is unknown - """ - v = KNOWN_ROOM_VERSIONS.get(room_version) - - if not v: - # this can happen if support is withdrawn for a room version - raise UnsupportedRoomVersionError() - - return v.event_format - - def event_type_from_format_version(format_version): """Returns the python type to use to construct an Event object for the given event format version. From 56ca93ef5941b5dfcda368f373a6bcd80d177acd Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 7 Feb 2020 11:29:36 +0100 Subject: [PATCH 072/382] Admin api to add an email address (#6789) --- changelog.d/6769.feature | 1 + docs/admin_api/user_admin_api.rst | 11 +++++++++ synapse/handlers/admin.py | 2 ++ synapse/handlers/auth.py | 8 +++++++ synapse/rest/admin/users.py | 39 +++++++++++++++++++++++++++++++ tests/rest/admin/test_user.py | 19 +++++++++++++-- 6 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6769.feature diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature new file mode 100644 index 0000000000..8a60e12907 --- /dev/null +++ b/changelog.d/6769.feature @@ -0,0 +1 @@ +Admin API to add or modify threepids of user accounts. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 0b3d09d694..eb146095de 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -15,6 +15,16 @@ with a body of: { "password": "user_password", "displayname": "User", + "threepids": [ + { + "medium": "email", + "address": "" + }, + { + "medium": "email", + "address": "" + } + ], "avatar_url": "", "admin": false, "deactivated": false @@ -23,6 +33,7 @@ with a body of: including an ``access_token`` of a server admin. The parameter ``displayname`` is optional and defaults to ``user_id``. +The parameter ``threepids`` is optional. The parameter ``avatar_url`` is optional. The parameter ``admin`` is optional and defaults to 'false'. The parameter ``deactivated`` is optional and defaults to 'false'. 
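For orientation, the new `threepids` parameter rides along in the same PUT body as the existing fields. A hedged sketch of a client call (assuming the `/_synapse/admin/v2/users/<user_id>` endpoint this servlet serves; the helper below is illustrative only, not part of the patch):

    # Sketch: create or modify a user and bind an email, as a server admin.
    import json
    import requests  # any HTTP client would do

    def put_user_with_email(base_url, admin_token, user_id, email):
        body = {
            "password": "user_password",
            "displayname": "User",
            # "medium" must be "email" or "msisdn", per the new check in
            # AuthHandler.add_threepid below.
            "threepids": [{"medium": "email", "address": email}],
        }
        resp = requests.put(
            "%s/_synapse/admin/v2/users/%s" % (base_url, user_id),
            headers={"Authorization": "Bearer %s" % admin_token},
            data=json.dumps(body),
        )
        resp.raise_for_status()
        return resp.json()  # echoes the user, including its "threepids"
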
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 9205865231..f3c0aeceb6 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -58,8 +58,10 @@ class AdminHandler(BaseHandler): ret = await self.store.get_user_by_id(user.to_string()) if ret: profile = await self.store.get_profileinfo(user.localpart) + threepids = await self.store.user_get_threepids(user.to_string()) ret["displayname"] = profile.display_name ret["avatar_url"] = profile.avatar_url + ret["threepids"] = threepids return ret async def export_user_data(self, user_id, writer): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 54a71c49d2..48a88d3c2a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -816,6 +816,14 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): + # check if medium has a valid value + if medium not in ["email", "msisdn"]: + raise SynapseError( + code=400, + msg=("'%s' is not a valid value for 'medium'" % (medium,)), + errcode=Codes.INVALID_PARAM, + ) + # 'Canonicalise' email addresses down to lower case. # We've now moving towards the homeserver being the entity that # is responsible for validating threepids used for resetting passwords diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f1c4434f5c..e75c5f1370 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -136,6 +136,8 @@ class UserRestServletV2(RestServlet): self.hs = hs self.auth = hs.get_auth() self.admin_handler = hs.get_handlers().admin_handler + self.store = hs.get_datastore() + self.auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.set_password_handler = hs.get_set_password_handler() self.deactivate_account_handler = hs.get_deactivate_account_handler() @@ -163,6 +165,7 @@ class UserRestServletV2(RestServlet): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) + user_id = target_user.to_string() if user: # modify user if "displayname" in body: @@ -170,6 +173,29 @@ class UserRestServletV2(RestServlet): target_user, requester, body["displayname"], True ) + if "threepids" in body: + # check for required parameters for each threepid + for threepid in body["threepids"]: + assert_params_in_dict(threepid, ["medium", "address"]) + + # remove old threepids from user + threepids = await self.store.user_get_threepids(user_id) + for threepid in threepids: + try: + await self.auth_handler.delete_threepid( + user_id, threepid["medium"], threepid["address"], None + ) + except Exception: + logger.exception("Failed to remove threepids") + raise SynapseError(500, "Failed to remove threepids") + + # add new threepids to user + current_time = self.hs.get_clock().time_msec() + for threepid in body["threepids"]: + await self.auth_handler.add_threepid( + user_id, threepid["medium"], threepid["address"], current_time + ) + if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True @@ -221,6 +247,7 @@ class UserRestServletV2(RestServlet): admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) + threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") @@ -232,6 +259,18 @@ class UserRestServletV2(RestServlet): default_display_name=displayname, 
user_type=user_type, ) + + if "threepids" in body: + # check for required parameters for each threepid + for threepid in body["threepids"]: + assert_params_in_dict(threepid, ["medium", "address"]) + + current_time = self.hs.get_clock().time_msec() + for threepid in body["threepids"]: + await self.auth_handler.add_threepid( + user_id, threepid["medium"], threepid["address"], current_time + ) + if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 8f09f51c61..3b5169b38d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -407,7 +407,13 @@ class UserRestTestCase(unittest.HomeserverTestCase): """ self.hs.config.registration_shared_secret = None - body = json.dumps({"password": "abc123", "admin": True}) + body = json.dumps( + { + "password": "abc123", + "admin": True, + "threepids": [{"medium": "email", "address": "bob@bob.bob"}], + } + ) # Create user request, channel = self.make_request( @@ -421,6 +427,8 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("bob", channel.json_body["displayname"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) # Get user request, channel = self.make_request( @@ -449,7 +457,13 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Modify user - body = json.dumps({"displayname": "foobar", "deactivated": True}) + body = json.dumps( + { + "displayname": "foobar", + "deactivated": True, + "threepids": [{"medium": "email", "address": "bob2@bob.bob"}], + } + ) request, channel = self.make_request( "PUT", @@ -463,6 +477,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("foobar", channel.json_body["displayname"]) self.assertEqual(True, channel.json_body["deactivated"]) + # the user is deactivated, the threepid will be deleted # Get user request, channel = self.make_request( From de2d267375069c2d22bceb0d6ef9c6f5a77380e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 11:14:19 +0000 Subject: [PATCH 073/382] Allow moving group read APIs to workers (#6866) --- changelog.d/6866.feature | 1 + docs/workers.md | 8 + synapse/app/client_reader.py | 3 + synapse/app/federation_reader.py | 2 + synapse/groups/groups_server.py | 379 ++++----- synapse/handlers/groups_local.py | 270 +++---- synapse/replication/slave/storage/groups.py | 14 +- synapse/server.py | 14 +- .../storage/data_stores/main/group_server.py | 720 +++++++++--------- 9 files changed, 723 insertions(+), 688 deletions(-) create mode 100644 changelog.d/6866.feature diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature new file mode 100644 index 0000000000..256feab6ff --- /dev/null +++ b/changelog.d/6866.feature @@ -0,0 +1 @@ +Add ability to run some group APIs on workers. 
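Before the long mechanical move that follows, the shape of the change in miniature: the read-only group methods migrate into new `*WorkerHandler` base classes, and the `HomeServer` builders pick the variant by process type. A condensed sketch, distilled from the `synapse/server.py` and handler hunks below (bodies elided; not a substitute for the real classes):

    # Read-side APIs live in the worker class; mutating APIs stay in the
    # subclass, which only the main process instantiates.
    class GroupsServerWorkerHandler(object):
        """Read-side group APIs, safe to run on any worker process."""

        def __init__(self, hs):
            self.hs = hs
            self.store = hs.get_datastore()

        # get_group_profile, get_users_in_group, ... live here now.

    class GroupsServerHandler(GroupsServerWorkerHandler):
        """Adds the mutating APIs; main process only."""

        # update_group_profile, add_room_to_group, ... stay here.

    class HomeServer(object):
        # Mirrors the synapse/server.py hunk below: workers get the
        # read-only handler, the master gets the full one.
        def build_groups_server_handler(self):
            if self.config.worker_app:
                return GroupsServerWorkerHandler(self)
            return GroupsServerHandler(self)
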
diff --git a/docs/workers.md b/docs/workers.md index 09a9d8a7b8..82442d6a0a 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -177,8 +177,13 @@ endpoints matching the following regular expressions: ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ ^/_matrix/federation/v1/send/ + ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query +Additionally, the following REST endpoints can be handled for GET requests: + + ^/_matrix/federation/v1/groups/ + The above endpoints should all be routed to the federation_reader worker by the reverse-proxy configuration. @@ -254,10 +259,13 @@ following regular expressions: ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$ ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ + ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ + ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$ Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$ + ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$ Additionally, the following REST endpoints can be handled, but all requests must be routed to the same instance: diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ca96da6a4a..7fa91a3b11 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import ( RoomStateRestServlet, ) from synapse.rest.client.v1.voip import VoipRestServlet +from synapse.rest.client.v2_alpha import groups from synapse.rest.client.v2_alpha.account import ThreepidRestServlet from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet @@ -124,6 +125,8 @@ class ClientReaderServer(HomeServer): PushRuleRestServlet(self).register(resource) VersionsRestServlet(self).register(resource) + groups.register_servlets(self, resource) + resources.update({"/_matrix/client": resource}) root_resource = create_resource_tree(resources, NoResource()) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 1f1cea1416..5e17ef1396 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -35,6 +35,7 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore @@ -66,6 +67,7 @@ class FederationReaderSlavedStore( SlavedEventStore, SlavedKeyStore, SlavedRegistrationStore, + SlavedGroupServerStore, RoomStore, DirectoryStore, SlavedTransactionStore, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 0ec9be3cb5..c106abae21 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) # TODO: Flairs -class GroupsServerHandler(object): +class GroupsServerWorkerHandler(object): def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() @@ -51,9 +51,6 @@ class 
GroupsServerHandler(object): self.transport_client = hs.get_federation_transport_client() self.profile_handler = hs.get_profile_handler() - # Ensure attestations get renewed - hs.get_groups_attestation_renewer() - @defer.inlineCallbacks def check_group_is_ours( self, group_id, requester_user_id, and_exists=False, and_is_admin=None @@ -167,68 +164,6 @@ class GroupsServerHandler(object): "user": membership_info, } - @defer.inlineCallbacks - def update_group_summary_room( - self, group_id, requester_user_id, room_id, category_id, content - ): - """Add/update a room to the group summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - RoomID.from_string(room_id) # Ensure valid room id - - order = content.get("order", None) - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_room_to_summary( - group_id=group_id, - room_id=room_id, - category_id=category_id, - order=order, - is_public=is_public, - ) - - return {} - - @defer.inlineCallbacks - def delete_group_summary_room( - self, group_id, requester_user_id, room_id, category_id - ): - """Remove a room from the summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_room_from_summary( - group_id=group_id, room_id=room_id, category_id=category_id - ) - - return {} - - @defer.inlineCallbacks - def set_group_join_policy(self, group_id, requester_user_id, content): - """Sets the group join policy. - - Currently supported policies are: - - "invite": an invite must be received and accepted in order to join. - - "open": anyone can join. - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - join_policy = _parse_join_policy_from_contents(content) - if join_policy is None: - raise SynapseError(400, "No value specified for 'm.join_policy'") - - yield self.store.set_group_join_policy(group_id, join_policy=join_policy) - - return {} - @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): """Get all categories in a group (as seen by user) @@ -248,42 +183,10 @@ class GroupsServerHandler(object): group_id=group_id, category_id=category_id ) + logger.info("group %s", res) + return res - @defer.inlineCallbacks - def update_group_category(self, group_id, requester_user_id, category_id, content): - """Add/Update a group category - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - profile = content.get("profile") - - yield self.store.upsert_group_category( - group_id=group_id, - category_id=category_id, - is_public=is_public, - profile=profile, - ) - - return {} - - @defer.inlineCallbacks - def delete_group_category(self, group_id, requester_user_id, category_id): - """Delete a group category - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_group_category( - group_id=group_id, category_id=category_id - ) - - return {} - @defer.inlineCallbacks def get_group_roles(self, group_id, requester_user_id): """Get all roles in a group (as seen by user) @@ -302,74 +205,6 @@ class GroupsServerHandler(object): res = yield self.store.get_group_role(group_id=group_id, role_id=role_id) return res - @defer.inlineCallbacks - def 
update_group_role(self, group_id, requester_user_id, role_id, content): - """Add/update a role in a group - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - - profile = content.get("profile") - - yield self.store.upsert_group_role( - group_id=group_id, role_id=role_id, is_public=is_public, profile=profile - ) - - return {} - - @defer.inlineCallbacks - def delete_group_role(self, group_id, requester_user_id, role_id): - """Remove role from group - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_group_role(group_id=group_id, role_id=role_id) - - return {} - - @defer.inlineCallbacks - def update_group_summary_user( - self, group_id, requester_user_id, user_id, role_id, content - ): - """Add/update a users entry in the group summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - order = content.get("order", None) - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_summary( - group_id=group_id, - user_id=user_id, - role_id=role_id, - order=order, - is_public=is_public, - ) - - return {} - - @defer.inlineCallbacks - def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): - """Remove a user from the group summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_user_from_summary( - group_id=group_id, user_id=user_id, role_id=role_id - ) - - return {} - @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): """Get the group profile as seen by requester_user_id @@ -394,24 +229,6 @@ class GroupsServerHandler(object): else: raise SynapseError(404, "Unknown group") - @defer.inlineCallbacks - def update_group_profile(self, group_id, requester_user_id, content): - """Update the group profile - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - profile = {} - for keyname in ("name", "avatar_url", "short_description", "long_description"): - if keyname in content: - value = content[keyname] - if not isinstance(value, string_types): - raise SynapseError(400, "%r value is not a string" % (keyname,)) - profile[keyname] = value - - yield self.store.update_group_profile(group_id, profile) - @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): """Get the users in group as seen by requester_user_id. 
@@ -530,6 +347,196 @@ class GroupsServerHandler(object): return {"chunk": chunk, "total_room_count_estimate": len(room_results)} + +class GroupsServerHandler(GroupsServerWorkerHandler): + def __init__(self, hs): + super(GroupsServerHandler, self).__init__(hs) + + # Ensure attestations get renewed + hs.get_groups_attestation_renewer() + + @defer.inlineCallbacks + def update_group_summary_room( + self, group_id, requester_user_id, room_id, category_id, content + ): + """Add/update a room to the group summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + RoomID.from_string(room_id) # Ensure valid room id + + order = content.get("order", None) + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_room_to_summary( + group_id=group_id, + room_id=room_id, + category_id=category_id, + order=order, + is_public=is_public, + ) + + return {} + + @defer.inlineCallbacks + def delete_group_summary_room( + self, group_id, requester_user_id, room_id, category_id + ): + """Remove a room from the summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_room_from_summary( + group_id=group_id, room_id=room_id, category_id=category_id + ) + + return {} + + @defer.inlineCallbacks + def set_group_join_policy(self, group_id, requester_user_id, content): + """Sets the group join policy. + + Currently supported policies are: + - "invite": an invite must be received and accepted in order to join. + - "open": anyone can join. + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + join_policy = _parse_join_policy_from_contents(content) + if join_policy is None: + raise SynapseError(400, "No value specified for 'm.join_policy'") + + yield self.store.set_group_join_policy(group_id, join_policy=join_policy) + + return {} + + @defer.inlineCallbacks + def update_group_category(self, group_id, requester_user_id, category_id, content): + """Add/Update a group category + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_public = _parse_visibility_from_contents(content) + profile = content.get("profile") + + yield self.store.upsert_group_category( + group_id=group_id, + category_id=category_id, + is_public=is_public, + profile=profile, + ) + + return {} + + @defer.inlineCallbacks + def delete_group_category(self, group_id, requester_user_id, category_id): + """Delete a group category + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_group_category( + group_id=group_id, category_id=category_id + ) + + return {} + + @defer.inlineCallbacks + def update_group_role(self, group_id, requester_user_id, role_id, content): + """Add/update a role in a group + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_public = _parse_visibility_from_contents(content) + + profile = content.get("profile") + + yield self.store.upsert_group_role( + group_id=group_id, role_id=role_id, is_public=is_public, profile=profile + ) + + return {} + + @defer.inlineCallbacks + def delete_group_role(self, group_id, requester_user_id, role_id): + """Remove role from group + """ + yield self.check_group_is_ours( + group_id, 
requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_group_role(group_id=group_id, role_id=role_id) + + return {} + + @defer.inlineCallbacks + def update_group_summary_user( + self, group_id, requester_user_id, user_id, role_id, content + ): + """Add/update a users entry in the group summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + order = content.get("order", None) + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_user_to_summary( + group_id=group_id, + user_id=user_id, + role_id=role_id, + order=order, + is_public=is_public, + ) + + return {} + + @defer.inlineCallbacks + def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): + """Remove a user from the group summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_user_from_summary( + group_id=group_id, user_id=user_id, role_id=role_id + ) + + return {} + + @defer.inlineCallbacks + def update_group_profile(self, group_id, requester_user_id, content): + """Update the group profile + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + profile = {} + for keyname in ("name", "avatar_url", "short_description", "long_description"): + if keyname in content: + value = content[keyname] + if not isinstance(value, string_types): + raise SynapseError(400, "%r value is not a string" % (keyname,)) + profile[keyname] = value + + yield self.store.update_group_profile(group_id, profile) + @defer.inlineCallbacks def add_room_to_group(self, group_id, requester_user_id, room_id, content): """Add room to group diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 319565510f..ad22415782 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -63,7 +63,7 @@ def _create_rerouter(func_name): return f -class GroupsLocalHandler(object): +class GroupsLocalWorkerHandler(object): def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() @@ -81,40 +81,17 @@ class GroupsLocalHandler(object): self.profile_handler = hs.get_profile_handler() - # Ensure attestations get renewed - hs.get_groups_attestation_renewer() - # The following functions merely route the query to the local groups server # or federation depending on if the group is local or remote get_group_profile = _create_rerouter("get_group_profile") - update_group_profile = _create_rerouter("update_group_profile") get_rooms_in_group = _create_rerouter("get_rooms_in_group") - get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") - - add_room_to_group = _create_rerouter("add_room_to_group") - update_room_in_group = _create_rerouter("update_room_in_group") - remove_room_from_group = _create_rerouter("remove_room_from_group") - - update_group_summary_room = _create_rerouter("update_group_summary_room") - delete_group_summary_room = _create_rerouter("delete_group_summary_room") - - update_group_category = _create_rerouter("update_group_category") - delete_group_category = _create_rerouter("delete_group_category") get_group_category = _create_rerouter("get_group_category") get_group_categories = _create_rerouter("get_group_categories") - - update_group_summary_user = _create_rerouter("update_group_summary_user") - delete_group_summary_user = 
_create_rerouter("delete_group_summary_user") - - update_group_role = _create_rerouter("update_group_role") - delete_group_role = _create_rerouter("delete_group_role") get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") - set_group_join_policy = _create_rerouter("set_group_join_policy") - @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): """Get the group summary for a group. @@ -169,6 +146,144 @@ class GroupsLocalHandler(object): return res + @defer.inlineCallbacks + def get_users_in_group(self, group_id, requester_user_id): + """Get users in a group + """ + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.get_users_in_group( + group_id, requester_user_id + ) + return res + + group_server_name = get_domain_from_id(group_id) + + try: + res = yield self.transport_client.get_users_in_group( + get_domain_from_id(group_id), group_id, requester_user_id + ) + except HttpResponseException as e: + raise e.to_synapse_error() + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") + + chunk = res["chunk"] + valid_entries = [] + for entry in chunk: + g_user_id = entry["user_id"] + attestation = entry.pop("attestation", {}) + try: + if get_domain_from_id(g_user_id) != group_server_name: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + server_name=get_domain_from_id(g_user_id), + ) + valid_entries.append(entry) + except Exception as e: + logger.info("Failed to verify user is in group: %s", e) + + res["chunk"] = valid_entries + + return res + + @defer.inlineCallbacks + def get_joined_groups(self, user_id): + group_ids = yield self.store.get_joined_groups(user_id) + return {"groups": group_ids} + + @defer.inlineCallbacks + def get_publicised_groups_for_user(self, user_id): + if self.hs.is_mine_id(user_id): + result = yield self.store.get_publicised_groups_for_user(user_id) + + # Check AS associated groups for this user - this depends on the + # RegExps in the AS registration file (under `users`) + for app_service in self.store.get_app_services(): + result.extend(app_service.get_groups_for_user(user_id)) + + return {"groups": result} + else: + try: + bulk_result = yield self.transport_client.bulk_get_publicised_groups( + get_domain_from_id(user_id), [user_id] + ) + except HttpResponseException as e: + raise e.to_synapse_error() + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") + + result = bulk_result.get("users", {}).get(user_id) + # TODO: Verify attestations + return {"groups": result} + + @defer.inlineCallbacks + def bulk_get_publicised_groups(self, user_ids, proxy=True): + destinations = {} + local_users = set() + + for user_id in user_ids: + if self.hs.is_mine_id(user_id): + local_users.add(user_id) + else: + destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id) + + if not proxy and destinations: + raise SynapseError(400, "Some user_ids are not local") + + results = {} + failed_results = [] + for destination, dest_user_ids in iteritems(destinations): + try: + r = yield self.transport_client.bulk_get_publicised_groups( + destination, list(dest_user_ids) + ) + results.update(r["users"]) + except Exception: + failed_results.extend(dest_user_ids) + + for uid in local_users: + results[uid] = yield self.store.get_publicised_groups_for_user(uid) + + # Check AS associated groups for this user - this depends on the + # RegExps in the AS registration file (under 
`users`) + for app_service in self.store.get_app_services(): + results[uid].extend(app_service.get_groups_for_user(uid)) + + return {"users": results} + + +class GroupsLocalHandler(GroupsLocalWorkerHandler): + def __init__(self, hs): + super(GroupsLocalHandler, self).__init__(hs) + + # Ensure attestations get renewed + hs.get_groups_attestation_renewer() + + # The following functions merely route the query to the local groups server + # or federation depending on if the group is local or remote + + update_group_profile = _create_rerouter("update_group_profile") + + add_room_to_group = _create_rerouter("add_room_to_group") + update_room_in_group = _create_rerouter("update_room_in_group") + remove_room_from_group = _create_rerouter("remove_room_from_group") + + update_group_summary_room = _create_rerouter("update_group_summary_room") + delete_group_summary_room = _create_rerouter("delete_group_summary_room") + + update_group_category = _create_rerouter("update_group_category") + delete_group_category = _create_rerouter("delete_group_category") + + update_group_summary_user = _create_rerouter("update_group_summary_user") + delete_group_summary_user = _create_rerouter("delete_group_summary_user") + + update_group_role = _create_rerouter("update_group_role") + delete_group_role = _create_rerouter("delete_group_role") + + set_group_join_policy = _create_rerouter("set_group_join_policy") + @defer.inlineCallbacks def create_group(self, group_id, user_id, content): """Create a group @@ -219,48 +334,6 @@ class GroupsLocalHandler(object): return res - @defer.inlineCallbacks - def get_users_in_group(self, group_id, requester_user_id): - """Get users in a group - """ - if self.is_mine_id(group_id): - res = yield self.groups_server_handler.get_users_in_group( - group_id, requester_user_id - ) - return res - - group_server_name = get_domain_from_id(group_id) - - try: - res = yield self.transport_client.get_users_in_group( - get_domain_from_id(group_id), group_id, requester_user_id - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - chunk = res["chunk"] - valid_entries = [] - for entry in chunk: - g_user_id = entry["user_id"] - attestation = entry.pop("attestation", {}) - try: - if get_domain_from_id(g_user_id) != group_server_name: - yield self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - server_name=get_domain_from_id(g_user_id), - ) - valid_entries.append(entry) - except Exception as e: - logger.info("Failed to verify user is in group: %s", e) - - res["chunk"] = valid_entries - - return res - @defer.inlineCallbacks def join_group(self, group_id, user_id, content): """Request to join a group @@ -452,68 +525,3 @@ class GroupsLocalHandler(object): group_id, user_id, membership="leave" ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - - @defer.inlineCallbacks - def get_joined_groups(self, user_id): - group_ids = yield self.store.get_joined_groups(user_id) - return {"groups": group_ids} - - @defer.inlineCallbacks - def get_publicised_groups_for_user(self, user_id): - if self.hs.is_mine_id(user_id): - result = yield self.store.get_publicised_groups_for_user(user_id) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - result.extend(app_service.get_groups_for_user(user_id)) - - return {"groups": result} - else: - 
try: - bulk_result = yield self.transport_client.bulk_get_publicised_groups( - get_domain_from_id(user_id), [user_id] - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - result = bulk_result.get("users", {}).get(user_id) - # TODO: Verify attestations - return {"groups": result} - - @defer.inlineCallbacks - def bulk_get_publicised_groups(self, user_ids, proxy=True): - destinations = {} - local_users = set() - - for user_id in user_ids: - if self.hs.is_mine_id(user_id): - local_users.add(user_id) - else: - destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id) - - if not proxy and destinations: - raise SynapseError(400, "Some user_ids are not local") - - results = {} - failed_results = [] - for destination, dest_user_ids in iteritems(destinations): - try: - r = yield self.transport_client.bulk_get_publicised_groups( - destination, list(dest_user_ids) - ) - results.update(r["users"]) - except Exception: - failed_results.extend(dest_user_ids) - - for uid in local_users: - results[uid] = yield self.store.get_publicised_groups_for_user(uid) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - results[uid].extend(app_service.get_groups_for_user(uid)) - - return {"users": results} diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 69a4ae42f9..2d4fd08cf5 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -13,15 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage import DataStore +from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker +from synapse.storage.data_stores.main.group_server import GroupServerWorkerStore from synapse.storage.database import Database from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore, __func__ -from ._slaved_id_tracker import SlavedIdTracker - -class SlavedGroupServerStore(BaseSlavedStore): +class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore): def __init__(self, database: Database, db_conn, hs): super(SlavedGroupServerStore, self).__init__(database, db_conn, hs) @@ -35,9 +34,8 @@ class SlavedGroupServerStore(BaseSlavedStore): self._group_updates_id_gen.get_current_token(), ) - get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user) - get_group_stream_token = __func__(DataStore.get_group_stream_token) - get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user) + def get_group_stream_token(self): + return self._group_updates_id_gen.get_current_token() def stream_positions(self): result = super(SlavedGroupServerStore, self).stream_positions() diff --git a/synapse/server.py b/synapse/server.py index 7926867b77..fd2f69e928 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -50,7 +50,7 @@ from synapse.federation.send_queue import FederationRemoteSendQueue from synapse.federation.sender import FederationSender from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer -from synapse.groups.groups_server import GroupsServerHandler +from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler from synapse.handlers import Handlers from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler @@ -62,7 +62,7 @@ from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler -from synapse.handlers.groups_local import GroupsLocalHandler +from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler @@ -460,10 +460,16 @@ class HomeServer(object): return UserDirectoryHandler(self) def build_groups_local_handler(self): - return GroupsLocalHandler(self) + if self.config.worker_app: + return GroupsLocalWorkerHandler(self) + else: + return GroupsLocalHandler(self) def build_groups_server_handler(self): - return GroupsServerHandler(self) + if self.config.worker_app: + return GroupsServerWorkerHandler(self) + else: + return GroupsServerHandler(self) def build_groups_attestation_signing(self): return GroupAttestationSigning(self) diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py index 6acd45e9f3..0963e6c250 100644 --- a/synapse/storage/data_stores/main/group_server.py +++ b/synapse/storage/data_stores/main/group_server.py @@ -27,21 +27,7 @@ _DEFAULT_CATEGORY_ID = "" _DEFAULT_ROLE_ID = "" -class GroupServerStore(SQLBaseStore): - def set_group_join_policy(self, group_id, join_policy): - 
"""Set the join policy of a group. - - join_policy can be one of: - * "invite" - * "open" - """ - return self.db.simple_update_one( - table="groups", - keyvalues={"group_id": group_id}, - updatevalues={"join_policy": join_policy}, - desc="set_group_join_policy", - ) - +class GroupServerWorkerStore(SQLBaseStore): def get_group(self, group_id): return self.db.simple_select_one( table="groups", @@ -157,6 +143,366 @@ class GroupServerStore(SQLBaseStore): "get_rooms_for_summary", _get_rooms_for_summary_txn ) + @defer.inlineCallbacks + def get_group_categories(self, group_id): + rows = yield self.db.simple_select_list( + table="group_room_categories", + keyvalues={"group_id": group_id}, + retcols=("category_id", "is_public", "profile"), + desc="get_group_categories", + ) + + return { + row["category_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + } + + @defer.inlineCallbacks + def get_group_category(self, group_id, category_id): + category = yield self.db.simple_select_one( + table="group_room_categories", + keyvalues={"group_id": group_id, "category_id": category_id}, + retcols=("is_public", "profile"), + desc="get_group_category", + ) + + category["profile"] = json.loads(category["profile"]) + + return category + + @defer.inlineCallbacks + def get_group_roles(self, group_id): + rows = yield self.db.simple_select_list( + table="group_roles", + keyvalues={"group_id": group_id}, + retcols=("role_id", "is_public", "profile"), + desc="get_group_roles", + ) + + return { + row["role_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + } + + @defer.inlineCallbacks + def get_group_role(self, group_id, role_id): + role = yield self.db.simple_select_one( + table="group_roles", + keyvalues={"group_id": group_id, "role_id": role_id}, + retcols=("is_public", "profile"), + desc="get_group_role", + ) + + role["profile"] = json.loads(role["profile"]) + + return role + + def get_local_groups_for_room(self, room_id): + """Get all of the local group that contain a given room + Args: + room_id (str): The ID of a room + Returns: + Deferred[list[str]]: A twisted.Deferred containing a list of group ids + containing this room + """ + return self.db.simple_select_onecol( + table="group_rooms", + keyvalues={"room_id": room_id}, + retcol="group_id", + desc="get_local_groups_for_room", + ) + + def get_users_for_summary_by_role(self, group_id, include_private=False): + """Get the users and roles that should be included in a summary request + + Returns ([users], [roles]) + """ + + def _get_users_for_summary_txn(txn): + keyvalues = {"group_id": group_id} + if not include_private: + keyvalues["is_public"] = True + + sql = """ + SELECT user_id, is_public, role_id, user_order + FROM group_summary_users + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + users = [ + { + "user_id": row[0], + "is_public": row[1], + "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None, + "order": row[3], + } + for row in txn + ] + + sql = """ + SELECT role_id, is_public, profile, role_order + FROM group_summary_roles + INNER JOIN group_roles USING (group_id, role_id) + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" 
+ txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + roles = { + row[0]: { + "is_public": row[1], + "profile": json.loads(row[2]), + "order": row[3], + } + for row in txn + } + + return users, roles + + return self.db.runInteraction( + "get_users_for_summary_by_role", _get_users_for_summary_txn + ) + + def is_user_in_group(self, user_id, group_id): + return self.db.simple_select_one_onecol( + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="is_user_in_group", + ).addCallback(lambda r: bool(r)) + + def is_user_admin_in_group(self, group_id, user_id): + return self.db.simple_select_one_onecol( + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="is_admin", + allow_none=True, + desc="is_user_admin_in_group", + ) + + def is_user_invited_to_local_group(self, group_id, user_id): + """Has the group server invited a user? + """ + return self.db.simple_select_one_onecol( + table="group_invites", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + desc="is_user_invited_to_local_group", + allow_none=True, + ) + + def get_users_membership_info_in_group(self, group_id, user_id): + """Get a dict describing the membership of a user in a group. + + Example if joined: + + { + "membership": "join", + "is_public": True, + "is_privileged": False, + } + + Returns an empty dict if the user is not join/invite/etc + """ + + def _get_users_membership_in_group_txn(txn): + row = self.db.simple_select_one_txn( + txn, + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcols=("is_admin", "is_public"), + allow_none=True, + ) + + if row: + return { + "membership": "join", + "is_public": row["is_public"], + "is_privileged": row["is_admin"], + } + + row = self.db.simple_select_one_onecol_txn( + txn, + table="group_invites", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + allow_none=True, + ) + + if row: + return {"membership": "invite"} + + return {} + + return self.db.runInteraction( + "get_users_membership_info_in_group", _get_users_membership_in_group_txn + ) + + def get_publicised_groups_for_user(self, user_id): + """Get all groups a user is publicising + """ + return self.db.simple_select_onecol( + table="local_group_membership", + keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True}, + retcol="group_id", + desc="get_publicised_groups_for_user", + ) + + def get_attestations_need_renewals(self, valid_until_ms): + """Get all attestations that need to be renewed until givent time + """ + + def _get_attestations_need_renewals_txn(txn): + sql = """ + SELECT group_id, user_id FROM group_attestations_renewals + WHERE valid_until_ms <= ? + """ + txn.execute(sql, (valid_until_ms,)) + return self.db.cursor_to_dict(txn) + + return self.db.runInteraction( + "get_attestations_need_renewals", _get_attestations_need_renewals_txn + ) + + @defer.inlineCallbacks + def get_remote_attestation(self, group_id, user_id): + """Get the attestation that proves the remote agrees that the user is + in the group. 
+ """ + row = yield self.db.simple_select_one( + table="group_attestations_remote", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcols=("valid_until_ms", "attestation_json"), + desc="get_remote_attestation", + allow_none=True, + ) + + now = int(self._clock.time_msec()) + if row and now < row["valid_until_ms"]: + return json.loads(row["attestation_json"]) + + return None + + def get_joined_groups(self, user_id): + return self.db.simple_select_onecol( + table="local_group_membership", + keyvalues={"user_id": user_id, "membership": "join"}, + retcol="group_id", + desc="get_joined_groups", + ) + + def get_all_groups_for_user(self, user_id, now_token): + def _get_all_groups_for_user_txn(txn): + sql = """ + SELECT group_id, type, membership, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND membership != 'leave' + AND stream_id <= ? + """ + txn.execute(sql, (user_id, now_token)) + return [ + { + "group_id": row[0], + "type": row[1], + "membership": row[2], + "content": json.loads(row[3]), + } + for row in txn + ] + + return self.db.runInteraction( + "get_all_groups_for_user", _get_all_groups_for_user_txn + ) + + def get_groups_changes_for_user(self, user_id, from_token, to_token): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_entity_changed( + user_id, from_token + ) + if not has_changed: + return defer.succeed([]) + + def _get_groups_changes_for_user_txn(txn): + sql = """ + SELECT group_id, membership, type, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND ? < stream_id AND stream_id <= ? + """ + txn.execute(sql, (user_id, from_token, to_token)) + return [ + { + "group_id": group_id, + "membership": membership, + "type": gtype, + "content": json.loads(content_json), + } + for group_id, membership, gtype, content_json in txn + ] + + return self.db.runInteraction( + "get_groups_changes_for_user", _get_groups_changes_for_user_txn + ) + + def get_all_groups_changes(self, from_token, to_token, limit): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_any_entity_changed( + from_token + ) + if not has_changed: + return defer.succeed([]) + + def _get_all_groups_changes_txn(txn): + sql = """ + SELECT stream_id, group_id, user_id, type, content + FROM local_group_updates + WHERE ? < stream_id AND stream_id <= ? + LIMIT ? + """ + txn.execute(sql, (from_token, to_token, limit)) + return [ + (stream_id, group_id, user_id, gtype, json.loads(content_json)) + for stream_id, group_id, user_id, gtype, content_json in txn + ] + + return self.db.runInteraction( + "get_all_groups_changes", _get_all_groups_changes_txn + ) + + +class GroupServerStore(GroupServerWorkerStore): + def set_group_join_policy(self, group_id, join_policy): + """Set the join policy of a group. 
+ + join_policy can be one of: + * "invite" + * "open" + """ + return self.db.simple_update_one( + table="groups", + keyvalues={"group_id": group_id}, + updatevalues={"join_policy": join_policy}, + desc="set_group_join_policy", + ) + def add_room_to_summary(self, group_id, room_id, category_id, order, is_public): return self.db.runInteraction( "add_room_to_summary", @@ -299,36 +645,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_room_from_summary", ) - @defer.inlineCallbacks - def get_group_categories(self, group_id): - rows = yield self.db.simple_select_list( - table="group_room_categories", - keyvalues={"group_id": group_id}, - retcols=("category_id", "is_public", "profile"), - desc="get_group_categories", - ) - - return { - row["category_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows - } - - @defer.inlineCallbacks - def get_group_category(self, group_id, category_id): - category = yield self.db.simple_select_one( - table="group_room_categories", - keyvalues={"group_id": group_id, "category_id": category_id}, - retcols=("is_public", "profile"), - desc="get_group_category", - ) - - category["profile"] = json.loads(category["profile"]) - - return category - def upsert_group_category(self, group_id, category_id, profile, is_public): """Add/update room category for group """ @@ -360,36 +676,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_group_category", ) - @defer.inlineCallbacks - def get_group_roles(self, group_id): - rows = yield self.db.simple_select_list( - table="group_roles", - keyvalues={"group_id": group_id}, - retcols=("role_id", "is_public", "profile"), - desc="get_group_roles", - ) - - return { - row["role_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows - } - - @defer.inlineCallbacks - def get_group_role(self, group_id, role_id): - role = yield self.db.simple_select_one( - table="group_roles", - keyvalues={"group_id": group_id, "role_id": role_id}, - retcols=("is_public", "profile"), - desc="get_group_role", - ) - - role["profile"] = json.loads(role["profile"]) - - return role - def upsert_group_role(self, group_id, role_id, profile, is_public): """Add/remove user role """ @@ -555,100 +841,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_user_from_summary", ) - def get_local_groups_for_room(self, room_id): - """Get all of the local group that contain a given room - Args: - room_id (str): The ID of a room - Returns: - Deferred[list[str]]: A twisted.Deferred containing a list of group ids - containing this room - """ - return self.db.simple_select_onecol( - table="group_rooms", - keyvalues={"room_id": room_id}, - retcol="group_id", - desc="get_local_groups_for_room", - ) - - def get_users_for_summary_by_role(self, group_id, include_private=False): - """Get the users and roles that should be included in a summary request - - Returns ([users], [roles]) - """ - - def _get_users_for_summary_txn(txn): - keyvalues = {"group_id": group_id} - if not include_private: - keyvalues["is_public"] = True - - sql = """ - SELECT user_id, is_public, role_id, user_order - FROM group_summary_users - WHERE group_id = ? - """ - - if not include_private: - sql += " AND is_public = ?" 
- txn.execute(sql, (group_id, True)) - else: - txn.execute(sql, (group_id,)) - - users = [ - { - "user_id": row[0], - "is_public": row[1], - "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None, - "order": row[3], - } - for row in txn - ] - - sql = """ - SELECT role_id, is_public, profile, role_order - FROM group_summary_roles - INNER JOIN group_roles USING (group_id, role_id) - WHERE group_id = ? - """ - - if not include_private: - sql += " AND is_public = ?" - txn.execute(sql, (group_id, True)) - else: - txn.execute(sql, (group_id,)) - - roles = { - row[0]: { - "is_public": row[1], - "profile": json.loads(row[2]), - "order": row[3], - } - for row in txn - } - - return users, roles - - return self.db.runInteraction( - "get_users_for_summary_by_role", _get_users_for_summary_txn - ) - - def is_user_in_group(self, user_id, group_id): - return self.db.simple_select_one_onecol( - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - allow_none=True, - desc="is_user_in_group", - ).addCallback(lambda r: bool(r)) - - def is_user_admin_in_group(self, group_id, user_id): - return self.db.simple_select_one_onecol( - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="is_admin", - allow_none=True, - desc="is_user_admin_in_group", - ) - def add_group_invite(self, group_id, user_id): """Record that the group server has invited a user """ @@ -658,64 +850,6 @@ class GroupServerStore(SQLBaseStore): desc="add_group_invite", ) - def is_user_invited_to_local_group(self, group_id, user_id): - """Has the group server invited a user? - """ - return self.db.simple_select_one_onecol( - table="group_invites", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - desc="is_user_invited_to_local_group", - allow_none=True, - ) - - def get_users_membership_info_in_group(self, group_id, user_id): - """Get a dict describing the membership of a user in a group. 
- - Example if joined: - - { - "membership": "join", - "is_public": True, - "is_privileged": False, - } - - Returns an empty dict if the user is not join/invite/etc - """ - - def _get_users_membership_in_group_txn(txn): - row = self.db.simple_select_one_txn( - txn, - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcols=("is_admin", "is_public"), - allow_none=True, - ) - - if row: - return { - "membership": "join", - "is_public": row["is_public"], - "is_privileged": row["is_admin"], - } - - row = self.db.simple_select_one_onecol_txn( - txn, - table="group_invites", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - allow_none=True, - ) - - if row: - return {"membership": "invite"} - - return {} - - return self.db.runInteraction( - "get_users_membership_info_in_group", _get_users_membership_in_group_txn - ) - def add_user_to_group( self, group_id, @@ -846,16 +980,6 @@ class GroupServerStore(SQLBaseStore): "remove_room_from_group", _remove_room_from_group_txn ) - def get_publicised_groups_for_user(self, user_id): - """Get all groups a user is publicising - """ - return self.db.simple_select_onecol( - table="local_group_membership", - keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True}, - retcol="group_id", - desc="get_publicised_groups_for_user", - ) - def update_group_publicity(self, group_id, user_id, publicise): """Update whether the user is publicising their membership of the group """ @@ -1000,22 +1124,6 @@ class GroupServerStore(SQLBaseStore): desc="update_group_profile", ) - def get_attestations_need_renewals(self, valid_until_ms): - """Get all attestations that need to be renewed until givent time - """ - - def _get_attestations_need_renewals_txn(txn): - sql = """ - SELECT group_id, user_id FROM group_attestations_renewals - WHERE valid_until_ms <= ? - """ - txn.execute(sql, (valid_until_ms,)) - return self.db.cursor_to_dict(txn) - - return self.db.runInteraction( - "get_attestations_need_renewals", _get_attestations_need_renewals_txn - ) - def update_attestation_renewal(self, group_id, user_id, attestation): """Update an attestation that we have renewed """ @@ -1054,112 +1162,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_attestation_renewal", ) - @defer.inlineCallbacks - def get_remote_attestation(self, group_id, user_id): - """Get the attestation that proves the remote agrees that the user is - in the group. - """ - row = yield self.db.simple_select_one( - table="group_attestations_remote", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcols=("valid_until_ms", "attestation_json"), - desc="get_remote_attestation", - allow_none=True, - ) - - now = int(self._clock.time_msec()) - if row and now < row["valid_until_ms"]: - return json.loads(row["attestation_json"]) - - return None - - def get_joined_groups(self, user_id): - return self.db.simple_select_onecol( - table="local_group_membership", - keyvalues={"user_id": user_id, "membership": "join"}, - retcol="group_id", - desc="get_joined_groups", - ) - - def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): - sql = """ - SELECT group_id, type, membership, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND membership != 'leave' - AND stream_id <= ? 
- """ - txn.execute(sql, (user_id, now_token)) - return [ - { - "group_id": row[0], - "type": row[1], - "membership": row[2], - "content": json.loads(row[3]), - } - for row in txn - ] - - return self.db.runInteraction( - "get_all_groups_for_user", _get_all_groups_for_user_txn - ) - - def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( - user_id, from_token - ) - if not has_changed: - return defer.succeed([]) - - def _get_groups_changes_for_user_txn(txn): - sql = """ - SELECT group_id, membership, type, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND ? < stream_id AND stream_id <= ? - """ - txn.execute(sql, (user_id, from_token, to_token)) - return [ - { - "group_id": group_id, - "membership": membership, - "type": gtype, - "content": json.loads(content_json), - } - for group_id, membership, gtype, content_json in txn - ] - - return self.db.runInteraction( - "get_groups_changes_for_user", _get_groups_changes_for_user_txn - ) - - def get_all_groups_changes(self, from_token, to_token, limit): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_any_entity_changed( - from_token - ) - if not has_changed: - return defer.succeed([]) - - def _get_all_groups_changes_txn(txn): - sql = """ - SELECT stream_id, group_id, user_id, type, content - FROM local_group_updates - WHERE ? < stream_id AND stream_id <= ? - LIMIT ? - """ - txn.execute(sql, (from_token, to_token, limit)) - return [ - (stream_id, group_id, user_id, gtype, json.loads(content_json)) - for stream_id, group_id, user_id, gtype, content_json in txn - ] - - return self.db.runInteraction( - "get_all_groups_changes", _get_all_groups_changes_txn - ) - def get_group_stream_token(self): return self._group_updates_id_gen.get_current_token() From b08b0a22d505b1555f511e3f38935a62930ea25d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 13:56:38 +0000 Subject: [PATCH 074/382] Add typing to synapse.federation.sender (#6871) --- changelog.d/6871.misc | 1 + synapse/federation/federation_server.py | 7 +- synapse/federation/sender/__init__.py | 99 +++++++++---------- .../sender/per_destination_queue.py | 88 +++++++++-------- .../federation/sender/transaction_manager.py | 16 +-- synapse/federation/units.py | 23 ++++- synapse/server.pyi | 2 + tests/handlers/test_typing.py | 8 +- tox.ini | 1 + 9 files changed, 138 insertions(+), 107 deletions(-) create mode 100644 changelog.d/6871.misc diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc new file mode 100644 index 0000000000..5161af9983 --- /dev/null +++ b/changelog.d/6871.misc @@ -0,0 +1 @@ +Add typing to `synapse.federation.sender` and port to async/await. 
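
The port follows the same mechanical recipe as the handler patches earlier in the series: drop `@defer.inlineCallbacks`, declare the method `async`, and turn each `yield` into an `await`. A minimal sketch of the before/after shape, using made-up names (`Sender`, `transport.send_thing`) rather than anything from the sender module itself:

    from twisted.internet import defer

    class Sender:
        def __init__(self, transport):
            self.transport = transport  # hypothetical transport client

        # Before: a Twisted inlineCallbacks generator returning a Deferred.
        @defer.inlineCallbacks
        def send_thing_old(self, destination):
            response = yield self.transport.send_thing(destination)
            return response

        # After: a native coroutine with type hints. Call sites that still
        # need a Deferred can wrap the coroutine in defer.ensureDeferred().
        async def send_thing(self, destination: str) -> dict:
            response = await self.transport.send_thing(destination)
            return response

The typing half of the change shows up throughout the hunks below as annotated signatures and `# type:` comments on instance attributes.
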
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2489832a11..a6c966a393 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -294,7 +294,12 @@ class FederationServer(FederationBase): async def _process_edu(edu_dict): received_edus_counter.inc() - edu = Edu(**edu_dict) + edu = Edu( + origin=origin, + destination=self.server_name, + edu_type=edu_dict["edu_type"], + content=edu_dict["content"], + ) await self.registry.on_edu(edu.edu_type, origin, edu.content) await concurrently_execute( diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 36c83c3027..233cb33daf 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +from typing import Dict, Hashable, Iterable, List, Optional, Set from six import itervalues @@ -23,6 +24,7 @@ from twisted.internet import defer import synapse import synapse.metrics +from synapse.events import EventBase from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu @@ -39,6 +41,8 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.presence import UserPresenceState +from synapse.types import ReadReceipt from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -68,7 +72,7 @@ class FederationSender(object): self._transaction_manager = TransactionManager(hs) # map from destination to PerDestinationQueue - self._per_destination_queues = {} # type: dict[str, PerDestinationQueue] + self._per_destination_queues = {} # type: Dict[str, PerDestinationQueue] LaterGauge( "synapse_federation_transaction_queue_pending_destinations", @@ -84,7 +88,7 @@ class FederationSender(object): # Map of user_id -> UserPresenceState for all the pending presence # to be sent out by user_id. Entries here get processed and put in # pending_presence_by_dest - self.pending_presence = {} + self.pending_presence = {} # type: Dict[str, UserPresenceState] LaterGauge( "synapse_federation_transaction_queue_pending_pdus", @@ -116,20 +120,17 @@ class FederationSender(object): # and that there is a pending call to _flush_rrs_for_room in the system. self._queues_awaiting_rr_flush_by_room = ( {} - ) # type: dict[str, set[PerDestinationQueue]] + ) # type: Dict[str, Set[PerDestinationQueue]] self._rr_txn_interval_per_room_ms = ( - 1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second + 1000.0 / hs.config.federation_rr_transactions_per_room_per_second ) - def _get_per_destination_queue(self, destination): + def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination Args: - destination (str): server_name of remote server - - Returns: - PerDestinationQueue + destination: server_name of remote server """ queue = self._per_destination_queues.get(destination) if not queue: @@ -137,7 +138,7 @@ class FederationSender(object): self._per_destination_queues[destination] = queue return queue - def notify_new_events(self, current_id): + def notify_new_events(self, current_id: int) -> None: """This gets called when we have some new events we might want to send out to other servers. 
""" @@ -151,13 +152,12 @@ class FederationSender(object): "process_event_queue_for_federation", self._process_event_queue_loop ) - @defer.inlineCallbacks - def _process_event_queue_loop(self): + async def _process_event_queue_loop(self) -> None: try: self._is_processing = True while True: - last_token = yield self.store.get_federation_out_pos("events") - next_token, events = yield self.store.get_all_new_events_stream( + last_token = await self.store.get_federation_out_pos("events") + next_token, events = await self.store.get_all_new_events_stream( last_token, self._last_poked_id, limit=100 ) @@ -166,8 +166,7 @@ class FederationSender(object): if not events and next_token >= self._last_poked_id: break - @defer.inlineCallbacks - def handle_event(event): + async def handle_event(event: EventBase) -> None: # Only send events for this server. send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of() is_mine = self.is_mine_id(event.sender) @@ -184,7 +183,7 @@ class FederationSender(object): # Otherwise if the last member on a server in a room is # banned then it won't receive the event because it won't # be in the room after the ban. - destinations = yield self.state.get_hosts_in_room_at_events( + destinations = await self.state.get_hosts_in_room_at_events( event.room_id, event_ids=event.prev_event_ids() ) except Exception: @@ -206,17 +205,16 @@ class FederationSender(object): self._send_pdu(event, destinations) - @defer.inlineCallbacks - def handle_room_events(events): + async def handle_room_events(events: Iterable[EventBase]) -> None: with Measure(self.clock, "handle_room_events"): for event in events: - yield handle_event(event) + await handle_event(event) - events_by_room = {} + events_by_room = {} # type: Dict[str, List[EventBase]] for event in events: events_by_room.setdefault(event.room_id, []).append(event) - yield make_deferred_yieldable( + await make_deferred_yieldable( defer.gatherResults( [ run_in_background(handle_room_events, evs) @@ -226,11 +224,11 @@ class FederationSender(object): ) ) - yield self.store.update_federation_out_pos("events", next_token) + await self.store.update_federation_out_pos("events", next_token) if events: now = self.clock.time_msec() - ts = yield self.store.get_received_ts(events[-1].event_id) + ts = await self.store.get_received_ts(events[-1].event_id) synapse.metrics.event_processing_lag.labels( "federation_sender" @@ -254,7 +252,7 @@ class FederationSender(object): finally: self._is_processing = False - def _send_pdu(self, pdu, destinations): + def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: # We loop through all destinations to see whether we already have # a transaction in progress. If we do, stick it in the pending_pdus # table and we'll get back to it later. @@ -276,11 +274,11 @@ class FederationSender(object): self._get_per_destination_queue(destination).send_pdu(pdu, order) @defer.inlineCallbacks - def send_read_receipt(self, receipt): + def send_read_receipt(self, receipt: ReadReceipt): """Send a RR to any other servers in the room Args: - receipt (synapse.types.ReadReceipt): receipt to be sent + receipt: receipt to be sent """ # Some background on the rate-limiting going on here. 
@@ -343,7 +341,7 @@ class FederationSender(object): else: queue.flush_read_receipts_for_room(room_id) - def _schedule_rr_flush_for_room(self, room_id, n_domains): + def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None: # that is going to cause approximately len(domains) transactions, so now back # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM backoff_ms = self._rr_txn_interval_per_room_ms * n_domains @@ -352,7 +350,7 @@ class FederationSender(object): self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id) self._queues_awaiting_rr_flush_by_room[room_id] = set() - def _flush_rrs_for_room(self, room_id): + def _flush_rrs_for_room(self, room_id: str) -> None: queues = self._queues_awaiting_rr_flush_by_room.pop(room_id) logger.debug("Flushing RRs in %s to %s", room_id, queues) @@ -368,14 +366,11 @@ class FederationSender(object): @preserve_fn # the caller should not yield on this @defer.inlineCallbacks - def send_presence(self, states): + def send_presence(self, states: List[UserPresenceState]): """Send the new presence states to the appropriate destinations. This actually queues up the presence states ready for sending and triggers a background task to process them and send out the transactions. - - Args: - states (list(UserPresenceState)) """ if not self.hs.config.use_presence: # No-op if presence is disabled. @@ -412,11 +407,10 @@ class FederationSender(object): finally: self._processing_pending_presence = False - def send_presence_to_destinations(self, states, destinations): + def send_presence_to_destinations( + self, states: List[UserPresenceState], destinations: List[str] + ) -> None: """Send the given presence states to the given destinations. - - Args: - states (list[UserPresenceState]) destinations (list[str]) """ @@ -431,12 +425,9 @@ class FederationSender(object): @measure_func("txnqueue._process_presence") @defer.inlineCallbacks - def _process_presence_inner(self, states): + def _process_presence_inner(self, states: List[UserPresenceState]): """Given a list of states populate self.pending_presence_by_dest and poke to send a new transaction to each destination - - Args: - states (list(UserPresenceState)) """ hosts_and_states = yield get_interested_remotes(self.store, states, self.state) @@ -446,14 +437,20 @@ class FederationSender(object): continue self._get_per_destination_queue(destination).send_presence(states) - def build_and_send_edu(self, destination, edu_type, content, key=None): + def build_and_send_edu( + self, + destination: str, + edu_type: str, + content: dict, + key: Optional[Hashable] = None, + ): """Construct an Edu object, and queue it for sending Args: - destination (str): name of server to send to - edu_type (str): type of EDU to send - content (dict): content of EDU - key (Any|None): clobbering key for this edu + destination: name of server to send to + edu_type: type of EDU to send + content: content of EDU + key: clobbering key for this edu """ if destination == self.server_name: logger.info("Not sending EDU to ourselves") @@ -468,12 +465,12 @@ class FederationSender(object): self.send_edu(edu, key) - def send_edu(self, edu, key): + def send_edu(self, edu: Edu, key: Optional[Hashable]): """Queue an EDU for sending Args: - edu (Edu): edu to send - key (Any|None): clobbering key for this edu + edu: edu to send + key: clobbering key for this edu """ queue = self._get_per_destination_queue(edu.destination) if key: @@ -481,7 +478,7 @@ class FederationSender(object): else: queue.send_edu(edu) - def send_device_messages(self, 
destination): + def send_device_messages(self, destination: str): if destination == self.server_name: logger.warning("Not sending device update to ourselves") return @@ -501,5 +498,5 @@ class FederationSender(object): self._get_per_destination_queue(destination).attempt_new_transaction() - def get_current_token(self): + def get_current_token(self) -> int: return 0 diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 5012aaea35..e13cd20ffa 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -15,11 +15,11 @@ # limitations under the License. import datetime import logging +from typing import Dict, Hashable, Iterable, List, Tuple from prometheus_client import Counter -from twisted.internet import defer - +import synapse.server from synapse.api.errors import ( FederationDeniedError, HttpResponseException, @@ -31,7 +31,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.presence import UserPresenceState -from synapse.types import StateMap +from synapse.types import ReadReceipt from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter # This is defined in the Matrix spec and enforced by the receiver. @@ -56,13 +56,18 @@ class PerDestinationQueue(object): Manages the per-destination transmission queues. Args: - hs (synapse.HomeServer): - transaction_sender (TransactionManager): - destination (str): the server_name of the destination that we are managing + hs + transaction_sender + destination: the server_name of the destination that we are managing transmission for. """ - def __init__(self, hs, transaction_manager, destination): + def __init__( + self, + hs: "synapse.server.HomeServer", + transaction_manager: "synapse.federation.sender.TransactionManager", + destination: str, + ): self._server_name = hs.hostname self._clock = hs.get_clock() self._store = hs.get_datastore() @@ -72,20 +77,20 @@ class PerDestinationQueue(object): self.transmission_loop_running = False # a list of tuples of (pending pdu, order) - self._pending_pdus = [] # type: list[tuple[EventBase, int]] - self._pending_edus = [] # type: list[Edu] + self._pending_pdus = [] # type: List[Tuple[EventBase, int]] + self._pending_edus = [] # type: List[Edu] # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered # based on their key (e.g. typing events by room_id) # Map of (edu_type, key) -> Edu - self._pending_edus_keyed = {} # type: StateMap[Edu] + self._pending_edus_keyed = {} # type: Dict[Tuple[str, Hashable], Edu] # Map of user_id -> UserPresenceState of pending presence to be sent to this # destination - self._pending_presence = {} # type: dict[str, UserPresenceState] + self._pending_presence = {} # type: Dict[str, UserPresenceState] # room_id -> receipt_type -> user_id -> receipt_dict - self._pending_rrs = {} + self._pending_rrs = {} # type: Dict[str, Dict[str, Dict[str, dict]]] self._rrs_pending_flush = False # stream_id of last successfully sent to-device message. @@ -95,50 +100,50 @@ class PerDestinationQueue(object): # stream_id of last successfully sent device list update. 
self._last_device_list_stream_id = 0 - def __str__(self): + def __str__(self) -> str: return "PerDestinationQueue[%s]" % self._destination - def pending_pdu_count(self): + def pending_pdu_count(self) -> int: return len(self._pending_pdus) - def pending_edu_count(self): + def pending_edu_count(self) -> int: return ( len(self._pending_edus) + len(self._pending_presence) + len(self._pending_edus_keyed) ) - def send_pdu(self, pdu, order): + def send_pdu(self, pdu: EventBase, order: int) -> None: """Add a PDU to the queue, and start the transmission loop if neccessary Args: - pdu (EventBase): pdu to send - order (int): + pdu: pdu to send + order """ self._pending_pdus.append((pdu, order)) self.attempt_new_transaction() - def send_presence(self, states): + def send_presence(self, states: Iterable[UserPresenceState]) -> None: """Add presence updates to the queue. Start the transmission loop if neccessary. Args: - states (iterable[UserPresenceState]): presence to send + states: presence to send """ self._pending_presence.update({state.user_id: state for state in states}) self.attempt_new_transaction() - def queue_read_receipt(self, receipt): + def queue_read_receipt(self, receipt: ReadReceipt) -> None: """Add a RR to the list to be sent. Doesn't start the transmission loop yet (see flush_read_receipts_for_room) Args: - receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued + receipt: receipt to be queued """ self._pending_rrs.setdefault(receipt.room_id, {}).setdefault( receipt.receipt_type, {} )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data} - def flush_read_receipts_for_room(self, room_id): + def flush_read_receipts_for_room(self, room_id: str) -> None: # if we don't have any read-receipts for this room, it may be that we've already # sent them out, so we don't need to flush. if room_id not in self._pending_rrs: @@ -146,15 +151,15 @@ class PerDestinationQueue(object): self._rrs_pending_flush = True self.attempt_new_transaction() - def send_keyed_edu(self, edu, key): + def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: self._pending_edus_keyed[(edu.edu_type, key)] = edu self.attempt_new_transaction() - def send_edu(self, edu): + def send_edu(self, edu) -> None: self._pending_edus.append(edu) self.attempt_new_transaction() - def attempt_new_transaction(self): + def attempt_new_transaction(self) -> None: """Try to start a new transaction to this destination If there is already a transaction in progress to this destination, @@ -177,23 +182,22 @@ class PerDestinationQueue(object): self._transaction_transmission_loop, ) - @defer.inlineCallbacks - def _transaction_transmission_loop(self): - pending_pdus = [] + async def _transaction_transmission_loop(self) -> None: + pending_pdus = [] # type: List[Tuple[EventBase, int]] try: self.transmission_loop_running = True # This will throw if we wouldn't retry. We do this here so we fail # quickly, but we will later check this again in the http client, # hence why we throw the result away. 
- yield get_retry_limiter(self._destination, self._clock, self._store) + await get_retry_limiter(self._destination, self._clock, self._store) pending_pdus = [] while True: # We have to keep 2 free slots for presence and rr_edus limit = MAX_EDUS_PER_TRANSACTION - 2 - device_update_edus, dev_list_id = yield self._get_device_update_edus( + device_update_edus, dev_list_id = await self._get_device_update_edus( limit ) @@ -202,7 +206,7 @@ class PerDestinationQueue(object): ( to_device_edus, device_stream_id, - ) = yield self._get_to_device_message_edus(limit) + ) = await self._get_to_device_message_edus(limit) pending_edus = device_update_edus + to_device_edus @@ -269,7 +273,7 @@ class PerDestinationQueue(object): # END CRITICAL SECTION - success = yield self._transaction_manager.send_new_transaction( + success = await self._transaction_manager.send_new_transaction( self._destination, pending_pdus, pending_edus ) if success: @@ -280,7 +284,7 @@ class PerDestinationQueue(object): # Remove the acknowledged device messages from the database # Only bother if we actually sent some device messages if to_device_edus: - yield self._store.delete_device_msgs_for_remote( + await self._store.delete_device_msgs_for_remote( self._destination, device_stream_id ) @@ -289,7 +293,7 @@ class PerDestinationQueue(object): logger.info( "Marking as sent %r %r", self._destination, dev_list_id ) - yield self._store.mark_as_sent_devices_by_remote( + await self._store.mark_as_sent_devices_by_remote( self._destination, dev_list_id ) @@ -334,7 +338,7 @@ class PerDestinationQueue(object): # We want to be *very* sure we clear this after we stop processing self.transmission_loop_running = False - def _get_rr_edus(self, force_flush): + def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: if not self._pending_rrs: return if not force_flush and not self._rrs_pending_flush: @@ -351,17 +355,16 @@ class PerDestinationQueue(object): self._rrs_pending_flush = False yield edu - def _pop_pending_edus(self, limit): + def _pop_pending_edus(self, limit: int) -> List[Edu]: pending_edus = self._pending_edus pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:] return pending_edus - @defer.inlineCallbacks - def _get_device_update_edus(self, limit): + async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]: last_device_list = self._last_device_list_stream_id # Retrieve list of new device updates to send to the destination - now_stream_id, results = yield self._store.get_device_updates_by_remote( + now_stream_id, results = await self._store.get_device_updates_by_remote( self._destination, last_device_list, limit=limit ) edus = [ @@ -378,11 +381,10 @@ class PerDestinationQueue(object): return (edus, now_stream_id) - @defer.inlineCallbacks - def _get_to_device_message_edus(self, limit): + async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]: last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() - contents, stream_id = yield self._store.get_new_device_msgs_for_remote( + contents, stream_id = await self._store.get_new_device_msgs_for_remote( self._destination, last_device_stream_id, to_device_stream_id, limit ) edus = [ diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 5fed626d5b..3c2a02a3b3 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -13,14 +13,15 @@ # See the 
License for the specific language governing permissions and # limitations under the License. import logging +from typing import List from canonicaljson import json -from twisted.internet import defer - +import synapse.server from synapse.api.errors import HttpResponseException +from synapse.events import EventBase from synapse.federation.persistence import TransactionActions -from synapse.federation.units import Transaction +from synapse.federation.units import Edu, Transaction from synapse.logging.opentracing import ( extract_text_map, set_tag, @@ -39,7 +40,7 @@ class TransactionManager(object): shared between PerDestinationQueue objects """ - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self._server_name = hs.hostname self.clock = hs.get_clock() # nb must be called this for @measure_func self._store = hs.get_datastore() @@ -50,8 +51,9 @@ class TransactionManager(object): self._next_txn_id = int(self.clock.time_msec()) @measure_func("_send_new_transaction") - @defer.inlineCallbacks - def send_new_transaction(self, destination, pending_pdus, pending_edus): + async def send_new_transaction( + self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu] + ): # Make a transaction-sending opentracing span. This span follows on from # all the edus in that transaction. This needs to be done since there is @@ -127,7 +129,7 @@ class TransactionManager(object): return data try: - response = yield self._transport_layer.send_transaction( + response = await self._transport_layer.send_transaction( transaction, json_data_cb ) code = 200 diff --git a/synapse/federation/units.py b/synapse/federation/units.py index b4d743cde7..6b32e0dcbf 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -19,11 +19,15 @@ server protocol. import logging +import attr + +from synapse.types import JsonDict from synapse.util.jsonobject import JsonEncodedObject logger = logging.getLogger(__name__) +@attr.s(slots=True) class Edu(JsonEncodedObject): """ An Edu represents a piece of data sent from one homeserver to another. @@ -32,11 +36,24 @@ class Edu(JsonEncodedObject): internal ID or previous references graph. 
""" - valid_keys = ["origin", "destination", "edu_type", "content"] + edu_type = attr.ib(type=str) + content = attr.ib(type=dict) + origin = attr.ib(type=str) + destination = attr.ib(type=str) - required_keys = ["edu_type"] + def get_dict(self) -> JsonDict: + return { + "edu_type": self.edu_type, + "content": self.content, + } - internal_keys = ["origin", "destination"] + def get_internal_dict(self) -> JsonDict: + return { + "edu_type": self.edu_type, + "content": self.content, + "origin": self.origin, + "destination": self.destination, + } def get_context(self): return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}") diff --git a/synapse/server.pyi b/synapse/server.pyi index 90347ac23e..40eabfe5d9 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -107,3 +107,5 @@ class HomeServer(object): self, ) -> synapse.replication.tcp.client.ReplicationClientHandler: pass + def is_mine_id(self, domain_id: str) -> bool: + pass diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 68b9847bd2..2767b0497a 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -111,7 +111,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): retry_timings_res ) - self.datastore.get_device_updates_by_remote.return_value = (0, []) + self.datastore.get_device_updates_by_remote.return_value = defer.succeed( + (0, []) + ) def get_received_txn_response(*args): return defer.succeed(None) @@ -144,7 +146,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_current_state_deltas.return_value = (0, None) self.datastore.get_to_device_stream_token = lambda: 0 - self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0) + self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: defer.succeed( + ([], 0) + ) self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed( None diff --git a/tox.ini b/tox.ini index ef22368cf1..f8229eba88 100644 --- a/tox.ini +++ b/tox.ini @@ -179,6 +179,7 @@ extras = all commands = mypy \ synapse/api \ synapse/config/ \ + synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/sync.py \ synapse/handlers/ui_auth \ From 799001f2c0b31d72b95a252a3808da25987e1ed3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 15:30:04 +0000 Subject: [PATCH 075/382] Add a `make_event_from_dict` method (#6858) ... and use it in places where it's trivial to do so. This will make it easier to pass room versions into the FrozenEvent constructors. 
--- changelog.d/6858.misc | 1 + synapse/events/__init__.py | 16 ++++++++++++++-- synapse/events/builder.py | 10 +++------- synapse/federation/federation_base.py | 5 ++--- tests/api/test_filtering.py | 4 ++-- tests/crypto/test_event_signing.py | 6 +++--- tests/events/test_utils.py | 9 +++++---- tests/federation/test_federation_server.py | 4 ++-- tests/replication/slave/storage/test_events.py | 12 ++++++++---- tests/state/test_v2.py | 4 ++-- tests/test_event_auth.py | 10 +++++----- tests/test_federation.py | 6 +++--- tests/test_state.py | 4 ++-- 13 files changed, 52 insertions(+), 39 deletions(-) create mode 100644 changelog.d/6858.misc diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6858.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 89d41d82b6..a842661a90 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -16,12 +16,13 @@ import os from distutils.util import strtobool +from typing import Optional, Type import six from unpaddedbase64 import encode_base64 -from synapse.api.room_versions import EventFormatVersions +from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.types import JsonDict from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -407,7 +408,7 @@ class FrozenEventV3(FrozenEventV2): return self._event_id -def event_type_from_format_version(format_version): +def event_type_from_format_version(format_version: int) -> Type[EventBase]: """Returns the python type to use to construct an Event object for the given event format version. 
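
For reference, the mapping this function implements (and which `make_event_from_dict` in the next hunk wraps) ties each room version's `event_format` to one of the three frozen event classes. A quick sanity sketch, treating the exact room-version pairings as assumptions about the tree at this point rather than guarantees:

    from synapse.api.room_versions import RoomVersions
    from synapse.events import (
        FrozenEvent,
        FrozenEventV2,
        FrozenEventV3,
        event_type_from_format_version,
    )

    # Room versions 1 and 2 use event format 1, v3 uses format 2, and
    # v4/v5 use format 3 (assumed pairings).
    assert event_type_from_format_version(RoomVersions.V1.event_format) is FrozenEvent
    assert event_type_from_format_version(RoomVersions.V3.event_format) is FrozenEventV2
    assert event_type_from_format_version(RoomVersions.V5.event_format) is FrozenEventV3
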
@@ -427,3 +428,14 @@ def event_type_from_format_version(format_version): return FrozenEventV3 else: raise Exception("No event format %r" % (format_version,)) + + +def make_event_from_dict( + event_dict: JsonDict, + room_version: RoomVersion = RoomVersions.V1, + internal_metadata_dict: JsonDict = {}, + rejected_reason: Optional[str] = None, +) -> EventBase: + """Construct an EventBase from the given event dict""" + event_type = event_type_from_format_version(room_version.event_format) + return event_type(event_dict, internal_metadata_dict, rejected_reason) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 8d63ad6dc3..a0c4a40c27 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -28,11 +28,7 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.events import ( - EventBase, - _EventInternalMetadata, - event_type_from_format_version, -) +from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.types import EventID, JsonDict from synapse.util import Clock from synapse.util.stringutils import random_string @@ -256,8 +252,8 @@ def create_local_event_from_event_dict( event_dict.setdefault("signatures", {}) add_hashes_and_signatures(room_version, event_dict, hostname, signing_key) - return event_type_from_format_version(format_version)( - event_dict, internal_metadata_dict=internal_metadata_dict + return make_event_from_dict( + event_dict, room_version, internal_metadata_dict=internal_metadata_dict ) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index ebe8b8e9fe..eea64c1c9f 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -29,7 +29,7 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import check_event_content_hash -from synapse.events import EventBase, event_type_from_format_version +from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( @@ -374,8 +374,7 @@ def event_from_pdu_json( elif depth > MAX_DEPTH: raise SynapseError(400, "Depth too large", Codes.BAD_JSON) - event = event_type_from_format_version(room_version.event_format)(pdu_json) - + event = make_event_from_dict(pdu_json, room_version) event.internal_metadata.outlier = outlier return event diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 63d8633582..4e67503cf0 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -25,7 +25,7 @@ from twisted.internet import defer from synapse.api.constants import EventContentFields from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from tests import unittest from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver @@ -38,7 +38,7 @@ def MockEvent(**kwargs): kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: kwargs["type"] = "fake_type" - return FrozenEvent(kwargs) + return make_event_from_dict(kwargs) class FilteringTestCase(unittest.TestCase): diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 6143a50ab2..62f639a18d 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -19,7 
+19,7 @@ from unpaddedbase64 import decode_base64 from synapse.api.room_versions import RoomVersions from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from tests import unittest @@ -54,7 +54,7 @@ class EventSigningTestCase(unittest.TestCase): RoomVersions.V1, event_dict, HOSTNAME, self.signing_key ) - event = FrozenEvent(event_dict) + event = make_event_from_dict(event_dict) self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) @@ -88,7 +88,7 @@ class EventSigningTestCase(unittest.TestCase): RoomVersions.V1, event_dict, HOSTNAME, self.signing_key ) - event = FrozenEvent(event_dict) + event = make_event_from_dict(event_dict) self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 2b13980dfd..45d55b9e94 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.events.utils import ( copy_power_levels_contents, prune_event, @@ -30,7 +29,7 @@ def MockEvent(**kwargs): kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: kwargs["type"] = "fake_type" - return FrozenEvent(kwargs) + return make_event_from_dict(kwargs) class PruneEventTestCase(unittest.TestCase): @@ -38,7 +37,9 @@ class PruneEventTestCase(unittest.TestCase): `matchdict` when it is redacted. """ def run_test(self, evdict, matchdict): - self.assertEquals(prune_event(FrozenEvent(evdict)).get_dict(), matchdict) + self.assertEquals( + prune_event(make_event_from_dict(evdict)).get_dict(), matchdict + ) def test_minimal(self): self.run_test( diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 1ec8c40901..e7d8699040 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.federation.federation_server import server_matches_acl_event from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -105,7 +105,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): def _create_acl_event(content): - return FrozenEvent( + return make_event_from_dict( { "room_id": "!a:b", "event_id": "$a:b", diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index b1b037006d..d31210fbe4 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -15,7 +15,7 @@ import logging from canonicaljson import encode_canonical_json -from synapse.events import FrozenEvent, _EventInternalMetadata +from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource from synapse.replication.slave.storage.events import SlavedEventStore @@ -90,7 +90,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction - redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) self.check("get_event", [msg.event_id], redacted) def test_backfilled_redactions(self): @@ -110,7 +112,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction - redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) self.check("get_event", [msg.event_id], redacted) def test_invites(self): @@ -345,7 +349,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): if redacts is not None: event_dict["redacts"] = redacts - event = FrozenEvent(event_dict, internal_metadata_dict=internal) + event = make_event_from_dict(event_dict, internal_metadata_dict=internal) self.event_id += 1 diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 0f341d3ac3..5bafad9f19 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -22,7 +22,7 @@ import attr from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.event_auth import auth_types_for_event -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store from synapse.types import EventID @@ -89,7 +89,7 @@ class FakeEvent(object): if self.state_key is not None: event_dict["state_key"] = self.state_key - return FrozenEvent(event_dict) + return make_event_from_dict(event_dict) # All graphs start with this set of events diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index ca20b085a2..bfa5d6f510 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -18,7 +18,7 @@ import unittest from synapse import event_auth from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent +from synapse.events import 
make_event_from_dict class EventAuthTestCase(unittest.TestCase): @@ -94,7 +94,7 @@ TEST_ROOM_ID = "!test:room" def _create_event(user_id): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -106,7 +106,7 @@ def _create_event(user_id): def _join_event(user_id): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -119,7 +119,7 @@ def _join_event(user_id): def _power_levels_event(sender, content): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -132,7 +132,7 @@ def _power_levels_event(sender, content): def _random_state_event(sender): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), diff --git a/tests/test_federation.py b/tests/test_federation.py index 68684460c6..9b5cf562f3 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -2,7 +2,7 @@ from mock import Mock from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.logging.context import LoggingContext from synapse.types import Requester, UserID from synapse.util import Clock @@ -43,7 +43,7 @@ class MessageAcceptTests(unittest.TestCase): ) )[0] - join_event = FrozenEvent( + join_event = make_event_from_dict( { "room_id": self.room_id, "sender": "@baduser:test.serv", @@ -105,7 +105,7 @@ class MessageAcceptTests(unittest.TestCase): )[0] # Now lie about an event - lying_event = FrozenEvent( + lying_event = make_event_from_dict( { "room_id": self.room_id, "sender": "@baduser:test.serv", diff --git a/tests/test_state.py b/tests/test_state.py index 1e4449fa1c..d1578fe581 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.auth import Auth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler @@ -66,7 +66,7 @@ def create_event( d.update(kwargs) - event = FrozenEvent(d) + event = make_event_from_dict(d) return event From e1d858984d71b6edf56e1024f1475224bfa49054 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 15:30:26 +0000 Subject: [PATCH 076/382] Remove unused `get_room_stats_state` method. (#6869) --- changelog.d/6869.misc | 1 + synapse/storage/data_stores/main/stats.py | 25 ----------------------- 2 files changed, 1 insertion(+), 25 deletions(-) create mode 100644 changelog.d/6869.misc diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc new file mode 100644 index 0000000000..14f88f9bb7 --- /dev/null +++ b/changelog.d/6869.misc @@ -0,0 +1 @@ +Remove unused `get_room_stats_state` method. diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py index 7af1495e47..380c1ec7da 100644 --- a/synapse/storage/data_stores/main/stats.py +++ b/synapse/storage/data_stores/main/stats.py @@ -271,31 +271,6 @@ class StatsStore(StateDeltasStore): return slice_list - def get_room_stats_state(self, room_id): - """ - Returns the current room_stats_state for a room. - - Args: - room_id (str): The ID of the room to return state for. 
- - Returns (dict): - Dictionary containing these keys: - "name", "topic", "canonical_alias", "avatar", "join_rules", - "history_visibility" - """ - return self.db.simple_select_one( - "room_stats_state", - {"room_id": room_id}, - retcols=( - "name", - "topic", - "canonical_alias", - "avatar", - "join_rules", - "history_visibility", - ), - ) - @cached() def get_earliest_token_for_stats(self, stats_type, id): """ From 21db35f77e4718cfe6d6b292baada9dd02ef8280 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 15:45:39 +0000 Subject: [PATCH 077/382] Add support for putting fed user query API on workers (#6873) --- changelog.d/6873.feature | 1 + docs/workers.md | 1 + synapse/app/federation_reader.py | 2 ++ synapse/federation/federation_server.py | 7 +++-- synapse/handlers/device.py | 35 +++++++++++-------------- 5 files changed, 25 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6873.feature diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature new file mode 100644 index 0000000000..bbedf8f7f0 --- /dev/null +++ b/changelog.d/6873.feature @@ -0,0 +1 @@ +Add ability to route federation user device queries to workers. diff --git a/docs/workers.md b/docs/workers.md index 82442d6a0a..6f7ec58780 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -176,6 +176,7 @@ endpoints matching the following regular expressions: ^/_matrix/federation/v1/query_auth/ ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ + ^/_matrix/federation/v1/user/devices/ ^/_matrix/federation/v1/send/ ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 5e17ef1396..d055d11b23 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -33,6 +33,7 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore @@ -68,6 +69,7 @@ class FederationReaderSlavedStore( SlavedKeyStore, SlavedRegistrationStore, SlavedGroupServerStore, + SlavedDeviceStore, RoomStore, DirectoryStore, SlavedTransactionStore, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a6c966a393..7f9da49326 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -81,6 +81,8 @@ class FederationServer(FederationBase): self.handler = hs.get_handlers().federation_handler self.state = hs.get_state_handler() + self.device_handler = hs.get_device_handler() + self._server_linearizer = Linearizer("fed_server") self._transaction_linearizer = Linearizer("fed_txn_handler") @@ -523,8 +525,9 @@ class FederationServer(FederationBase): def on_query_client_keys(self, origin, content): return self.on_query_request("client_keys", content) - def on_query_user_devices(self, origin, user_id): - return self.on_query_request("user_devices", user_id) + async def on_query_user_devices(self, origin: str, user_id: str): + keys = await 
self.device_handler.on_federation_query_user_devices(user_id) + return 200, keys @trace async def on_claim_client_keys(self, origin, content): diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index a9bd431486..6d8e48ed39 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -225,6 +225,22 @@ class DeviceWorkerHandler(BaseHandler): return result + @defer.inlineCallbacks + def on_federation_query_user_devices(self, user_id): + stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) + master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master") + self_signing_key = yield self.store.get_e2e_cross_signing_key( + user_id, "self_signing" + ) + + return { + "user_id": user_id, + "stream_id": stream_id, + "devices": devices, + "master_key": master_key, + "self_signing_key": self_signing_key, + } + class DeviceHandler(DeviceWorkerHandler): def __init__(self, hs): @@ -239,9 +255,6 @@ class DeviceHandler(DeviceWorkerHandler): federation_registry.register_edu_handler( "m.device_list_update", self.device_list_updater.incoming_device_list_update ) - federation_registry.register_query_handler( - "user_devices", self.on_federation_query_user_devices - ) hs.get_distributor().observe("user_left_room", self.user_left_room) @@ -456,22 +469,6 @@ class DeviceHandler(DeviceWorkerHandler): self.notifier.on_new_event("device_list_key", position, users=[from_user_id]) - @defer.inlineCallbacks - def on_federation_query_user_devices(self, user_id): - stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) - master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master") - self_signing_key = yield self.store.get_e2e_cross_signing_key( - user_id, "self_signing" - ) - - return { - "user_id": user_id, - "stream_id": stream_id, - "devices": devices, - "master_key": master_key, - "self_signing_key": self_signing_key, - } - @defer.inlineCallbacks def user_left_room(self, user, room_id): user_id = user.to_string() From fe73f0d533dcdcf11f069d89ebbff2ce88d16bb3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 Feb 2020 00:41:20 +0000 Subject: [PATCH 078/382] Update setuptools for python 3.5 tests (#6880) Workaround for jaraco/zipp#40 --- .buildkite/scripts/test_old_deps.sh | 18 ++++++++++++++++++ changelog.d/6880.misc | 1 + 2 files changed, 19 insertions(+) create mode 100755 .buildkite/scripts/test_old_deps.sh create mode 100644 changelog.d/6880.misc diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh new file mode 100755 index 0000000000..dfd71b2511 --- /dev/null +++ b/.buildkite/scripts/test_old_deps.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# this script is run by buildkite in a plain `xenial` container; it installs the +# minimal requirements for tox and hands over to the py35-old tox environment. + +set -ex + +apt-get update +apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev + +# workaround for https://github.com/jaraco/zipp/issues/40 +python3.5 -m pip install 'setuptools>=34.4.0' + +python3.5 -m pip install tox + +export LANG="C.UTF-8" + +exec tox -e py35-old,combine diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc new file mode 100644 index 0000000000..8344a6ed1e --- /dev/null +++ b/changelog.d/6880.misc @@ -0,0 +1 @@ +Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. 
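A note on the workaround above: per the changelog entry, the CI failures came from old versions of `pip` picking up a `zipp` release that had dropped Python 3.5 support, which the explicit `setuptools>=34.4.0` pin avoids. A minimal, illustrative sketch for confirming that the pin from `test_old_deps.sh` is in effect in an environment — the version bound is taken from the script above; everything else here is an assumption for illustration:

```python
# Sketch only: check that the setuptools pin from test_old_deps.sh is in
# effect. The ">=34.4.0" bound comes from the script above.
import pkg_resources

installed = pkg_resources.get_distribution("setuptools").parsed_version
required = pkg_resources.parse_version("34.4.0")
if installed < required:
    raise SystemExit(
        "setuptools %s is too old; run: python3.5 -m pip install 'setuptools>=34.4.0'"
        % installed
    )
print("setuptools %s satisfies the workaround pin" % installed)
```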
From 8e64c5a24c26a733c0cfd3e997ea4079ae457096 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 10 Feb 2020 09:36:23 +0000 Subject: [PATCH 079/382] filter out m.room.aliases from the CS API until a better solution is specced (#6878) We're in the middle of properly mitigating spam caused by malicious aliases being added to a room. However, until this work fully lands, we temporarily filter out all m.room.aliases events from /sync and /messages on the CS API, to remove abusive aliases. This is considered acceptable as m.room.aliases events were never a reliable record of the given alias->id mapping and were purely informational, and in their current state do more harm than good. --- changelog.d/6878.feature | 1 + synapse/visibility.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/6878.feature diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature new file mode 100644 index 0000000000..af3e958a43 --- /dev/null +++ b/changelog.d/6878.feature @@ -0,0 +1 @@ +Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. diff --git a/synapse/visibility.py b/synapse/visibility.py index 100dc47a8a..d0abd8f04f 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -122,6 +122,13 @@ def filter_events_for_client( if not event.is_state() and event.sender in ignore_list: return None + # Until MSC2261 has landed we can't redact malicious alias events, so for + # now we temporarily filter out m.room.aliases entirely to mitigate + # abuse, while we spec a better solution to advertising aliases + # on rooms. + if event.type == EventTypes.Aliases: + return None + # Don't try to apply the room's retention policy if the event is a state event, as # MSC1763 states that retention is only considered for non-state events. if apply_retention_policies and not event.is_state(): From 3de57e706209d98a331265e6d5a51bfd24939a3b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Feb 2020 09:56:42 +0000 Subject: [PATCH 080/382] 1.10.0rc3 --- CHANGES.md | 15 +++++++++++++++ changelog.d/6878.feature | 1 - changelog.d/6880.misc | 1 - synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/6878.feature delete mode 100644 changelog.d/6880.misc diff --git a/CHANGES.md b/CHANGES.md index c2aa735908..4a81a04627 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.10.0rc3 (2020-02-10) +============================== + +Features +-------- + +- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) + + +Internal Changes +---------------- + +- Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. ([\#6880](https://github.com/matrix-org/synapse/issues/6880)) + + Synapse 1.10.0rc2 (2020-02-06) ============================== diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature deleted file mode 100644 index af3e958a43..0000000000 --- a/changelog.d/6878.feature +++ /dev/null @@ -1 +0,0 @@ -Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc deleted file mode 100644 index 8344a6ed1e..0000000000 --- a/changelog.d/6880.misc +++ /dev/null @@ -1 +0,0 @@ -Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 4f1859bd57..36c0cf557a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc2" +__version__ = "1.10.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 3a3118f4ecb631dec3cc44a928a3666b734f5dcb Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 10 Feb 2020 11:47:18 -0500 Subject: [PATCH 081/382] Add an additional test to the SyTest blacklist for worker mode. (#6883) --- .buildkite/worker-blacklist | 2 ++ changelog.d/6883.misc | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6883.misc diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index 158ab79154..094b6c94da 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update # this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536 Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state + +Can get rooms/{roomId}/members at a given point diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc new file mode 100644 index 0000000000..e0837d7987 --- /dev/null +++ b/changelog.d/6883.misc @@ -0,0 +1 @@ +Add an additional entry to the SyTest blacklist for worker mode. From 01209382fbb5ccf7bdef71f1d42f754b125f625a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 10 Feb 2020 18:07:35 +0000 Subject: [PATCH 082/382] filter out m.room.aliases from /sync state blocks (#6884) We forgot to filter out aliases from /sync state blocks as well as the timeline. --- changelog.d/6884.feature | 1 + synapse/handlers/sync.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/6884.feature diff --git a/changelog.d/6884.feature b/changelog.d/6884.feature new file mode 100644 index 0000000000..4f6b9630b8 --- /dev/null +++ b/changelog.d/6884.feature @@ -0,0 +1 @@ +Filter out m.room.aliases from /sync state blocks until a full fix lands. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd95f85e3f..2b62fd83fd 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -883,6 +883,7 @@ class SyncHandler(object): for e in sync_config.filter_collection.filter_room_state( list(state.values()) ) + if e.type != EventTypes.Aliases # until MSC2261 or alternative solution } async def unread_notifs_for_room_id(self, room_id, sync_config): From a92e703ab9d78aecc062e797f941bb7e206650a5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 10 Feb 2020 16:35:26 -0500 Subject: [PATCH 083/382] Reject device display names that are too long (#6882) * Reject device display names that are too long. Too long is currently defined as 100 characters in length. * Add a regression test for rejecting a too long device display name. --- changelog.d/6882.misc | 1 + synapse/handlers/device.py | 14 +++++++++++++- tests/handlers/test_device.py | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6882.misc diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc new file mode 100644 index 0000000000..e8382e36ae --- /dev/null +++ b/changelog.d/6882.misc @@ -0,0 +1 @@ +Reject device display names over 100 characters in length. 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 6d8e48ed39..50cea3f378 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( FederationDeniedError, HttpResponseException, RequestSendFailed, + SynapseError, ) from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import RoomStreamToken, get_domain_from_id @@ -39,6 +40,8 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +MAX_DEVICE_DISPLAY_NAME_LEN = 100 + class DeviceWorkerHandler(BaseHandler): def __init__(self, hs): @@ -404,9 +407,18 @@ class DeviceHandler(DeviceWorkerHandler): defer.Deferred: """ + # Reject a new displayname which is too long. + new_display_name = content.get("display_name") + if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN: + raise SynapseError( + 400, + "Device display name is too long (max %i)" + % (MAX_DEVICE_DISPLAY_NAME_LEN,), + ) + try: yield self.store.update_device( - user_id, device_id, new_display_name=content.get("display_name") + user_id, device_id, new_display_name=new_display_name ) yield self.notify_device_update(user_id, [device_id]) except errors.StoreError as e: diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index a3aa0a1cf2..62b47f6574 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -160,6 +160,24 @@ class DeviceTestCase(unittest.HomeserverTestCase): res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") + def test_update_device_too_long_display_name(self): + """Update a device with a display name that is invalid (too long).""" + self._record_users() + + # Request to update a device display name with a new value that is longer than allowed. + update = { + "display_name": "a" + * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) + } + self.get_failure( + self.handler.update_device(user1, "abc", update), + synapse.api.errors.SynapseError, + ) + + # Ensure the display name was not updated. + res = self.get_success(self.handler.get_device(user1, "abc")) + self.assertEqual(res["display_name"], "display 2") + def test_update_unknown_device(self): update = {"display_name": "new_display"} res = self.handler.update_device("user_id", "unknown_device_id", update) From 3edc65dd24f1916501e24e2353b4523bc2e3635c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 11 Feb 2020 10:43:16 +0000 Subject: [PATCH 084/382] 1.10.0rc4 --- CHANGES.md | 9 +++++++++ changelog.d/6884.feature | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6884.feature diff --git a/CHANGES.md b/CHANGES.md index 4a81a04627..f776560de7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.10.0rc4 (2020-02-11) +============================== + +Features +-------- + +- Filter out m.room.aliases from /sync state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) + + Synapse 1.10.0rc3 (2020-02-10) ============================== diff --git a/changelog.d/6884.feature b/changelog.d/6884.feature deleted file mode 100644 index 4f6b9630b8..0000000000 --- a/changelog.d/6884.feature +++ /dev/null @@ -1 +0,0 @@ -Filter out m.room.aliases from /sync state blocks until a full fix lands. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 36c0cf557a..cc69ff0c41 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc3" +__version__ = "1.10.0rc4" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9e45d573d499796bd1878d026cdd820347c10c00 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:50:55 +0000 Subject: [PATCH 085/382] changelog formatting --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f776560de7..8f252bb1b2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.10.0rc4 (2020-02-11) Features -------- -- Filter out m.room.aliases from /sync state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Filter out `m.room.aliase`s from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) Synapse 1.10.0rc3 (2020-02-10) @@ -13,7 +13,7 @@ Synapse 1.10.0rc3 (2020-02-10) Features -------- -- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) +- Filter out `m.room.aliases` from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) Internal Changes From aa7e4291ee55f4c1d817bfb4bf87930ae2f40aab Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:51:13 +0000 Subject: [PATCH 086/382] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8f252bb1b2..4248b7065b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.10.0rc4 (2020-02-11) Features -------- -- Filter out `m.room.aliase`s from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Filter out `m.room.aliases` from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) Synapse 1.10.0rc3 (2020-02-10) From 78d170262c7349cbb85baa4ad0749ccb5e2794e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:53:25 +0000 Subject: [PATCH 087/382] changelog wording --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4248b7065b..ab031fccdc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,10 @@ Synapse 1.10.0rc4 (2020-02-11) ============================== -Features +Bugfixes -------- -- Filter out `m.room.aliases` from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. 
([\#6884](https://github.com/matrix-org/synapse/issues/6884))

 Synapse 1.10.0rc3 (2020-02-10)
 ==============================

From 856b2a9555b6c5a70b98ad2a1e14b7dbee0949f3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 11 Feb 2020 11:06:28 +0000
Subject: [PATCH 088/382] 1.10.0rc5

---
 CHANGES.md          | 6 +++++-
 synapse/__init__.py | 2 +-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index ab031fccdc..1995a70b19 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,4 +1,4 @@
-Synapse 1.10.0rc4 (2020-02-11)
+Synapse 1.10.0rc5 (2020-02-11)
 ==============================

 Bugfixes
@@ -6,6 +6,10 @@ Bugfixes

 - Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. ([\#6884](https://github.com/matrix-org/synapse/issues/6884))

+Synapse 1.10.0rc4 (2020-02-11)
+==============================
+
+This release candidate was built incorrectly and is superseded by 1.10.0rc5.

 Synapse 1.10.0rc3 (2020-02-10)
 ==============================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index cc69ff0c41..ba339004ba 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.10.0rc4"
+__version__ = "1.10.0rc5"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From a443d2a25dc3cd99519c6b9a3a326545a0b2f933 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 11 Feb 2020 17:37:09 +0000
Subject: [PATCH 089/382] Spell out that Synapse never purges the last event
 sent in a room

---
 docs/message_retention_policies.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index 4300809dfe..f2e2794252 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -42,6 +42,10 @@ purged according to its room's policy, then the receiving server will
 process and store that event until it's picked up by the next purge job,
 though it will always hide it from clients.

+With the current implementation of this feature, in order not to break
+rooms, Synapse will never delete the last message sent to a room, and
+will only hide it from clients.
+

 ## Server configuration

From 705c978366146e85280af0dcf216e78d521352a3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 11 Feb 2020 17:38:27 +0000
Subject: [PATCH 090/382] Changelog

---
 changelog.d/6891.doc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6891.doc

diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc
new file mode 100644
index 0000000000..f3afbbccda
--- /dev/null
+++ b/changelog.d/6891.doc
@@ -0,0 +1 @@
+Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support.

From 6b21986e4ee999eb3669ec90f6db3bdfa7ce71a1 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 11 Feb 2020 17:56:04 +0000
Subject: [PATCH 091/382] Also spell it out in the purge history API doc

---
 docs/admin_api/purge_history_api.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst
index f7be226fd9..f2c4dc03ac 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.rst
@@ -8,6 +8,9 @@
 Depending on the amount of history being purged a call to the API may take
 several minutes or longer.
During this period users will not be able to paginate further back in the room from the point being purged from. +Note that, in order to not break the room, this API won't delete the last +message sent to it. + The API is: ``POST /_synapse/admin/v1/purge_history/[/]`` From a0c4769f1ae6d6aaf1a548b869f857836efbfb18 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 Feb 2020 17:56:42 +0000 Subject: [PATCH 092/382] Update the changelog file --- changelog.d/6891.doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc index f3afbbccda..2f46c385b7 100644 --- a/changelog.d/6891.doc +++ b/changelog.d/6891.doc @@ -1 +1 @@ -Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support. +Spell out that the last event sent to a room won't be deleted by a purge. From ba547ec3a94b17cfb634758deb4cdbc98fc840a9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 12 Feb 2020 07:02:19 -0500 Subject: [PATCH 093/382] Use BSD-compatible in-place editing for sed. (#6887) --- changelog.d/6887.misc | 1 + scripts-dev/config-lint.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6887.misc diff --git a/changelog.d/6887.misc b/changelog.d/6887.misc new file mode 100644 index 0000000000..b351d47c7b --- /dev/null +++ b/changelog.d/6887.misc @@ -0,0 +1 @@ +Fix the use of sed in the linting scripts when using BSD sed. diff --git a/scripts-dev/config-lint.sh b/scripts-dev/config-lint.sh index 677a854c85..189ca66535 100755 --- a/scripts-dev/config-lint.sh +++ b/scripts-dev/config-lint.sh @@ -3,7 +3,8 @@ # Exits with 0 if there are no problems, or another code otherwise. # Fix non-lowercase true/false values -sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml +sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml +rm docs/sample_config.yaml.bak # Check if anything changed git diff --exit-code docs/sample_config.yaml From 3dd2b5f5e313c0350a1a64f1d5e40c8bdc46029c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 12 Feb 2020 12:02:53 +0000 Subject: [PATCH 094/382] bump the version of Alpine Linux used in the docker images (#6897) --- changelog.d/6897.docker | 1 + docker/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6897.docker diff --git a/changelog.d/6897.docker b/changelog.d/6897.docker new file mode 100644 index 0000000000..8e7bcd719a --- /dev/null +++ b/changelog.d/6897.docker @@ -0,0 +1 @@ +Update the docker images to Alpine Linux 3.11. 
diff --git a/docker/Dockerfile b/docker/Dockerfile index e5a0d6d5f6..93d61739ae 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7 ### ### Stage 0: builder ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder +FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder # install the OS build deps From fdb816713aacb295fe804290f5043bdac866dcf5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 12:19:19 +0000 Subject: [PATCH 095/382] 1.10.0 --- CHANGES.md | 9 +++++++++ changelog.d/6897.docker | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6897.docker diff --git a/CHANGES.md b/CHANGES.md index 1995a70b19..603a50054e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.10.0 (2020-02-12) +=========================== + +Updates to the Docker image +--------------------------- + +- Update the docker images to Alpine Linux 3.11. ([\#6897](https://github.com/matrix-org/synapse/issues/6897)) + + Synapse 1.10.0rc5 (2020-02-11) ============================== diff --git a/changelog.d/6897.docker b/changelog.d/6897.docker deleted file mode 100644 index 8e7bcd719a..0000000000 --- a/changelog.d/6897.docker +++ /dev/null @@ -1 +0,0 @@ -Update the docker images to Alpine Linux 3.11. diff --git a/debian/changelog b/debian/changelog index 74eb29c5ee..cdc3b1a5c2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.10.0) stable; urgency=medium + + * New synapse release 1.10.0. + + -- Synapse Packaging team Wed, 12 Feb 2020 12:18:54 +0000 + matrix-synapse-py3 (1.9.1) stable; urgency=medium * New synapse release 1.9.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index ba339004ba..9d285fca38 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc5" +__version__ = "1.10.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 7b8d654a6196d889c8e1c2a403f5176650216432 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 12:20:37 +0000 Subject: [PATCH 096/382] Move the warning at the top of the release changes --- CHANGES.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 603a50054e..0bce84f400 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.10.0 (2020-02-12) =========================== +**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. + Updates to the Docker image --------------------------- @@ -54,9 +56,6 @@ Internal Changes Synapse 1.10.0rc1 (2020-01-31) ============================== -**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. 
-
-
 Features
 --------

From 08e050c3fddb35cc54f6e0704fa9b54128dddc39 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 12 Feb 2020 15:39:40 +0000
Subject: [PATCH 097/382] Rephrase

---
 docs/admin_api/purge_history_api.rst | 4 ++--
 docs/message_retention_policies.md   | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst
index f2c4dc03ac..e2a620c54f 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.rst
@@ -8,8 +8,8 @@ Depending on the amount of history being purged a call to the API may take
 several minutes or longer. During this period users will not be able to
 paginate further back in the room from the point being purged from.

-Note that, in order to not break the room, this API won't delete the last
-message sent to it.
+Note that Synapse requires at least one message in each room, so it will never
+delete the last message in a room.

 The API is:

diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md
index f2e2794252..1dd60bdad9 100644
--- a/docs/message_retention_policies.md
+++ b/docs/message_retention_policies.md
@@ -42,9 +42,9 @@ purged according to its room's policy, then the receiving server will
 process and store that event until it's picked up by the next purge job,
 though it will always hide it from clients.

-With the current implementation of this feature, in order not to break
-rooms, Synapse will never delete the last message sent to a room, and
-will only hide it from clients.
+Synapse requires at least one message in each room, so it will never
+delete the last message in a room. It will, however, hide it from
+clients.

 ## Server configuration

From d8994942f28f5028e560f6aba52512fae3ca1a6a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 12 Feb 2020 18:14:10 +0000
Subject: [PATCH 098/382] Return a 404 for admin api user lookup if user not
 found (#6901)

---
 changelog.d/6901.misc         |  1 +
 synapse/rest/admin/users.py   |  5 ++++-
 tests/rest/admin/test_user.py | 16 ++++++++++++++++
 3 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6901.misc

diff --git a/changelog.d/6901.misc b/changelog.d/6901.misc
new file mode 100644
index 0000000000..b2f12bbe86
--- /dev/null
+++ b/changelog.d/6901.misc
@@ -0,0 +1 @@
+Return a 404 instead of 200 for querying information of a non-existent user through the admin API.
\ No newline at end of file diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index e75c5f1370..2107b5dc56 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -21,7 +21,7 @@ from six import text_type from six.moves import http_client from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -152,6 +152,9 @@ class UserRestServletV2(RestServlet): ret = await self.admin_handler.get_user(target_user) + if not ret: + raise NotFoundError("User not found") + return 200, ret async def on_PUT(self, request, user_id): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 3b5169b38d..490ce8f55d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -401,6 +401,22 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("You are not a server admin", channel.json_body["error"]) + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + self.hs.config.registration_shared_secret = None + + request, channel = self.make_request( + "GET", + "/_synapse/admin/v2/users/@unknown_person:test", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) + def test_requester_is_admin(self): """ If the user is a server admin, a new user is created. From f092029d2ddc333d557c3551ebc443d59221433c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:14:16 +0000 Subject: [PATCH 099/382] Update ACME.md to mention ACME v1 deprecation --- docs/ACME.md | 48 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/docs/ACME.md b/docs/ACME.md index 9eb18a9cf5..6d06cae3b3 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -1,12 +1,46 @@ # ACME -Synapse v1.0 will require valid TLS certificates for communication between -servers (port `8448` by default) in addition to those that are client-facing -(port `443`). If you do not already have a valid certificate for your domain, -the easiest way to get one is with Synapse's new ACME support, which will use -the ACME protocol to provision a certificate automatically. Synapse v0.99.0+ -will provision server-to-server certificates automatically for you for free -through [Let's Encrypt](https://letsencrypt.org/) if you tell it to. +From version 1.0 (June 2019) onwards, Synapse requires valid TLS +certificates for communication between servers (by default on port +`8448`) in addition to those that are client-facing (port `443`). To +help homeserver admins fulfil this new requirement, Synapse v0.99.0 +introduced support for automatically provisioning certificates through +[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol. + +## Deprecation of ACME v1 + +In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430), +Let's Encrypt announced that they were deprecating version 1 of the ACME +protocol, with the plan to disable the use of it for new accounts in +November 2019, and for existing accounts in June 2020. 
+ +Synapse doesn't currently support version 2 of the ACME protocol, which +means that: + +* for existing installs, Synapse's built-in ACME support will continue + to work until June 2020. +* for new installs, this feature will not work at all. + +Either way, it is recommended to move from Synapse's ACME support +feature to an external automated tool such as [certbot](https://github.com/certbot/certbot) +(or browse [this list](https://letsencrypt.org/fr/docs/client-options/) +for an alternative ACME client). + +It's also recommended to use a reverse proxy for the server-facing +communications (mode documentation about this can be found +[here](/docs/reverse_proxy.md)) as well as the client-facing ones and +have it serve the certificates. + +In case you can't do that and need Synapse to serve them itself, make +sure to set the `tls_certificate_path` configuration setting to the path +of the certificate (make sure to use the certificate containing the full +certification chain, e.g. `fullchain.pem` if using certbot) and +`tls_private_key_path` to the path of the matching private key. + +If you still want to use Synapse's built-in ACME support, the rest of +this document explains how to set it up. + +## Initial setup In the case that your `server_name` config variable is the same as the hostname that the client connects to, then the same certificate can be From e45a7c09396c56d6ca7e3f42827cc354a942ba5d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:14:59 +0000 Subject: [PATCH 100/382] Remove duplicated info about certbot et al --- docs/ACME.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/ACME.md b/docs/ACME.md index 6d06cae3b3..3b4416efe1 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -66,11 +66,6 @@ If you already have certificates, you will need to back up or delete them (files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root directory), Synapse's ACME implementation will not overwrite them. -You may wish to use alternate methods such as Certbot to obtain a certificate -from Let's Encrypt, depending on your server configuration. Of course, if you -already have a valid certificate for your homeserver's domain, that can be -placed in Synapse's config directory without the need for any ACME setup. - ## ACME setup The main steps for enabling ACME support in short summary are: From e88a5dd108f607c9ec99356a2601147e41a20533 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:15:41 +0000 Subject: [PATCH 101/382] Changelog --- changelog.d/6905.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6905.doc diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc new file mode 100644 index 0000000000..ca10b38301 --- /dev/null +++ b/changelog.d/6905.doc @@ -0,0 +1 @@ +Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins. 
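A practical companion to the documentation change above: the new ACME.md text stresses pointing `tls_certificate_path` at the file containing the full certification chain (certbot's `fullchain.pem`) rather than a bare certificate. A quick, illustrative way to sanity-check a file before configuring it — the path below is a hypothetical example, not something mandated by the docs:

```python
# Sketch only: count PEM certificates in the file tls_certificate_path will
# point at. A full chain (e.g. certbot's fullchain.pem) contains the leaf
# certificate plus at least one intermediate, so expect two or more blocks.
CERT_PATH = "/etc/letsencrypt/live/example.com/fullchain.pem"  # hypothetical

with open(CERT_PATH) as f:
    blocks = f.read().count("-----BEGIN CERTIFICATE-----")

if blocks < 2:
    print("warning: %s holds %d certificate(s); it may be a bare cert, not a full chain"
          % (CERT_PATH, blocks))
else:
    print("%s contains %d certificates; chain looks complete" % (CERT_PATH, blocks))
```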
From 459d089af7e90a703df9637a071e9285bf85eb12 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 21:05:30 +0000 Subject: [PATCH 102/382] Mention that using Synapse to serve certificates requires restarts --- docs/ACME.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/ACME.md b/docs/ACME.md index 3b4416efe1..97ac3c5ba3 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -35,7 +35,9 @@ In case you can't do that and need Synapse to serve them itself, make sure to set the `tls_certificate_path` configuration setting to the path of the certificate (make sure to use the certificate containing the full certification chain, e.g. `fullchain.pem` if using certbot) and -`tls_private_key_path` to the path of the matching private key. +`tls_private_key_path` to the path of the matching private key. Note +that in this case you will need to restart Synapse after each +certificate renewal so that Synapse stops using the old certificate. If you still want to use Synapse's built-in ACME support, the rest of this document explains how to set it up. From 862669d6cc802ff610de6f6df644ef2a6706abb3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 11:29:08 +0000 Subject: [PATCH 103/382] Update docs/ACME.md --- docs/ACME.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ACME.md b/docs/ACME.md index 97ac3c5ba3..f4c4740476 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -27,7 +27,7 @@ feature to an external automated tool such as [certbot](https://github.com/certb for an alternative ACME client). It's also recommended to use a reverse proxy for the server-facing -communications (mode documentation about this can be found +communications (more documentation about this can be found [here](/docs/reverse_proxy.md)) as well as the client-facing ones and have it serve the certificates. From dc3f9987061e24bec77ad9e26a7679a1907cee4f Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Thu, 13 Feb 2020 06:02:32 -0600 Subject: [PATCH 104/382] Remove m.lazy_load_members from unstable features since it is in CS r0.5.0 (#6877) Fixes #5528 --- changelog.d/6877.removal | 1 + synapse/rest/client/versions.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/6877.removal diff --git a/changelog.d/6877.removal b/changelog.d/6877.removal new file mode 100644 index 0000000000..9545e31fbe --- /dev/null +++ b/changelog.d/6877.removal @@ -0,0 +1 @@ +Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 3d0fefb4df..3eeb3607f4 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -52,7 +52,6 @@ class VersionsRestServlet(RestServlet): ], # as per MSC1497: "unstable_features": { - "m.lazy_load_members": True, # as per MSC2190, as amended by MSC2264 # to be removed in r0.6.0 "m.id_access_token": True, From 361de49c90fd1f35adc4a6bca8206e50e7f15454 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 Feb 2020 07:40:57 -0500 Subject: [PATCH 105/382] Add documentation for the spam checker module (#6906) Add documentation for the spam checker. 
---
 changelog.d/6906.doc |  1 +
 docs/spam_checker.md | 85 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)
 create mode 100644 changelog.d/6906.doc
 create mode 100644 docs/spam_checker.md

diff --git a/changelog.d/6906.doc b/changelog.d/6906.doc
new file mode 100644
index 0000000000..053b2436ae
--- /dev/null
+++ b/changelog.d/6906.doc
@@ -0,0 +1 @@
+Add documentation for the spam checker.
diff --git a/docs/spam_checker.md b/docs/spam_checker.md
new file mode 100644
index 0000000000..97ff17f952
--- /dev/null
+++ b/docs/spam_checker.md
@@ -0,0 +1,85 @@
+# Handling spam in Synapse
+
+Synapse has support to customize spam checking behavior. It can plug into a
+variety of events and affect how they are presented to users on your homeserver.
+
+The spam checking behavior is implemented as a Python class, which must be
+able to be imported by the running Synapse.
+
+## Python spam checker class
+
+The Python class is instantiated with two objects:
+
+* Any configuration (see below).
+* An instance of `synapse.spam_checker_api.SpamCheckerApi`.
+
+It then implements methods which return a boolean to alter behavior in Synapse.
+
+There's a generic method for checking every event (`check_event_for_spam`), as
+well as some specific methods:
+
+* `user_may_invite`
+* `user_may_create_room`
+* `user_may_create_room_alias`
+* `user_may_publish_room`
+
+The details of each of these methods (as well as their inputs and outputs)
+are documented in the `synapse.events.spamcheck.SpamChecker` class.
+
+The `SpamCheckerApi` class provides a way for the custom spam checker class to
+call back into the homeserver internals. It currently implements the following
+methods:
+
+* `get_state_events_in_room`
+
+### Example
+
+```python
+class ExampleSpamChecker:
+    def __init__(self, config, api):
+        self.config = config
+        self.api = api
+
+    def check_event_for_spam(self, foo):
+        return False  # allow all events
+
+    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+        return True  # allow all invites
+
+    def user_may_create_room(self, userid):
+        return True  # allow all room creations
+
+    def user_may_create_room_alias(self, userid, room_alias):
+        return True  # allow all room aliases
+
+    def user_may_publish_room(self, userid, room_id):
+        return True  # allow publishing of all rooms
+```
+
+## Configuration
+
+Modify the `spam_checker` section of your `homeserver.yaml` in the following
+manner:
+
+`module` should point to the fully qualified Python class that implements your
+custom logic, e.g. `my_module.ExampleSpamChecker`.
+
+`config` is a dictionary that gets passed to the spam checker class.
+
+### Example
+
+This section might look like:
+
+```yaml
+spam_checker:
+  module: my_module.ExampleSpamChecker
+  config:
+    # Enable or disable a specific option in ExampleSpamChecker.
+    my_custom_option: true
+```
+
+## Examples
+
+The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged
+example using the Synapse spam checking API, including a bot for dynamic
+configuration.
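To complement the documentation just added, here is a sketch of a checker that actually consults its `config` dictionary, rejecting invites from a configurable list of senders. The method names and signatures are the ones documented above; the `blocked_inviters` config key is a made-up example, not part of Synapse:

```python
class DenylistSpamChecker:
    """Illustrative only: blocks invites from user IDs listed under the
    hypothetical `blocked_inviters` key of the spam_checker config."""

    def __init__(self, config, api):
        # `config` is the dictionary from homeserver.yaml; `api` is the
        # SpamCheckerApi instance described in the documentation above.
        self.blocked_inviters = set(config.get("blocked_inviters", []))
        self.api = api

    def check_event_for_spam(self, event):
        return False  # allow all events

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        # Returning False marks the invite as spam and blocks it.
        return inviter_userid not in self.blocked_inviters

    def user_may_create_room(self, userid):
        return True

    def user_may_create_room_alias(self, userid, room_alias):
        return True

    def user_may_publish_room(self, userid, room_id):
        return True
```

It would be wired up exactly like the `ExampleSpamChecker` above, with `module: my_module.DenylistSpamChecker` and a `config` block such as `blocked_inviters: ["@spammer:example.com"]`.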
From 5820ed905f83c5241b686e03e121f67719a99046 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 14:20:08 +0000 Subject: [PATCH 106/382] Add mention and warning about ACME v1 deprecation to the Synapse config --- docs/sample_config.yaml | 5 +++++ synapse/config/tls.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8e8cf513b0..7232d8f3f8 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -466,6 +466,11 @@ retention: # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. # +# Note that ACME v1 is now deprecated, and Synapse currently doesn't support +# ACME v2. This means that this feature currently won't work with installs set +# up after November 2019. For more info, and alternative solutions, see +# https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 +# # Note that provisioning a certificate in this way requires port 80 to be # routed to Synapse so that it can complete the http-01 ACME challenge. # By default, if you enable ACME support, Synapse will attempt to listen on diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 2514b0713d..694f52c032 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -32,6 +32,17 @@ from synapse.util import glob_to_regex logger = logging.getLogger(__name__) +ACME_SUPPORT_ENABLED_WARN = """\ +This server uses Synapse's built-in ACME support. Note that ACME v1 has been +deprecated by Let's Encrypt, and that Synapse doesn't currently support ACME v2, +which means that this feature will not work with Synapse installs set up after +November 2019, and that it may stop working on June 2020 for installs set up +before that date. + +For more info and alternative solutions, see +https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 +""" + class TlsConfig(Config): section = "tls" @@ -44,6 +55,9 @@ class TlsConfig(Config): self.acme_enabled = acme_config.get("enabled", False) + if self.acme_enabled: + logger.warning(ACME_SUPPORT_ENABLED_WARN) + # hyperlink complains on py2 if this is not a Unicode self.acme_url = six.text_type( acme_config.get("url", "https://acme-v01.api.letsencrypt.org/directory") @@ -362,6 +376,11 @@ class TlsConfig(Config): # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. # + # Note that ACME v1 is now deprecated, and Synapse currently doesn't support + # ACME v2. This means that this feature currently won't work with installs set + # up after November 2019. For more info, and alternative solutions, see + # https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 + # # Note that provisioning a certificate in this way requires port 80 to be # routed to Synapse so that it can complete the http-01 ACME challenge. 
# By default, if you enable ACME support, Synapse will attempt to listen on From 12bbcc255a77d76be13d8b8f142e9d329e91d520 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 14:58:34 +0000 Subject: [PATCH 107/382] Add a comprehensive error when failing to register for an ACME account --- synapse/handlers/acme.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index 46ac73106d..cfb5a4f39b 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -22,6 +22,7 @@ from twisted.web import server, static from twisted.web.resource import Resource from synapse.app import check_bind_error +from synapse.config import ConfigError logger = logging.getLogger(__name__) @@ -71,7 +72,18 @@ class AcmeHandler(object): # want it to control where we save the certificates, we have to reach in # and trigger the registration machinery ourselves. self._issuer._registered = False - yield self._issuer._ensure_registered() + + try: + yield self._issuer._ensure_registered() + except Exception: + raise ConfigError("Failed to register with the ACME provider. This is likely" + " happening because the install is new, and ACME v1 has" + " been deprecated by Let's Encrypt and is disabled for" + " installs set up after November 2019. At the moment," + " Synapse doesn't support ACME v2. For more info and" + " alternative solution, check out" + " https://github.com/matrix-org/synapse/blob/master/docs/" + "ACME.md#deprecation-of-acme-v1") @defer.inlineCallbacks def provision_certificate(self): From ef9c275d96bae28c6ea51f16e4907357be418419 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 15:44:14 +0000 Subject: [PATCH 108/382] Add a separator for the config warning --- synapse/config/tls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 694f52c032..5ecd934602 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -41,7 +41,7 @@ before that date. For more info and alternative solutions, see https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 -""" +--------------------------------------------------------------------------------""" class TlsConfig(Config): From 0cb83cde7075bed522058f43a23342a4939c763a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 16:06:31 +0000 Subject: [PATCH 109/382] Lint --- synapse/handlers/acme.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index cfb5a4f39b..c52796983d 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -77,13 +77,12 @@ class AcmeHandler(object): yield self._issuer._ensure_registered() except Exception: raise ConfigError("Failed to register with the ACME provider. This is likely" - " happening because the install is new, and ACME v1 has" - " been deprecated by Let's Encrypt and is disabled for" - " installs set up after November 2019. At the moment," - " Synapse doesn't support ACME v2. For more info and" - " alternative solution, check out" - " https://github.com/matrix-org/synapse/blob/master/docs/" - "ACME.md#deprecation-of-acme-v1") + " happening because the install is new, and ACME v1 has been deprecated" + " by Let's Encrypt and is disabled for installs set up after November" + " 2019. At the moment, Synapse doesn't support ACME v2. 
For more info" + " and alternative solution, check out https://github.com/matrix-org" + "/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1" + ) @defer.inlineCallbacks def provision_certificate(self): From f3f142259e5c882598b7426f36c26c4aca03c5d6 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 16:10:16 +0000 Subject: [PATCH 110/382] Changelog --- changelog.d/6907.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6907.doc diff --git a/changelog.d/6907.doc b/changelog.d/6907.doc new file mode 100644 index 0000000000..be0e698af8 --- /dev/null +++ b/changelog.d/6907.doc @@ -0,0 +1 @@ +Update Synapse's documentation to warn about the deprecation of ACME v1. From df1c98c22a9ada46a2a103184aab3b5e08539b19 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 16:12:20 +0000 Subject: [PATCH 111/382] Update changelog for #6905 to group it with upcoming PRs --- changelog.d/6905.doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc index ca10b38301..be0e698af8 100644 --- a/changelog.d/6905.doc +++ b/changelog.d/6905.doc @@ -1 +1 @@ -Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins. +Update Synapse's documentation to warn about the deprecation of ACME v1. From 65bdc35a1f1078377e20ea3906120ba32db9057f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 16:14:15 +0000 Subject: [PATCH 112/382] Lint --- synapse/handlers/acme.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index c52796983d..e6797535e6 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -76,12 +76,13 @@ class AcmeHandler(object): try: yield self._issuer._ensure_registered() except Exception: - raise ConfigError("Failed to register with the ACME provider. This is likely" - " happening because the install is new, and ACME v1 has been deprecated" - " by Let's Encrypt and is disabled for installs set up after November" - " 2019. At the moment, Synapse doesn't support ACME v2. For more info" - " and alternative solution, check out https://github.com/matrix-org" - "/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1" + raise ConfigError( + "Failed to register with the ACME provider. This is likely happening" + " because the install is new, and ACME v1 has been deprecated by Let's" + " Encrypt and is disabled for installs set up after November 2019. At the" + " moment, Synapse doesn't support ACME v2. For more info and alternative" + " solution, check out https://github.com/matrix-org/synapse/blob/master" + "/docs/ACME.md#deprecation-of-acme-v1" ) @defer.inlineCallbacks From 36af094017f87f0e3ec06e6ab92caa7971b43b8e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 17:03:41 +0000 Subject: [PATCH 113/382] Linters are hard but in they end they just want what's best for us --- synapse/config/tls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 5ecd934602..97a12d51f6 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -40,7 +40,7 @@ November 2019, and that it may stop working on June 2020 for installs set up before that date. 
For more info and alternative solutions, see -https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 +https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 --------------------------------------------------------------------------------""" From 71cc6bab5fc37f20a8fc14a52d8bd7930aec1c23 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 17:22:44 +0000 Subject: [PATCH 114/382] Update INSTALL.md to recommend reverse proxying and warn about ACMEv1 deprecation --- INSTALL.md | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index d25fcf0753..42132f1eb4 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -388,15 +388,17 @@ Once you have installed synapse as above, you will need to configure it. ## TLS certificates -The default configuration exposes a single HTTP port: http://localhost:8008. It -is suitable for local testing, but for any practical use, you will either need -to enable a reverse proxy, or configure Synapse to expose an HTTPS port. +The default configuration exposes a single HTTP port on the local +interface: `http://localhost:8008`. It is suitable for local testing, +but for any practical use, you will need Synapse's APIs to be served +over HTTPS. -For information on using a reverse proxy, see +The recommended way to do so is to set up a reverse proxy on port +`8448`. You can find documentation on doing so in [docs/reverse_proxy.md](docs/reverse_proxy.md). -To configure Synapse to expose an HTTPS port, you will need to edit -`homeserver.yaml`, as follows: +Alternatively, you can configure Synapse to expose an HTTPS port. To do +so, you will need to edit `homeserver.yaml`, as follows: * First, under the `listeners` section, uncomment the configuration for the TLS-enabled listener. (Remove the hash sign (`#`) at the start of @@ -414,11 +416,13 @@ To configure Synapse to expose an HTTPS port, you will need to edit point these settings at an existing certificate and key, or you can enable Synapse's built-in ACME (Let's Encrypt) support. Instructions for having Synapse automatically provision and renew federation - certificates through ACME can be found at [ACME.md](docs/ACME.md). If you - are using your own certificate, be sure to use a `.pem` file that includes - the full certificate chain including any intermediate certificates (for - instance, if using certbot, use `fullchain.pem` as your certificate, not - `cert.pem`). + certificates through ACME can be found at [ACME.md](docs/ACME.md). + Note that, as pointed out in that document, this feature will not + work with installs set up after November 2020. If you are using your + own certificate, be sure to use a `.pem` file that includes the full + certificate chain including any intermediate certificates (for + instance, if using certbot, use `fullchain.pem` as your certificate, + not `cert.pem`). 
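A quick way to sanity-check the certificate advice above — i.e. that the configured `.pem` file really contains the full chain rather than just the leaf certificate — is to count the PEM blocks in it. A minimal sketch using only the standard library; the path is hypothetical:

```python
# Count the certificates in a PEM bundle. fullchain.pem should contain the
# leaf certificate plus at least one intermediate; cert.pem contains only one.
cert_path = "/etc/letsencrypt/live/example.com/fullchain.pem"  # hypothetical path

with open(cert_path) as f:
    num_certs = f.read().count("-----BEGIN CERTIFICATE-----")

if num_certs < 2:
    print("only %d certificate found - is this cert.pem rather than fullchain.pem?" % num_certs)
else:
    print("%d certificates found - chain looks complete" % num_certs)
```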
For a more detailed guide to configuring your server for federation, see [federate.md](docs/federate.md) From 79460ce9c987195afeb9453a33386240ffc0af3f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 17:24:14 +0000 Subject: [PATCH 115/382] Changelog --- changelog.d/6909.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6909.doc diff --git a/changelog.d/6909.doc b/changelog.d/6909.doc new file mode 100644 index 0000000000..be0e698af8 --- /dev/null +++ b/changelog.d/6909.doc @@ -0,0 +1 @@ +Update Synapse's documentation to warn about the deprecation of ACME v1. From ffe1fc111d3760e975f7ae2e676c2807b4363b7b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 18:16:48 +0000 Subject: [PATCH 116/382] Update INSTALL.md Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- INSTALL.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/INSTALL.md b/INSTALL.md index 42132f1eb4..9fe767704b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -418,7 +418,9 @@ so, you will need to edit `homeserver.yaml`, as follows: for having Synapse automatically provision and renew federation certificates through ACME can be found at [ACME.md](docs/ACME.md). Note that, as pointed out in that document, this feature will not - work with installs set up after November 2020. If you are using your + work with installs set up after November 2020. + + If you are using your own certificate, be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates (for instance, if using certbot, use `fullchain.pem` as your certificate, From 49f877d32efc79cb40b2766cb052cf35bad31de5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 07:17:54 -0500 Subject: [PATCH 117/382] Filter the results of user directory searching via the spam checker (#6888) Add a method to the spam checker to filter the user directory results. --- changelog.d/6888.feature | 1 + docs/spam_checker.md | 3 + synapse/events/spamcheck.py | 27 ++++++++ synapse/handlers/user_directory.py | 14 +++- tests/handlers/test_user_directory.py | 92 +++++++++++++++++++++++++++ 5 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6888.feature diff --git a/changelog.d/6888.feature b/changelog.d/6888.feature new file mode 100644 index 0000000000..1b7ac0c823 --- /dev/null +++ b/changelog.d/6888.feature @@ -0,0 +1 @@ +The result of a user directory search can now be filtered via the spam checker. diff --git a/docs/spam_checker.md b/docs/spam_checker.md index 97ff17f952..5b5f5000b7 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md @@ -54,6 +54,9 @@ class ExampleSpamChecker: def user_may_publish_room(self, userid, room_id): return True # allow publishing of all rooms + + def check_username_for_spam(self, user_profile): + return False # allow all usernames ``` ## Configuration diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 5a907718d6..0a13fca9a4 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -15,6 +15,7 @@ # limitations under the License. import inspect +from typing import Dict from synapse.spam_checker_api import SpamCheckerApi @@ -125,3 +126,29 @@ class SpamChecker(object): return True return self.spam_checker.user_may_publish_room(userid, room_id) + + def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool: + """Checks if a user ID or display name are considered "spammy" by this server. 
+ + If the server considers a username spammy, then it will not be included in + user directory results. + + Args: + user_profile: The user information to check, it contains the keys: + * user_id + * display_name + * avatar_url + + Returns: + True if the user is spammy. + """ + if self.spam_checker is None: + return False + + # For backwards compatibility, if the method does not exist on the spam checker, fallback to not interfering. + checker = getattr(self.spam_checker, "check_username_for_spam", None) + if not checker: + return False + # Make a copy of the user profile object to ensure the spam checker + # cannot modify it. + return checker(user_profile.copy()) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 81aa58dc8c..722760c59d 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -52,6 +52,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.update_user_directory self.search_all_users = hs.config.user_directory_search_all_users + self.spam_checker = hs.get_spam_checker() # The current position in the current_state_delta stream self.pos = None @@ -65,7 +66,7 @@ class UserDirectoryHandler(StateDeltasHandler): # we start populating the user directory self.clock.call_later(0, self.notify_new_event) - def search_users(self, user_id, search_term, limit): + async def search_users(self, user_id, search_term, limit): """Searches for users in directory Returns: @@ -82,7 +83,16 @@ class UserDirectoryHandler(StateDeltasHandler): ] } """ - return self.store.search_user_dir(user_id, search_term, limit) + results = await self.store.search_user_dir(user_id, search_term, limit) + + # Remove any spammy users from the results. + results["results"] = [ + user + for user in results["results"] + if not self.spam_checker.check_username_for_spam(user) + ] + + return results def notify_new_event(self): """Called when there may be more deltas to process diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 26071059d2..0a4765fff4 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -147,6 +147,98 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): s = self.get_success(self.handler.search_users(u1, "user3", 10)) self.assertEqual(len(s["results"]), 0) + def test_spam_checker(self): + """ + A user which fails to the spam checks will not appear in search results. + """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + + # We do not add users to the directory until they join a room. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + # Check we have populated the database correctly. + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + self.assertEqual( + self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + ) + self.assertEqual(public_users, []) + + # We get one search result when searching for user2 by user1. 
+ s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + + # Configure a spam checker that does not filter any users. + spam_checker = self.hs.get_spam_checker() + + class AllowAll(object): + def check_username_for_spam(self, user_profile): + # Allow all users. + return False + + spam_checker.spam_checker = AllowAll() + + # The results do not change: + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + + # Configure a spam checker that filters all users. + class BlockAll(object): + def check_username_for_spam(self, user_profile): + # All users are spammy. + return True + + spam_checker.spam_checker = BlockAll() + + # User1 now gets no search results for any of the other users. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + def test_legacy_spam_checker(self): + """ + A spam checker without the expected method should be ignored. + """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + + # We do not add users to the directory until they join a room. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + # Check we have populated the database correctly. + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + self.assertEqual( + self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + ) + self.assertEqual(public_users, []) + + # Configure a spam checker. + spam_checker = self.hs.get_spam_checker() + # The spam checker doesn't need any methods, so create a bare object. + spam_checker.spam_checker = object() + + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + def _compress_shared(self, shared): """ Compress a list of users who share rooms dicts to a list of tuples. From 02e89021f58f931068ab0337de039181cc7f6569 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 09:05:43 -0500 Subject: [PATCH 118/382] Convert the directory handler tests to use HomeserverTestCase (#6919) Convert directory handler tests to use HomeserverTestCase. --- changelog.d/6919.misc | 1 + tests/handlers/test_directory.py | 41 +++++++++++++------------------- 2 files changed, 18 insertions(+), 24 deletions(-) create mode 100644 changelog.d/6919.misc diff --git a/changelog.d/6919.misc b/changelog.d/6919.misc new file mode 100644 index 0000000000..aa2cd89998 --- /dev/null +++ b/changelog.d/6919.misc @@ -0,0 +1 @@ +Convert the directory handler tests to use HomeserverTestCase. 
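The `check_username_for_spam` hook introduced in patch 117 above is what a deployment's spam checker module would implement to filter directory results. A minimal sketch of such a module — the deny-list logic and the `denied_words` config key are hypothetical, and a real module would also need to provide the other callbacks shown in `docs/spam_checker.md`, since only this hook has a backwards-compatible fallback:

```python
class DenyListSpamChecker(object):
    """Hides users whose display name contains a denied word (illustrative)."""

    def __init__(self, config):
        # Synapse instantiates the module with module(config=config).
        self.denied_words = config.get("denied_words", [])

    def check_username_for_spam(self, user_profile):
        # user_profile contains user_id, display_name and avatar_url.
        display_name = (user_profile.get("display_name") or "").lower()
        return any(word in display_name for word in self.denied_words)
```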
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 91c7a17070..ee88cf5a4b 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -19,24 +19,16 @@ from mock import Mock from twisted.internet import defer from synapse.config.room_directory import RoomDirectoryConfig -from synapse.handlers.directory import DirectoryHandler from synapse.rest.client.v1 import directory, room from synapse.types import RoomAlias from tests import unittest -from tests.utils import setup_test_homeserver -class DirectoryHandlers(object): - def __init__(self, hs): - self.directory_handler = DirectoryHandler(hs) - - -class DirectoryTestCase(unittest.TestCase): +class DirectoryTestCase(unittest.HomeserverTestCase): """ Tests the directory service. """ - @defer.inlineCallbacks - def setUp(self): + def make_homeserver(self, reactor, clock): self.mock_federation = Mock() self.mock_registry = Mock() @@ -47,14 +39,12 @@ class DirectoryTestCase(unittest.TestCase): self.mock_registry.register_query_handler = register_query_handler - hs = yield setup_test_homeserver( - self.addCleanup, + hs = self.setup_test_homeserver( http_client=None, resource_for_federation=Mock(), federation_client=self.mock_federation, federation_registry=self.mock_registry, ) - hs.handlers = DirectoryHandlers(hs) self.handler = hs.get_handlers().directory_handler @@ -64,23 +54,25 @@ class DirectoryTestCase(unittest.TestCase): self.your_room = RoomAlias.from_string("#your-room:test") self.remote_room = RoomAlias.from_string("#another:remote") - @defer.inlineCallbacks + return hs + def test_get_local_association(self): - yield self.store.create_room_alias_association( - self.my_room, "!8765qwer:test", ["test"] + self.get_success( + self.store.create_room_alias_association( + self.my_room, "!8765qwer:test", ["test"] + ) ) - result = yield self.handler.get_association(self.my_room) + result = self.get_success(self.handler.get_association(self.my_room)) self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result) - @defer.inlineCallbacks def test_get_remote_association(self): self.mock_federation.make_query.return_value = defer.succeed( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} ) - result = yield self.handler.get_association(self.remote_room) + result = self.get_success(self.handler.get_association(self.remote_room)) self.assertEquals( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}, result @@ -93,14 +85,15 @@ class DirectoryTestCase(unittest.TestCase): ignore_backoff=True, ) - @defer.inlineCallbacks def test_incoming_fed_query(self): - yield self.store.create_room_alias_association( - self.your_room, "!8765asdf:test", ["test"] + self.get_success( + self.store.create_room_alias_association( + self.your_room, "!8765asdf:test", ["test"] + ) ) - response = yield self.query_handlers["directory"]( - {"room_alias": "#your-room:test"} + response = self.get_success( + self.handler.on_directory_query({"room_alias": "#your-room:test"}) ) self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) From 97a42bbc3a4789620c48746f8e87291446f6f5ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 14 Feb 2020 16:22:30 +0000 Subject: [PATCH 119/382] Add a warning about indentation to generated config (#6920) Fixes #6916. 
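The indentation warning added by #6920 exists because YAML mistakes are easy to make and hard to spot. Two small illustrations with PyYAML (the keys are real Synapse options, values illustrative): a misaligned continuation fails to parse outright, while inconsistent indentation can also parse *successfully* with a different meaning:

```python
import yaml

# Misaligned continuation line: fails to parse.
try:
    yaml.safe_load("server_name: example.com\n  pid_file: /homeserver.pid\n")
except yaml.YAMLError as e:
    print("parse error:", e)

# Subtler: this parses, but "type" silently becomes a top-level key rather
# than part of the listener definition, because of its indentation.
print(yaml.safe_load("listeners:\n  port: 8008\ntype: http\n"))
# -> {'listeners': {'port': 8008}, 'type': 'http'}
```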
--- changelog.d/6920.misc | 1 + docs/.sample_config_header.yaml | 4 +++- docs/sample_config.yaml | 12 +++++++++++- synapse/config/_base.py | 16 ++++++++++++++-- 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6920.misc diff --git a/changelog.d/6920.misc b/changelog.d/6920.misc new file mode 100644 index 0000000000..d333add990 --- /dev/null +++ b/changelog.d/6920.misc @@ -0,0 +1 @@ +Add a warning about indentation to generated configuration files. diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml index e001ef5983..35a591d042 100644 --- a/docs/.sample_config_header.yaml +++ b/docs/.sample_config_header.yaml @@ -1,4 +1,4 @@ -# The config is maintained as an up-to-date snapshot of the default +# This file is maintained as an up-to-date snapshot of the default # homeserver.yaml configuration generated by Synapse. # # It is intended to act as a reference for the default configuration, @@ -10,3 +10,5 @@ # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in INSTALL.md. +################################################################################ + diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8e8cf513b0..93236daddc 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1,4 +1,4 @@ -# The config is maintained as an up-to-date snapshot of the default +# This file is maintained as an up-to-date snapshot of the default # homeserver.yaml configuration generated by Synapse. # # It is intended to act as a reference for the default configuration, @@ -10,6 +10,16 @@ # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in INSTALL.md. +################################################################################ + +# Configuration file for Synapse. +# +# This is a YAML file: see [1] for a quick introduction. Note in particular +# that *indentation is important*: all the elements of a list or dictionary +# should have the same indentation. +# +# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html + ## Server ## # The domain name of the server, with optional explicit port. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 08619404bb..ba846042c4 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -53,6 +53,18 @@ Missing mandatory `server_name` config option. """ +CONFIG_FILE_HEADER = """\ +# Configuration file for Synapse. +# +# This is a YAML file: see [1] for a quick introduction. Note in particular +# that *indentation is important*: all the elements of a list or dictionary +# should have the same indentation. 
+# +# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html + +""" + + def path_exists(file_path): """Check if a file exists @@ -344,7 +356,7 @@ class RootConfig(object): str: the yaml config file """ - return "\n\n".join( + return CONFIG_FILE_HEADER + "\n\n".join( dedent(conf) for conf in self.invoke_all( "generate_config_section", @@ -574,8 +586,8 @@ class RootConfig(object): if not path_exists(config_dir_path): os.makedirs(config_dir_path) with open(config_path, "w") as config_file: - config_file.write("# vim:ft=yaml\n\n") config_file.write(config_str) + config_file.write("\n\n# vim:ft=yaml") config_dict = yaml.safe_load(config_str) obj.generate_missing_files(config_dict, config_dir_path) From 32873efa87055518b08d9b3b001b3bf9b60437f9 Mon Sep 17 00:00:00 2001 From: Fridtjof Mund <2780577+fridtjof@users.noreply.github.com> Date: Fri, 14 Feb 2020 17:27:29 +0100 Subject: [PATCH 120/382] contrib/docker: Ensure correct encoding and locale settings on DB creation (#6921) Signed-off-by: Fridtjof Mund --- changelog.d/6921.docker | 1 + contrib/docker/docker-compose.yml | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/6921.docker diff --git a/changelog.d/6921.docker b/changelog.d/6921.docker new file mode 100644 index 0000000000..152e723339 --- /dev/null +++ b/changelog.d/6921.docker @@ -0,0 +1 @@ +Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 2b044baf78..5df29379c8 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -56,6 +56,9 @@ services: environment: - POSTGRES_USER=synapse - POSTGRES_PASSWORD=changeme + # ensure the database gets created correctly + # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database + - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C" volumes: # You may store the database tables in a local folder.. - ./schemas:/var/lib/postgresql/data From 43b2be9764bf244122f25a0f2bf8ec8c761f5cad Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Feb 2020 13:08:34 +0000 Subject: [PATCH 121/382] Replace _event_dict_property with DictProperty this amounts to the same thing, but replaces `_event_dict` with `_dict`, and removes some of the function layers generated by `property`. --- synapse/events/__init__.py | 144 ++++++++++++++++++-------------- tests/storage/test_redaction.py | 2 +- 2 files changed, 80 insertions(+), 66 deletions(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index a842661a90..512254f65d 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -37,6 +37,65 @@ from synapse.util.frozenutils import freeze USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0")) +class DictProperty: + """An object property which delegates to the `_dict` within its parent object.""" + + __slots__ = ["key"] + + def __init__(self, key: str): + self.key = key + + def __get__(self, instance, owner=None): + # if the property is accessed as a class property rather than an instance + # property, return the property itself rather than the value + if instance is None: + return self + try: + return instance._dict[self.key] + except KeyError as e1: + # We want this to look like a regular attribute error (mostly so that + # hasattr() works correctly), so we convert the KeyError into an + # AttributeError. 
+ # + # To exclude the KeyError from the traceback, we explicitly + # 'raise from e1.__context__' (which is better than 'raise from None', + # becuase that would omit any *earlier* exceptions). + # + raise AttributeError( + "'%s' has no '%s' property" % (type(instance), self.key) + ) from e1.__context__ + + def __set__(self, instance, v): + instance._dict[self.key] = v + + def __delete__(self, instance): + try: + del instance._dict[self.key] + except KeyError as e1: + raise AttributeError( + "'%s' has no '%s' property" % (type(instance), self.key) + ) from e1.__context__ + + +class DefaultDictProperty(DictProperty): + """An extension of DictProperty which provides a default if the property is + not present in the parent's _dict. + + Note that this means that hasattr() on the property always returns True. + """ + + __slots__ = ["default"] + + def __init__(self, key, default): + super().__init__(key) + self.default = default + + def __get__(self, instance, owner=None): + if instance is None: + return self + return instance._dict.get(self.key, self.default) + + class _EventInternalMetadata(object): def __init__(self, internal_metadata_dict): self.__dict__ = dict(internal_metadata_dict) @@ -117,51 +176,6 @@ class _EventInternalMetadata(object): return getattr(self, "redacted", False) -_SENTINEL = object() - - -def _event_dict_property(key, default=_SENTINEL): - """Creates a new property for the given key that delegates access to - `self._event_dict`. - - The default is used if the key is missing from the `_event_dict`, if given, - otherwise an AttributeError will be raised. - - Note: If a default is given then `hasattr` will always return true. - """ - - # We want to be able to use hasattr with the event dict properties. - # However, (on python3) hasattr expects AttributeError to be raised. 
Hence, - # we need to transform the KeyError into an AttributeError - - def getter_raises(self): - try: - return self._event_dict[key] - except KeyError: - raise AttributeError(key) - - def getter_default(self): - return self._event_dict.get(key, default) - - def setter(self, v): - try: - self._event_dict[key] = v - except KeyError: - raise AttributeError(key) - - def delete(self): - try: - del self._event_dict[key] - except KeyError: - raise AttributeError(key) - - if default is _SENTINEL: - # No default given, so use the getter that raises - return property(getter_raises, setter, delete) - else: - return property(getter_default, setter, delete) - - class EventBase(object): def __init__( self, @@ -175,23 +189,23 @@ class EventBase(object): self.unsigned = unsigned self.rejected_reason = rejected_reason - self._event_dict = event_dict + self._dict = event_dict self.internal_metadata = _EventInternalMetadata(internal_metadata_dict) - auth_events = _event_dict_property("auth_events") - depth = _event_dict_property("depth") - content = _event_dict_property("content") - hashes = _event_dict_property("hashes") - origin = _event_dict_property("origin") - origin_server_ts = _event_dict_property("origin_server_ts") - prev_events = _event_dict_property("prev_events") - redacts = _event_dict_property("redacts", None) - room_id = _event_dict_property("room_id") - sender = _event_dict_property("sender") - state_key = _event_dict_property("state_key") - type = _event_dict_property("type") - user_id = _event_dict_property("sender") + auth_events = DictProperty("auth_events") + depth = DictProperty("depth") + content = DictProperty("content") + hashes = DictProperty("hashes") + origin = DictProperty("origin") + origin_server_ts = DictProperty("origin_server_ts") + prev_events = DictProperty("prev_events") + redacts = DefaultDictProperty("redacts", None) + room_id = DictProperty("room_id") + sender = DictProperty("sender") + state_key = DictProperty("state_key") + type = DictProperty("type") + user_id = DictProperty("sender") @property def event_id(self) -> str: @@ -205,13 +219,13 @@ class EventBase(object): return hasattr(self, "state_key") and self.state_key is not None def get_dict(self) -> JsonDict: - d = dict(self._event_dict) + d = dict(self._dict) d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)}) return d def get(self, key, default=None): - return self._event_dict.get(key, default) + return self._dict.get(key, default) def get_internal_metadata_dict(self): return self.internal_metadata.get_dict() @@ -233,16 +247,16 @@ class EventBase(object): raise AttributeError("Unrecognized attribute %s" % (instance,)) def __getitem__(self, field): - return self._event_dict[field] + return self._dict[field] def __contains__(self, field): - return field in self._event_dict + return field in self._dict def items(self): - return list(self._event_dict.items()) + return list(self._dict.items()) def keys(self): - return six.iterkeys(self._event_dict) + return six.iterkeys(self._dict) def prev_event_ids(self): """Returns the list of prev event IDs. 
The order matches the order diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index b9ee6ec1ec..db3667dc43 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -240,7 +240,7 @@ class RedactionTestCase(unittest.HomeserverTestCase): built_event = yield self._base_builder.build(prev_event_ids) built_event._event_id = self._event_id - built_event._event_dict["event_id"] = self._event_id + built_event._dict["event_id"] = self._event_id assert built_event.event_id == self._event_id return built_event From 9551911f88b558d20e2fd9c2ae791fe5b732db02 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Feb 2020 13:22:17 +0000 Subject: [PATCH 122/382] Rewrite _EventInternalMetadata to back it with a _dict Mostly, this gives mypy an easier time. --- synapse/events/__init__.py | 55 +++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 512254f65d..7307116556 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -97,33 +98,55 @@ class DefaultDictProperty(DictProperty): class _EventInternalMetadata(object): - def __init__(self, internal_metadata_dict): - self.__dict__ = dict(internal_metadata_dict) + __slots__ = ["_dict"] - def get_dict(self): - return dict(self.__dict__) + def __init__(self, internal_metadata_dict: JsonDict): + # we have to copy the dict, because it turns out that the same dict is + # reused. TODO: fix that + self._dict = dict(internal_metadata_dict) - def is_outlier(self): - return getattr(self, "outlier", False) + outlier = DictProperty("outlier") # type: bool + out_of_band_membership = DictProperty("out_of_band_membership") # type: bool + send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str + recheck_redaction = DictProperty("recheck_redaction") # type: bool + soft_failed = DictProperty("soft_failed") # type: bool + proactively_send = DictProperty("proactively_send") # type: bool + redacted = DictProperty("redacted") # type: bool + txn_id = DictProperty("txn_id") # type: str + token_id = DictProperty("token_id") # type: str + stream_ordering = DictProperty("stream_ordering") # type: int - def is_out_of_band_membership(self): + # XXX: These are set by StreamWorkerStore._set_before_and_after. + # I'm pretty sure that these are never persisted to the database, so shouldn't + # be here + before = DictProperty("before") # type: str + after = DictProperty("after") # type: str + order = DictProperty("order") # type: int + + def get_dict(self) -> JsonDict: + return dict(self._dict) + + def is_outlier(self) -> bool: + return self._dict.get("outlier", False) + + def is_out_of_band_membership(self) -> bool: """Whether this is an out of band membership, like an invite or an invite rejection. This is needed as those events are marked as outliers, but they still need to be processed as if they're new events (e.g. updating invite state in the database, relaying to clients, etc). 
""" - return getattr(self, "out_of_band_membership", False) + return self._dict.get("out_of_band_membership", False) - def get_send_on_behalf_of(self): + def get_send_on_behalf_of(self) -> Optional[str]: """Whether this server should send the event on behalf of another server. This is used by the federation "send_join" API to forward the initial join event for a server in the room. returns a str with the name of the server this event is sent on behalf of. """ - return getattr(self, "send_on_behalf_of", None) + return self._dict.get("send_on_behalf_of") - def need_to_check_redaction(self): + def need_to_check_redaction(self) -> bool: """Whether the redaction event needs to be rechecked when fetching from the database. @@ -136,9 +159,9 @@ class _EventInternalMetadata(object): Returns: bool """ - return getattr(self, "recheck_redaction", False) + return self._dict.get("recheck_redaction", False) - def is_soft_failed(self): + def is_soft_failed(self) -> bool: """Whether the event has been soft failed. Soft failed events should be handled as usual, except: @@ -150,7 +173,7 @@ class _EventInternalMetadata(object): Returns: bool """ - return getattr(self, "soft_failed", False) + return self._dict.get("soft_failed", False) def should_proactively_send(self): """Whether the event, if ours, should be sent to other clients and @@ -162,7 +185,7 @@ class _EventInternalMetadata(object): Returns: bool """ - return getattr(self, "proactively_send", True) + return self._dict.get("proactively_send", True) def is_redacted(self): """Whether the event has been redacted. @@ -173,7 +196,7 @@ class _EventInternalMetadata(object): Returns: bool """ - return getattr(self, "redacted", False) + return self._dict.get("redacted", False) class EventBase(object): From 5a78f47f6e21505f84490c3b8da49e96ac8e3483 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 7 Feb 2020 13:27:58 +0000 Subject: [PATCH 123/382] changelog --- changelog.d/6872.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6872.misc diff --git a/changelog.d/6872.misc b/changelog.d/6872.misc new file mode 100644 index 0000000000..215a0c82c3 --- /dev/null +++ b/changelog.d/6872.misc @@ -0,0 +1 @@ +Refactor _EventInternalMetadata object to improve type safety. From 10027c80b031f1e62b47cd61c534420673f49a71 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 12:49:40 -0500 Subject: [PATCH 124/382] Add type hints to the spam check module (#6915) Add typing information to the spam checker modules. --- changelog.d/6915.misc | 1 + synapse/events/spamcheck.py | 44 ++++++++++++++++------------ synapse/spam_checker_api/__init__.py | 12 +++++--- tox.ini | 1 + 4 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 changelog.d/6915.misc diff --git a/changelog.d/6915.misc b/changelog.d/6915.misc new file mode 100644 index 0000000000..3a181ef243 --- /dev/null +++ b/changelog.d/6915.misc @@ -0,0 +1 @@ +Add type hints to the spam checker module. 
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 0a13fca9a4..a23b6b7b61 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -19,9 +19,13 @@ from typing import Dict from synapse.spam_checker_api import SpamCheckerApi +MYPY = False +if MYPY: + import synapse.server + class SpamChecker(object): - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.spam_checker = None module = None @@ -41,7 +45,7 @@ class SpamChecker(object): else: self.spam_checker = module(config=config) - def check_event_for_spam(self, event): + def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -49,26 +53,30 @@ class SpamChecker(object): users receive a blank event. Args: - event (synapse.events.EventBase): the event to be checked + event: the event to be checked Returns: - bool: True if the event is spammy. + True if the event is spammy. """ if self.spam_checker is None: return False return self.spam_checker.check_event_for_spam(event) - def user_may_invite(self, inviter_userid, invitee_userid, room_id): + def user_may_invite( + self, inviter_userid: str, invitee_userid: str, room_id: str + ) -> bool: """Checks if a given user may send an invite If this method returns false, the invite will be rejected. Args: - userid (string): The sender's user ID + inviter_userid: The user ID of the sender of the invitation + invitee_userid: The user ID targeted in the invitation + room_id: The room ID Returns: - bool: True if the user may send an invite, otherwise False + True if the user may send an invite, otherwise False """ if self.spam_checker is None: return True @@ -77,50 +85,50 @@ class SpamChecker(object): inviter_userid, invitee_userid, room_id ) - def user_may_create_room(self, userid): + def user_may_create_room(self, userid: str) -> bool: """Checks if a given user may create a room If this method returns false, the creation request will be rejected. Args: - userid (string): The sender's user ID + userid: The ID of the user attempting to create a room Returns: - bool: True if the user may create a room, otherwise False + True if the user may create a room, otherwise False """ if self.spam_checker is None: return True return self.spam_checker.user_may_create_room(userid) - def user_may_create_room_alias(self, userid, room_alias): + def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool: """Checks if a given user may create a room alias If this method returns false, the association request will be rejected. Args: - userid (string): The sender's user ID - room_alias (string): The alias to be created + userid: The ID of the user attempting to create a room alias + room_alias: The alias to be created Returns: - bool: True if the user may create a room alias, otherwise False + True if the user may create a room alias, otherwise False """ if self.spam_checker is None: return True return self.spam_checker.user_may_create_room_alias(userid, room_alias) - def user_may_publish_room(self, userid, room_id): + def user_may_publish_room(self, userid: str, room_id: str) -> bool: """Checks if a given user may publish a room to the directory If this method returns false, the publish request will be rejected. 
Args: - userid (string): The sender's user ID - room_id (string): The ID of the room that would be published + userid: The user ID attempting to publish the room + room_id: The ID of the room that would be published Returns: - bool: True if the user may publish the room, otherwise False + True if the user may publish the room, otherwise False """ if self.spam_checker is None: return True diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index efcc10f808..9b78924d96 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -18,6 +18,10 @@ from twisted.internet import defer from synapse.storage.state import StateFilter +MYPY = False +if MYPY: + import synapse.server + logger = logging.getLogger(__name__) @@ -26,18 +30,18 @@ class SpamCheckerApi(object): access to rooms and other relevant information. """ - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.hs = hs self._store = hs.get_datastore() @defer.inlineCallbacks - def get_state_events_in_room(self, room_id, types): + def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred: """Gets state events for the given room. Args: - room_id (string): The room ID to get state events in. - types (tuple): The event type and state key (using None + room_id: The room ID to get state events in. + types: The event type and state key (using None to represent 'any') of the room state to acquire. Returns: diff --git a/tox.ini b/tox.ini index f8229eba88..b9132a3177 100644 --- a/tox.ini +++ b/tox.ini @@ -179,6 +179,7 @@ extras = all commands = mypy \ synapse/api \ synapse/config/ \ + synapse/events/spamcheck.py \ synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/sync.py \ From 46fa66bbfd367b2c1fbdf585107cec75fa1bb193 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Feb 2020 11:30:50 +0000 Subject: [PATCH 125/382] wait for current_state_events_membership before delete_old_current_state_events (#6924) --- changelog.d/6924.bugfix | 1 + .../schema/delta/57/delete_old_current_state_events.sql | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6924.bugfix diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix new file mode 100644 index 0000000000..33e6611929 --- /dev/null +++ b/changelog.d/6924.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql index a133d87a19..aec06c8261 100644 --- a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql +++ b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql @@ -15,5 +15,8 @@ -- Add background update to go and delete current state events for rooms the -- server is no longer in. -INSERT into background_updates (update_name, progress_json) - VALUES ('delete_old_current_state_events', '{}'); +-- +-- this relies on the 'membership' column of current_state_events, so make sure +-- that's populated first! 
+INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ('delete_old_current_state_events', '{}', 'current_state_events_membership'); From 3404ad289b1d2e5bc5c7f277f519b9698dbdaa15 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 17 Feb 2020 13:23:37 +0000 Subject: [PATCH 126/382] Raise the default power levels for invites, tombstones and server acls (#6834) --- changelog.d/6834.misc | 1 + synapse/handlers/room.py | 10 +++++++++- tests/rest/client/v1/test_rooms.py | 4 +++- 3 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6834.misc diff --git a/changelog.d/6834.misc b/changelog.d/6834.misc new file mode 100644 index 0000000000..79acebe516 --- /dev/null +++ b/changelog.d/6834.misc @@ -0,0 +1 @@ +Change the default power levels of invites, tombstones and server ACLs for new rooms. \ No newline at end of file diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ab07edd2fc..033083acac 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -64,18 +64,21 @@ class RoomCreationHandler(BaseHandler): "history_visibility": "shared", "original_invitees_have_ops": False, "guest_can_join": True, + "power_level_content_override": {"invite": 0}, }, RoomCreationPreset.TRUSTED_PRIVATE_CHAT: { "join_rules": JoinRules.INVITE, "history_visibility": "shared", "original_invitees_have_ops": True, "guest_can_join": True, + "power_level_content_override": {"invite": 0}, }, RoomCreationPreset.PUBLIC_CHAT: { "join_rules": JoinRules.PUBLIC, "history_visibility": "shared", "original_invitees_have_ops": False, "guest_can_join": False, + "power_level_content_override": {}, }, } @@ -829,19 +832,24 @@ class RoomCreationHandler(BaseHandler): # This will be reudundant on pre-MSC2260 rooms, since the # aliases event is special-cased. 
EventTypes.Aliases: 0, + EventTypes.Tombstone: 100, + EventTypes.ServerACL: 100, }, "events_default": 0, "state_default": 50, "ban": 50, "kick": 50, "redact": 50, - "invite": 0, + "invite": 50, } if config["original_invitees_have_ops"]: for invitee in invite_list: power_level_content["users"][invitee] = 100 + # Power levels overrides are defined per chat preset + power_level_content.update(config["power_level_content_override"]) + if power_level_content_override: power_level_content.update(power_level_content_override) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index e3af280ba6..fb681a1db9 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1612,7 +1612,9 @@ class ContextTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.user_id = self.register_user("user", "password") self.tok = self.login("user", "password") - self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + self.room_id = self.helper.create_room_as( + self.user_id, tok=self.tok, is_public=False + ) self.other_user_id = self.register_user("user2", "password") self.other_tok = self.login("user2", "password") From d2455ec3aafe2b06174e0343799b30e12d08171d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Feb 2020 11:30:50 +0000 Subject: [PATCH 127/382] wait for current_state_events_membership before delete_old_current_state_events (#6924) --- changelog.d/6924.bugfix | 1 + .../schema/delta/57/delete_old_current_state_events.sql | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6924.bugfix diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix new file mode 100644 index 0000000000..33e6611929 --- /dev/null +++ b/changelog.d/6924.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql index a133d87a19..aec06c8261 100644 --- a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql +++ b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql @@ -15,5 +15,8 @@ -- Add background update to go and delete current state events for rooms the -- server is no longer in. -INSERT into background_updates (update_name, progress_json) - VALUES ('delete_old_current_state_events', '{}'); +-- +-- this relies on the 'membership' column of current_state_events, so make sure +-- that's populated first! 
+INSERT into background_updates (update_name, progress_json, depends_on) + VALUES ('delete_old_current_state_events', '{}', 'current_state_events_membership'); From fd6d83ed96971e267fe4ce9c120d1d6ec87a3582 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Feb 2020 16:27:33 +0000 Subject: [PATCH 128/382] 1.10.1 --- CHANGES.md | 9 +++++++++ changelog.d/6924.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6924.bugfix diff --git a/CHANGES.md b/CHANGES.md index 0bce84f400..37b650a848 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.10.1 (2020-02-17) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. ([\#6924](https://github.com/matrix-org/synapse/issues/6924)) + + Synapse 1.10.0 (2020-02-12) =========================== diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix deleted file mode 100644 index 33e6611929..0000000000 --- a/changelog.d/6924.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. diff --git a/debian/changelog b/debian/changelog index cdc3b1a5c2..90314d36af 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.10.1) stable; urgency=medium + + * New synapse release 1.10.1. + + -- Synapse Packaging team Mon, 17 Feb 2020 16:27:28 +0000 + matrix-synapse-py3 (1.10.0) stable; urgency=medium * New synapse release 1.10.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 9d285fca38..8313f177d2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0" +__version__ = "1.10.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From bc831d1d9a380efd4fc063565d5f1eda341e9644 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 17 Feb 2020 16:34:13 +0000 Subject: [PATCH 129/382] #6924 has been released in 1.10.1 --- changelog.d/6924.bugfix | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/6924.bugfix diff --git a/changelog.d/6924.bugfix b/changelog.d/6924.bugfix deleted file mode 100644 index 33e6611929..0000000000 --- a/changelog.d/6924.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.10.0 which would cause room state to be cleared in the database if Synapse was upgraded direct from 1.2.1 or earlier to 1.10.0. From 3be2abd0a9a089a147b23c6d58fc26dde63faa27 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Feb 2020 11:41:53 +0000 Subject: [PATCH 130/382] Kill off deprecated "config-on-the-fly" docker mode (#6918) Lots of people seem to get confused by this mode, and it's been deprecated since Synapse 1.1.0. It's time for it to go. 
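For context on the #6924 schema fix above: `depends_on` ensures one background update does not start until another has finished. A toy model of that ordering constraint — this is an illustration, not Synapse's actual background-update scheduler:

```python
# Maps update name -> the update it depends on (None if none), mirroring the
# two updates named in the SQL above.
pending = {
    "current_state_events_membership": None,
    "delete_old_current_state_events": "current_state_events_membership",
}
completed = set()

while pending:
    # Pick any update whose dependency (if it has one) has already completed.
    name = next(u for u, dep in pending.items() if dep is None or dep in completed)
    print("running background update:", name)
    completed.add(name)
    del pending[name]
```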
--- changelog.d/6918.docker | 1 + docker/README.md | 23 +++++++++++-------- docker/start.py | 51 +++++++++++++++-------------------------- 3 files changed, 34 insertions(+), 41 deletions(-) create mode 100644 changelog.d/6918.docker diff --git a/changelog.d/6918.docker b/changelog.d/6918.docker new file mode 100644 index 0000000000..cc2db5e071 --- /dev/null +++ b/changelog.d/6918.docker @@ -0,0 +1 @@ +The deprecated "generate-config-on-the-fly" mode is no longer supported. diff --git a/docker/README.md b/docker/README.md index 9f112a01d0..8c337149ca 100644 --- a/docker/README.md +++ b/docker/README.md @@ -110,12 +110,12 @@ argument to `docker run`. ## Legacy dynamic configuration file support -For backwards-compatibility only, the docker image supports creating a dynamic -configuration file based on environment variables. This is now deprecated, but -is enabled when the `SYNAPSE_SERVER_NAME` variable is set (and `generate` is -not given). +The docker image used to support creating a dynamic configuration file based +on environment variables. This is no longer supported, and an error will be +raised if you try to run synapse without a config file. -To migrate from a dynamic configuration file to a static one, run the docker +It is, however, possible to generate a static configuration file based on +the environment variables that were previously used. To do this, run the docker container once with the environment variables set, and `migrate_config` command line option. For example: @@ -127,15 +127,20 @@ docker run -it --rm \ matrixdotorg/synapse:latest migrate_config ``` -This will generate the same configuration file as the legacy mode used, but -will store it in `/data/homeserver.yaml` instead of a temporary location. You -can then use it as shown above at [Running synapse](#running-synapse). +This will generate the same configuration file as the legacy mode used, and +will store it in `/data/homeserver.yaml`. You can then use it as shown above at +[Running synapse](#running-synapse). + +Note that the defaults used in this configuration file may be different to +those when generating a new config file with `generate`: for example, TLS is +enabled by default in this mode. You are encouraged to inspect the generated +configuration file and edit it to ensure it meets your needs. ## Building the image If you need to build the image from a Synapse checkout, use the following `docker build` command from the repo's root: - + ``` docker build -t matrixdotorg/synapse -f docker/Dockerfile . ``` diff --git a/docker/start.py b/docker/start.py index 97fd247f8f..2a25c9380e 100755 --- a/docker/start.py +++ b/docker/start.py @@ -188,11 +188,6 @@ def main(args, environ): else: ownership = "{}:{}".format(desired_uid, desired_gid) - log( - "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s" - % (os.getuid(), os.getgid(), desired_uid, desired_gid) - ) - if ownership is None: log("Will not perform chmod/su-exec as UserID already matches request") @@ -213,38 +208,30 @@ def main(args, environ): if mode is not None: error("Unknown execution mode '%s'" % (mode,)) - if "SYNAPSE_SERVER_NAME" in environ: - # backwards-compatibility generate-a-config-on-the-fly mode - if "SYNAPSE_CONFIG_PATH" in environ: - error( - "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH " - "in `generate` or `migrate_config` mode. To start synapse using a " - "config file, unset the SYNAPSE_SERVER_NAME environment variable." 
- ) + config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") + config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml") - config_path = "/compiled/homeserver.yaml" - log( - "Generating config file '%s' on-the-fly from environment variables.\n" - "Note that this mode is deprecated. You can migrate to a static config\n" - "file by running with 'migrate_config'. See the README for more details." - % (config_path,) - ) - - generate_config_from_template("/compiled", config_path, environ, ownership) - else: - config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") - config_path = environ.get( - "SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml" - ) - if not os.path.exists(config_path): + if not os.path.exists(config_path): + if "SYNAPSE_SERVER_NAME" in environ: error( - "Config file '%s' does not exist. You should either create a new " - "config file by running with the `generate` argument (and then edit " - "the resulting file before restarting) or specify the path to an " - "existing config file with the SYNAPSE_CONFIG_PATH variable." + """\ +Config file '%s' does not exist. + +The synapse docker image no longer supports generating a config file on-the-fly +based on environment variables. You can migrate to a static config file by +running with 'migrate_config'. See the README for more details. +""" % (config_path,) ) + error( + "Config file '%s' does not exist. You should either create a new " + "config file by running with the `generate` argument (and then edit " + "the resulting file before restarting) or specify the path to an " + "existing config file with the SYNAPSE_CONFIG_PATH variable." + % (config_path,) + ) + log("Starting synapse with config file " + config_path) args = ["python", "-m", synapse_worker, "--config-path", config_path] From 8ee0d745169fa12fd116692a9484930bd7b38167 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 12:05:45 +0000 Subject: [PATCH 131/382] Split the delegating documentation out of federate.md and trim it down --- docs/delegate.md | 87 +++++++++++++++++++++++++++++++ docs/federate.md | 133 ----------------------------------------------- 2 files changed, 87 insertions(+), 133 deletions(-) create mode 100644 docs/delegate.md diff --git a/docs/delegate.md b/docs/delegate.md new file mode 100644 index 0000000000..4126fffe71 --- /dev/null +++ b/docs/delegate.md @@ -0,0 +1,87 @@ +# Delegation + +For a more flexible configuration, you can have `server_name` +resources (eg: `@user:example.com`) served by a different host and +port (eg: `synapse.example.com:443`). + +Without configuring delegation, the matrix federation will +expect to find your server via `example.com:8448`. The following methods +allow you retain a `server_name` of `example.com` so that your user IDs, room +aliases, etc continue to look like `*:example.com`, whilst having your +federation traffic routed to a different server (e.g. `synapse.example.com`). + +## .well-known delegation + +To use this method, you need to be able to alter the +`server_name` 's https server to serve the `/.well-known/matrix/server` +URL. Having an active server (with a valid TLS certificate) serving your +`server_name` domain is out of the scope of this documentation. 
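As the text just below explains, the endpoint returns a JSON object with an `m.server` key. A quick way to check what a server actually advertises — the domain is illustrative, and this uses the third-party `requests` package:

```python
import requests

resp = requests.get("https://example.com/.well-known/matrix/server", timeout=10)
resp.raise_for_status()
print(resp.json().get("m.server"))  # e.g. "synapse.example.com:443"
```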
+ +The URL `https:///.well-known/matrix/server` should +return a JSON structure containing the key `m.server` like so: + +```json +{ + "m.server": "[:]" +} +``` + +In our example, this would mean that URL `https://example.com/.well-known/matrix/server` +should return: + +```json +{ + "m.server": "synapse.example.com:443" +} +``` + +Note, specifying a port is optional. If no port is specified, then it defaults +to 8448. + +Most installations will not need to configure .well-known. However, it can be +useful in cases where the admin is hosting on behalf of someone else and +therefore cannot gain access to the necessary certificate. With .well-known, +federation servers will check for a valid TLS certificate for the delegated +hostname (in our example: `synapse.example.com`). + +## Delegation FAQ + +### When do I need delegation? + +If your homeserver's APIs are accessible on the default federation port (8448) +and the domain your `server_name` points to, you do not need any delegation. + +For instance, if you registered `example.com` and pointed its DNS A record at a +fresh server, you could install Synapse on that host, giving it a `server_name` +of `example.com`, and once a reverse proxy has been set up to proxy all requests +sent to the port `8448` and serve TLS certificates for `example.com`, you +wouldn't need any delegation set up. + +**However**, if your homeserver's APIs aren't accessible on port 8448 and on the +domain `server_name` points to, you will need to let other servers know how to +find it using delegation. + +### Do you still recommend against using a reverse proxy on the federation port? + +We no longer actively recommend against using a reverse proxy. Many admins will +find it easier to direct federation traffic to a reverse proxy and manage their +own TLS certificates, and this is a supported configuration. + +See [reverse_proxy.md](reverse_proxy.md) for information on setting up a +reverse proxy. + +### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy? + +This is no longer necessary. If you are using a reverse proxy for all of your +TLS traffic, then you can set `no_tls: True` in the Synapse config. + +In that case, the only reason Synapse needs the certificate is to populate a legacy +`tls_fingerprints` field in the federation API. This is ignored by Synapse 0.99.0 +and later, and the only time pre-0.99 Synapses will check it is when attempting to +fetch the server keys - and generally this is delegated via `matrix.org`, which +is running a modern version of Synapse. + +### Do I need the same certificate for the client and federation port? + +No. There is nothing stopping you from using different certificates, +particularly if you are using a reverse proxy. \ No newline at end of file diff --git a/docs/federate.md b/docs/federate.md index f9f17fcca5..8552927225 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -18,139 +18,6 @@ Once federation has been configured, you should be able to join a room over federation. A good place to start is ``#synapse:matrix.org`` - a room for Synapse admins. - -## Delegation - -For a more flexible configuration, you can have ``server_name`` -resources (eg: ``@user:example.com``) served by a different host and -port (eg: ``synapse.example.com:443``). There are two ways to do this: - -- adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``. -- adding a DNS ``SRV`` record in the DNS zone of domain - ``example.com``. 
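The SRV-based delegation being removed from `federate.md` here can still be checked programmatically, equivalent to the `dig -t srv` commands in the removed text below. A sketch using the third-party `dnspython` package (2.x API; the domain is illustrative):

```python
import dns.resolver

for rr in dns.resolver.resolve("_matrix._tcp.example.com", "SRV"):
    print(rr.priority, rr.weight, rr.port, rr.target)
```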
-
-Without configuring delegation, the matrix federation will
-expect to find your server via ``example.com:8448``. The following methods
-allow you retain a `server_name` of `example.com` so that your user IDs, room
-aliases, etc continue to look like `*:example.com`, whilst having your
-federation traffic routed to a different server.
-
-### .well-known delegation
-
-To use this method, you need to be able to alter the
-``server_name`` 's https server to serve the ``/.well-known/matrix/server``
-URL. Having an active server (with a valid TLS certificate) serving your
-``server_name`` domain is out of the scope of this documentation.
-
-The URL ``https://<server_name>/.well-known/matrix/server`` should
-return a JSON structure containing the key ``m.server`` like so:
-
-    {
-        "m.server": "<synapse.server.name>[:<yourport>]"
-    }
-
-In our example, this would mean that URL ``https://example.com/.well-known/matrix/server``
-should return:
-
-    {
-        "m.server": "synapse.example.com:443"
-    }
-
-Note, specifying a port is optional. If a port is not specified an SRV lookup
-is performed, as described below. If the target of the
-delegation does not have an SRV record, then the port defaults to 8448.
-
-Most installations will not need to configure .well-known. However, it can be
-useful in cases where the admin is hosting on behalf of someone else and
-therefore cannot gain access to the necessary certificate. With .well-known,
-federation servers will check for a valid TLS certificate for the delegated
-hostname (in our example: ``synapse.example.com``).
-
-### DNS SRV delegation
-
-To use this delegation method, you need to have write access to your
-``server_name`` 's domain zone DNS records (in our example it would be
-``example.com`` DNS zone).
-
-This method requires the target server to provide a
-valid TLS certificate for the original ``server_name``.
-
-You need to add a SRV record in your ``server_name`` 's DNS zone with
-this format:
-
-    _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
-
-In our example, we would need to add this SRV record in the
-``example.com`` DNS zone:
-
-    _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
-
-Once done and set up, you can check the DNS record with ``dig -t srv
-_matrix._tcp.<server_name>``. In our example, we would expect this:
-
-    $ dig -t srv _matrix._tcp.example.com
-    _matrix._tcp.example.com. 3600 IN SRV 10 0 443 synapse.example.com.
-
-Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
-directly to the server hosting the synapse instance.
-
-### Delegation FAQ
-#### When do I need a SRV record or .well-known URI?
-
-If your homeserver listens on the default federation port (8448), and your
-`server_name` points to the host that your homeserver runs on, you do not need an SRV
-record or `.well-known/matrix/server` URI.
-
-For instance, if you registered `example.com` and pointed its DNS A record at a
-fresh server, you could install Synapse on that host,
-giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
-it would automatically generate a valid TLS certificate for you via Let's Encrypt
-and no SRV record or .well-known URI would be needed.
-
-**However**, if your server does not listen on port 8448, or if your `server_name`
-does not point to the host that your homeserver runs on, you will need to let
-other servers know how to find it. The way to do this is via .well-known or an
-SRV record.
-
-#### I have created a .well-known URI. Do I also need an SRV record?
-
-No.
You can use either `.well-known` delegation or use an SRV record for delegation. You -do not need to use both to delegate to the same location. - -#### Can I manage my own certificates rather than having Synapse renew certificates itself? - -Yes, you are welcome to manage your certificates yourself. Synapse will only -attempt to obtain certificates from Let's Encrypt if you configure it to do -so.The only requirement is that there is a valid TLS cert present for -federation end points. - -#### Do you still recommend against using a reverse proxy on the federation port? - -We no longer actively recommend against using a reverse proxy. Many admins will -find it easier to direct federation traffic to a reverse proxy and manage their -own TLS certificates, and this is a supported configuration. - -See [reverse_proxy.md](reverse_proxy.md) for information on setting up a -reverse proxy. - -#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy? - -Practically speaking, this is no longer necessary. - -If you are using a reverse proxy for all of your TLS traffic, then you can set -`no_tls: True` in the Synapse config. In that case, the only reason Synapse -needs the certificate is to populate a legacy `tls_fingerprints` field in the -federation API. This is ignored by Synapse 0.99.0 and later, and the only time -pre-0.99 Synapses will check it is when attempting to fetch the server keys - -and generally this is delegated via `matrix.org`, which will be running a modern -version of Synapse. - -#### Do I need the same certificate for the client and federation port? - -No. There is nothing stopping you from using different certificates, -particularly if you are using a reverse proxy. However, Synapse will use the -same certificate on any ports where TLS is configured. - ## Troubleshooting You can use the [federation tester]( From fe3941f6e33a17fa7cdf209a4370f4e805341db4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 18 Feb 2020 07:29:44 -0500 Subject: [PATCH 132/382] Stop sending events when creating or deleting aliases (#6904) Stop sending events when creating or deleting associations (room aliases). Send an updated canonical alias event if one of the alt_aliases is deleted. --- changelog.d/6904.removal | 1 + synapse/handlers/directory.py | 75 ++++++++------- synapse/handlers/room.py | 6 +- tests/handlers/test_directory.py | 154 ++++++++++++++++++++++++++++++- 4 files changed, 194 insertions(+), 42 deletions(-) create mode 100644 changelog.d/6904.removal diff --git a/changelog.d/6904.removal b/changelog.d/6904.removal new file mode 100644 index 0000000000..a5cc0c3605 --- /dev/null +++ b/changelog.d/6904.removal @@ -0,0 +1 @@ +Stop sending alias events during adding / removing aliases. Check alt_aliases in the latest canonical aliases event when deleting an alias. 
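The heart of this change is the pruning rule applied to the content of an
existing `m.room.canonical_alias` event when an alias is removed, implemented
in `_update_canonical_alias` in the `directory.py` diff below. As a standalone
illustration of that rule (plain dicts stand in for Synapse's event content,
and the function name is invented for the example):

```python
from typing import Optional


def prune_canonical_alias_content(content: dict, removed_alias: str) -> Optional[dict]:
    """Return updated m.room.canonical_alias content, or None if no update is needed."""
    new_content = dict(content)  # don't mutate the original event content
    send_update = False

    # Drop the "alias" property if it points at the removed alias.
    if new_content.get("alias", "") == removed_alias:
        new_content.pop("alias", "")
        send_update = True

    # Filter the removed alias out of "alt_aliases", but only if it is a list.
    alt_aliases = new_content.pop("alt_aliases", None)
    if isinstance(alt_aliases, list):
        send_update = True
        alt_aliases = [alias for alias in alt_aliases if alias != removed_alias]
        if alt_aliases:
            new_content["alt_aliases"] = alt_aliases

    return new_content if send_update else None


assert prune_canonical_alias_content(
    {"alias": "#a:test", "alt_aliases": ["#a:test", "#b:test"]}, "#a:test"
) == {"alt_aliases": ["#b:test"]}
assert prune_canonical_alias_content({}, "#a:test") is None
```

As in the patch itself, an update is sent whenever `alt_aliases` is present as
a list (even if the removed alias was not actually in it), and no update is
sent for a malformed, non-list `alt_aliases`.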
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 8c5980cb0c..f718388884 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -81,13 +81,7 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def create_association( - self, - requester, - room_alias, - room_id, - servers=None, - send_event=True, - check_membership=True, + self, requester, room_alias, room_id, servers=None, check_membership=True, ): """Attempt to create a new alias @@ -97,7 +91,6 @@ class DirectoryHandler(BaseHandler): room_id (str) servers (list[str]|None): List of servers that others servers should try and join via - send_event (bool): Whether to send an updated m.room.aliases event check_membership (bool): Whether to check if the user is in the room before the alias can be set (if the server's config requires it). @@ -150,16 +143,9 @@ class DirectoryHandler(BaseHandler): ) yield self._create_association(room_alias, room_id, servers, creator=user_id) - if send_event: - try: - yield self.send_room_alias_update_event(requester, room_id) - except AuthError as e: - # sending the aliases event may fail due to the user not having - # permission in the room; this is permitted. - logger.info("Skipping updating aliases event due to auth error %s", e) @defer.inlineCallbacks - def delete_association(self, requester, room_alias, send_event=True): + def delete_association(self, requester, room_alias): """Remove an alias from the directory (this is only meant for human users; AS users should call @@ -168,9 +154,6 @@ class DirectoryHandler(BaseHandler): Args: requester (Requester): room_alias (RoomAlias): - send_event (bool): Whether to send an updated m.room.aliases event. - Note that, if we delete the canonical alias, we will always attempt - to send an m.room.canonical_alias event Returns: Deferred[unicode]: room id that the alias used to point to @@ -206,9 +189,6 @@ class DirectoryHandler(BaseHandler): room_id = yield self._delete_association(room_alias) try: - if send_event: - yield self.send_room_alias_update_event(requester, room_id) - yield self._update_canonical_alias( requester, requester.user.to_string(), room_id, room_alias ) @@ -319,25 +299,50 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def _update_canonical_alias(self, requester, user_id, room_id, room_alias): + """ + Send an updated canonical alias event if the removed alias was set as + the canonical alias or listed in the alt_aliases field. + """ alias_event = yield self.state.get_current_state( room_id, EventTypes.CanonicalAlias, "" ) - alias_str = room_alias.to_string() - if not alias_event or alias_event.content.get("alias", "") != alias_str: + # There is no canonical alias, nothing to do. + if not alias_event: return - yield self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.CanonicalAlias, - "state_key": "", - "room_id": room_id, - "sender": user_id, - "content": {}, - }, - ratelimit=False, - ) + # Obtain a mutable version of the event content. + content = dict(alias_event.content) + send_update = False + + # Remove the alias property if it matches the removed alias. + alias_str = room_alias.to_string() + if alias_event.content.get("alias", "") == alias_str: + send_update = True + content.pop("alias", "") + + # Filter alt_aliases for the removed alias. + alt_aliases = content.pop("alt_aliases", None) + # If the aliases are not a list (or not found) do not attempt to modify + # the list. 
+ if isinstance(alt_aliases, list): + send_update = True + alt_aliases = [alias for alias in alt_aliases if alias != alias_str] + if alt_aliases: + content["alt_aliases"] = alt_aliases + + if send_update: + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, + { + "type": EventTypes.CanonicalAlias, + "state_key": "", + "room_id": room_id, + "sender": user_id, + "content": content, + }, + ratelimit=False, + ) @defer.inlineCallbacks def get_association_from_room_alias(self, room_alias): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 033083acac..49ec2f48bc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -478,9 +478,7 @@ class RoomCreationHandler(BaseHandler): for alias_str in aliases: alias = RoomAlias.from_string(alias_str) try: - yield directory_handler.delete_association( - requester, alias, send_event=False - ) + yield directory_handler.delete_association(requester, alias) removed_aliases.append(alias_str) except SynapseError as e: logger.warning("Unable to remove alias %s from old room: %s", alias, e) @@ -511,7 +509,6 @@ class RoomCreationHandler(BaseHandler): RoomAlias.from_string(alias), new_room_id, servers=(self.hs.hostname,), - send_event=False, check_membership=False, ) logger.info("Moved alias %s to new room", alias) @@ -664,7 +661,6 @@ class RoomCreationHandler(BaseHandler): room_id=room_id, room_alias=room_alias, servers=[self.hs.hostname], - send_event=False, check_membership=False, ) diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index ee88cf5a4b..27b916aed4 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -18,9 +18,11 @@ from mock import Mock from twisted.internet import defer +import synapse.api.errors +from synapse.api.constants import EventTypes from synapse.config.room_directory import RoomDirectoryConfig -from synapse.rest.client.v1 import directory, room -from synapse.types import RoomAlias +from synapse.rest.client.v1 import directory, login, room +from synapse.types import RoomAlias, create_requester from tests import unittest @@ -85,6 +87,38 @@ class DirectoryTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) + def test_delete_alias_not_allowed(self): + room_id = "!8765qwer:test" + self.get_success( + self.store.create_room_alias_association(self.my_room, room_id, ["test"]) + ) + + self.get_failure( + self.handler.delete_association( + create_requester("@user:test"), self.my_room + ), + synapse.api.errors.AuthError, + ) + + def test_delete_alias(self): + room_id = "!8765qwer:test" + user_id = "@user:test" + self.get_success( + self.store.create_room_alias_association( + self.my_room, room_id, ["test"], user_id + ) + ) + + result = self.get_success( + self.handler.delete_association(create_requester(user_id), self.my_room) + ) + self.assertEquals(room_id, result) + + # The alias should not be found. + self.get_failure( + self.handler.get_association(self.my_room), synapse.api.errors.SynapseError + ) + def test_incoming_fed_query(self): self.get_success( self.store.create_room_alias_association( @@ -99,6 +133,122 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) +class CanonicalAliasTestCase(unittest.HomeserverTestCase): + """Test modifications of the canonical alias when delete aliases. 
+ """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + self.handler = hs.get_handlers().directory_handler + self.state_handler = hs.get_state_handler() + + # Create user + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create a test room + self.room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.test_alias = "#test:test" + self.room_alias = RoomAlias.from_string(self.test_alias) + + # Create a new alias to this room. + self.get_success( + self.store.create_room_alias_association( + self.room_alias, self.room_id, ["test"], self.admin_user + ) + ) + + def test_remove_alias(self): + """Removing an alias that is the canonical alias should remove it there too.""" + # Set this new alias as the canonical alias for this room + self.helper.send_state( + self.room_id, + "m.room.canonical_alias", + {"alias": self.test_alias, "alt_aliases": [self.test_alias]}, + tok=self.admin_user_tok, + ) + + data = self.get_success( + self.state_handler.get_current_state( + self.room_id, EventTypes.CanonicalAlias, "" + ) + ) + self.assertEqual(data["content"]["alias"], self.test_alias) + self.assertEqual(data["content"]["alt_aliases"], [self.test_alias]) + + # Finally, delete the alias. + self.get_success( + self.handler.delete_association( + create_requester(self.admin_user), self.room_alias + ) + ) + + data = self.get_success( + self.state_handler.get_current_state( + self.room_id, EventTypes.CanonicalAlias, "" + ) + ) + self.assertNotIn("alias", data["content"]) + self.assertNotIn("alt_aliases", data["content"]) + + def test_remove_other_alias(self): + """Removing an alias listed as in alt_aliases should remove it there too.""" + # Create a second alias. + other_test_alias = "#test2:test" + other_room_alias = RoomAlias.from_string(other_test_alias) + self.get_success( + self.store.create_room_alias_association( + other_room_alias, self.room_id, ["test"], self.admin_user + ) + ) + + # Set the alias as the canonical alias for this room. + self.helper.send_state( + self.room_id, + "m.room.canonical_alias", + { + "alias": self.test_alias, + "alt_aliases": [self.test_alias, other_test_alias], + }, + tok=self.admin_user_tok, + ) + + data = self.get_success( + self.state_handler.get_current_state( + self.room_id, EventTypes.CanonicalAlias, "" + ) + ) + self.assertEqual(data["content"]["alias"], self.test_alias) + self.assertEqual( + data["content"]["alt_aliases"], [self.test_alias, other_test_alias] + ) + + # Delete the second alias. 
+ self.get_success( + self.handler.delete_association( + create_requester(self.admin_user), other_room_alias + ) + ) + + data = self.get_success( + self.state_handler.get_current_state( + self.room_id, EventTypes.CanonicalAlias, "" + ) + ) + self.assertEqual(data["content"]["alias"], self.test_alias) + self.assertEqual(data["content"]["alt_aliases"], [self.test_alias]) + + class TestCreateAliasACL(unittest.HomeserverTestCase): user_id = "@test:test" From 3c67eee6dc960ce7218f15489594a292a2708bd8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 13:51:03 +0000 Subject: [PATCH 133/382] Make federate.md more of a sumary of the steps to follow to set up replication --- docs/federate.md | 45 ++++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/docs/federate.md b/docs/federate.md index 8552927225..255e907b58 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -1,30 +1,41 @@ -Setting up Federation +Setting up federation ===================== Federation is the process by which users on different servers can participate in the same room. For this to work, those other servers must be able to contact yours to send messages. -The ``server_name`` configured in the Synapse configuration file (often -``homeserver.yaml``) defines how resources (users, rooms, etc.) will be -identified (eg: ``@user:example.com``, ``#room:example.com``). By -default, it is also the domain that other servers will use to -try to reach your server (via port 8448). This is easy to set -up and will work provided you set the ``server_name`` to match your -machine's public DNS hostname, and provide Synapse with a TLS certificate -which is valid for your ``server_name``. +The `server_name` configured in the Synapse configuration file (often +`homeserver.yaml`) defines how resources (users, rooms, etc.) will be +identified (eg: `@user:example.com`, `#room:example.com`). By default, +it is also the domain that other servers will use to try to reach your +server (via port 8448). This is easy to set up and will work provided +you set the `server_name` to match your machine's public DNS hostname. + +You will also need a valid TLS certificate for this `server_name` served +on port 8448 - the preferred way to do that is by using a reverse proxy, +see [reverse_proxy.md]() for instructions on how to +correctly set one up. + +In some cases you might not want Synapse to be running on the machine that +has the `server_name` as its public DNS hostname, or federation traffic +to use port than 8448 (e.g. you want to use `example.com` as your `server_name` +but want Synapse to be reachable on `synapse.example.com:443`). This can +be done using delegation, which allows an admin to dictate where federation +traffic should be sent, see [delegate.md]() for instructions on +how to set this up. Once federation has been configured, you should be able to join a room over -federation. A good place to start is ``#synapse:matrix.org`` - a room for +federation. A good place to start is `#synapse:matrix.org` - a room for Synapse admins. ## Troubleshooting -You can use the [federation tester]( -) to check if your homeserver is -configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN). -Note that you'll have to modify this URL to replace ``DOMAIN`` with your -``server_name``. Hitting the API directly provides extra detail. 
+You can use the [federation tester]() +to check if your homeserver is configured correctly. Alternatively try the +[JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN). +Note that you'll have to modify this URL to replace `DOMAIN` with your +`server_name`. Hitting the API directly provides extra detail. The typical failure mode for federation is that when the server tries to join a room, it is rejected with "401: Unauthorized". Generally this means that other @@ -36,8 +47,8 @@ you invite them to. This can be caused by an incorrectly-configured reverse proxy: see [reverse_proxy.md]() for instructions on how to correctly configure a reverse proxy. -## Running a Demo Federation of Synapses +## Running a demo federation of Synapses If you want to get up and running quickly with a trio of homeservers in a -private federation, there is a script in the ``demo`` directory. This is mainly +private federation, there is a script in the `demo` directory. This is mainly useful just for development purposes. See [demo/README](<../demo/README>). From e837be5b5cb43406bd124e3f27a2b7be1bd31aa8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 13:53:46 +0000 Subject: [PATCH 134/382] Fix links in the reverse proxy doc --- docs/reverse_proxy.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index dcfc5c64aa..af6d73927a 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -18,9 +18,10 @@ When setting up a reverse proxy, remember that Matrix clients and other Matrix servers do not necessarily need to connect to your server via the same server name or port. Indeed, clients will use port 443 by default, whereas servers default to port 8448. Where these are different, we -refer to the 'client port' and the \'federation port\'. See [Setting -up federation](federate.md) for more details of the algorithm used for -federation connections. +refer to the 'client port' and the \'federation port\'. See [the Matrix +specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names) +for more details of the algorithm used for federation connections, and +[delegate.md]() for instructions on setting up delegation. Let's assume that we expect clients to connect to our server at `https://matrix.example.com`, and other servers to connect at From ba7a5238547cf6c23dda37b2f2424ea5af5d9192 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 13:57:15 +0000 Subject: [PATCH 135/382] Argh trailing spaces --- docs/federate.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/federate.md b/docs/federate.md index 255e907b58..e85a077c34 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -10,7 +10,7 @@ The `server_name` configured in the Synapse configuration file (often identified (eg: `@user:example.com`, `#room:example.com`). By default, it is also the domain that other servers will use to try to reach your server (via port 8448). This is easy to set up and will work provided -you set the `server_name` to match your machine's public DNS hostname. +you set the `server_name` to match your machine's public DNS hostname. You will also need a valid TLS certificate for this `server_name` served on port 8448 - the preferred way to do that is by using a reverse proxy, @@ -23,7 +23,7 @@ to use port than 8448 (e.g. 
you want to use `example.com` as your `server_name` but want Synapse to be reachable on `synapse.example.com:443`). This can be done using delegation, which allows an admin to dictate where federation traffic should be sent, see [delegate.md]() for instructions on -how to set this up. +how to set this up. Once federation has been configured, you should be able to join a room over federation. A good place to start is `#synapse:matrix.org` - a room for From d009535639116ba27268a44ed2cd5514a9f8a6a8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 14:07:41 +0000 Subject: [PATCH 136/382] Add mention of SRV records as an advanced topic --- docs/delegate.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/delegate.md b/docs/delegate.md index 4126fffe71..7b55827cec 100644 --- a/docs/delegate.md +++ b/docs/delegate.md @@ -44,6 +44,16 @@ therefore cannot gain access to the necessary certificate. With .well-known, federation servers will check for a valid TLS certificate for the delegated hostname (in our example: `synapse.example.com`). +## SRV DNS record delegation + +It is also possible to do delegation using a SRV DNS record. However, that is +considered an advanced topic since it's a bit complex to set up, and `.well-known` +delegation is already enough in most cases. + +However, if you really need it, you can find some documentation on how such a +record should look like and how Synapse will use it in [the Matrix +specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names). + ## Delegation FAQ ### When do I need delegation? From b1255077f584260a2296d4f3b7b78b54596a76b5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 14:27:57 +0000 Subject: [PATCH 137/382] Changelog --- changelog.d/6940.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6940.doc diff --git a/changelog.d/6940.doc b/changelog.d/6940.doc new file mode 100644 index 0000000000..8c75f48d3d --- /dev/null +++ b/changelog.d/6940.doc @@ -0,0 +1 @@ +Clean up and update docs on setting up federation. From bfbe2f5b08857dc845664645b9d4e24fe479d2a0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 15:10:41 +0000 Subject: [PATCH 138/382] Print the error as an error log and raise the same exception we got --- synapse/handlers/acme.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index e6797535e6..2942df3ac7 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -22,10 +22,17 @@ from twisted.web import server, static from twisted.web.resource import Resource from synapse.app import check_bind_error -from synapse.config import ConfigError logger = logging.getLogger(__name__) +ACME_REGISTER_FAIL_ERROR = """ +Failed to register with the ACME provider. This is likely happening because the install +is new, and ACME v1 has been deprecated by Let's Encrypt and is disabled for installs set +up after November 2019. +At the moment, Synapse doesn't support ACME v2. For more info and alternative solution, +check out https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 +------------------------------------------------------""" + class AcmeHandler(object): def __init__(self, hs): @@ -76,14 +83,8 @@ class AcmeHandler(object): try: yield self._issuer._ensure_registered() except Exception: - raise ConfigError( - "Failed to register with the ACME provider. 
This is likely happening" - " because the install is new, and ACME v1 has been deprecated by Let's" - " Encrypt and is disabled for installs set up after November 2019. At the" - " moment, Synapse doesn't support ACME v2. For more info and alternative" - " solution, check out https://github.com/matrix-org/synapse/blob/master" - "/docs/ACME.md#deprecation-of-acme-v1" - ) + logger.error(ACME_REGISTER_FAIL_ERROR) + raise @defer.inlineCallbacks def provision_certificate(self): From 9801a042f3e5dc5ad623ab5a2f39661a2ccbd8f9 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 15:15:43 +0000 Subject: [PATCH 139/382] Make the log more noticeable --- synapse/handlers/acme.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index 2942df3ac7..250faa997b 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -26,12 +26,13 @@ from synapse.app import check_bind_error logger = logging.getLogger(__name__) ACME_REGISTER_FAIL_ERROR = """ +-------------------------------------------------------------------------------- Failed to register with the ACME provider. This is likely happening because the install is new, and ACME v1 has been deprecated by Let's Encrypt and is disabled for installs set up after November 2019. At the moment, Synapse doesn't support ACME v2. For more info and alternative solution, check out https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 -------------------------------------------------------""" +--------------------------------------------------------------------------------""" class AcmeHandler(object): From 818def82486627513dc95e64c46c0bb452651e7e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Feb 2020 15:27:45 +0000 Subject: [PATCH 140/382] Fix worker docs to point `/publicised_groups` API correctly. (#6938) --- changelog.d/6938.doc | 1 + docs/workers.md | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6938.doc diff --git a/changelog.d/6938.doc b/changelog.d/6938.doc new file mode 100644 index 0000000000..117f76f48a --- /dev/null +++ b/changelog.d/6938.doc @@ -0,0 +1 @@ +Fix worker docs to point `/publicised_groups` API correctly. diff --git a/docs/workers.md b/docs/workers.md index 6f7ec58780..0d84a58958 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -261,7 +261,8 @@ following regular expressions: ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ - ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$ + ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$ + ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/ Additionally, the following REST endpoints can be handled for GET requests: @@ -287,8 +288,8 @@ the following regular expressions: ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$ -When using this worker you must also set `update_user_directory: False` in the -shared configuration file to stop the main synapse running background +When using this worker you must also set `update_user_directory: False` in the +shared configuration file to stop the main synapse running background jobs related to updating the user directory. ### `synapse.app.frontend_proxy` From 8a380d0fe24edd746256d652836ec27003a05e7e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 18 Feb 2020 15:39:09 +0000 Subject: [PATCH 141/382] Increase perf of `get_auth_chain_ids` used in state res v2. 
(#6937)

We do this by moving the recursive query to be fully in the DB.
---
 changelog.d/6937.misc                         |  1 +
 .../data_stores/main/event_federation.py      | 23 +++++++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 changelog.d/6937.misc

diff --git a/changelog.d/6937.misc b/changelog.d/6937.misc
new file mode 100644
index 0000000000..6d00e58654
--- /dev/null
+++ b/changelog.d/6937.misc
@@ -0,0 +1 @@
+Increase perf of `get_auth_chain_ids` used in state res v2.
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 60c67457b4..1746f40adf 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -26,6 +26,7 @@ from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
 from synapse.storage.database import Database
+from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import cached

 logger = logging.getLogger(__name__)
@@ -61,6 +62,28 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )

     def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+        if isinstance(self.database_engine, PostgresEngine):
+            # For efficiency we make the database do this if we can.
+            sql = """
+                WITH RECURSIVE auth_chain(event_id) AS (
+                    SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
+                    UNION
+                    SELECT auth_id FROM event_auth
+                    INNER JOIN auth_chain USING (event_id)
+                )
+                SELECT event_id FROM auth_chain
+            """
+            txn.execute(sql, (list(event_ids),))
+
+            results = set(event_id for event_id, in txn)
+
+            if include_given:
+                results.update(event_ids)
+
+            return list(results)
+
+        # Database doesn't necessarily support recursive CTE, so we fall
+        # back to doing it manually.
         if include_given:
             results = set(event_ids)
         else:

From a0d2f9d089b33ead392d1ac8ded0b44512cd6273 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 18 Feb 2020 16:16:49 +0000
Subject: [PATCH 142/382] Phrasing
---
 docs/federate.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/federate.md b/docs/federate.md
index e85a077c34..55f2a6d0ee 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -13,7 +13,7 @@ server (via port 8448). This is easy to set up and will work provided
 you set the `server_name` to match your machine's public DNS hostname.

 You will also need a valid TLS certificate for this `server_name` served
-on port 8448 - the preferred way to do that is by using a reverse proxy,
+on port 8448. The preferred way to do that is by using a reverse proxy,
 see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to
 correctly set one up.
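Returning to the storage change in [PATCH 141/382] above: non-Postgres engines
keep using the pre-existing manual traversal, of which only the first lines
appear as trailing context in the hunk. Its shape is an iterative breadth-first
walk over the `event_auth` table's `event_id -> auth_id` edges. A
self-contained sketch of such a walk, using plain `sqlite3` in place of
Synapse's transaction wrappers (table layout assumed from the queries above):

```python
import sqlite3


def get_auth_chain_ids(conn, event_ids, include_given=False):
    """Collect every event reachable from event_ids via event_auth edges."""
    results = set(event_ids) if include_given else set()
    frontier = set(event_ids)
    while frontier:
        placeholders = ",".join("?" * len(frontier))
        rows = conn.execute(
            "SELECT auth_id FROM event_auth WHERE event_id IN (%s)" % placeholders,
            list(frontier),
        ).fetchall()
        found = {auth_id for (auth_id,) in rows}
        # Only expand events we haven't already walked, so cycles terminate.
        frontier = found - results
        results |= found
    return list(results)


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE event_auth (event_id TEXT, auth_id TEXT)")
conn.executemany(
    "INSERT INTO event_auth VALUES (?, ?)",
    [("$C", "$B"), ("$B", "$A"), ("$A", "$create")],
)
assert set(get_auth_chain_ids(conn, ["$C"])) == {"$B", "$A", "$create"}
```

The recursive CTE computes the same set, but pushes the loop into the
database, saving one query round-trip per layer of the auth chain.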
From adfaea8c698a38ffe14ac682a946abc9f8152635 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Feb 2020 16:23:25 +0000 Subject: [PATCH 143/382] Implement GET /_matrix/client/r0/rooms/{roomId}/aliases (#6939) per matrix-org/matrix-doc#2432 --- changelog.d/6939.feature | 1 + synapse/handlers/directory.py | 17 +++++++- synapse/rest/client/v1/room.py | 23 ++++++++++ tests/rest/client/v1/test_rooms.py | 70 +++++++++++++++++++++++++++++- tests/unittest.py | 28 ++++++++---- 5 files changed, 128 insertions(+), 11 deletions(-) create mode 100644 changelog.d/6939.feature diff --git a/changelog.d/6939.feature b/changelog.d/6939.feature new file mode 100644 index 0000000000..40fe7fc9a9 --- /dev/null +++ b/changelog.d/6939.feature @@ -0,0 +1 @@ +Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index f718388884..3f8c792149 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -16,6 +16,7 @@ import logging import string +from typing import List from twisted.internet import defer @@ -28,7 +29,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) -from synapse.types import RoomAlias, UserID, get_domain_from_id +from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -452,3 +453,17 @@ class DirectoryHandler(BaseHandler): yield self.store.set_room_is_public_appservice( room_id, appservice_id, network_id, visibility == "public" ) + + async def get_aliases_for_room( + self, requester: Requester, room_id: str + ) -> List[str]: + """ + Get a list of the aliases that currently point to this room on this server + """ + # allow access to server admins and current members of the room + is_admin = await self.auth.is_server_admin(requester.user) + if not is_admin: + await self.auth.check_joined_room(room_id, requester.user.to_string()) + + aliases = await self.store.get_aliases_for_room(room_id) + return aliases diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 6f31584c51..143dc738c6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -45,6 +45,10 @@ from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID +MYPY = False +if MYPY: + import synapse.server + logger = logging.getLogger(__name__) @@ -843,6 +847,24 @@ class RoomTypingRestServlet(RestServlet): return 200, {} +class RoomAliasListServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/aliases", unstable=False) + + def __init__(self, hs: "synapse.server.HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.directory_handler = hs.get_handlers().directory_handler + + async def on_GET(self, request, room_id): + requester = await self.auth.get_user_by_req(request) + + alias_list = await self.directory_handler.get_aliases_for_room( + requester, room_id + ) + + return 200, {"aliases": alias_list} + + class SearchRestServlet(RestServlet): PATTERNS = client_patterns("/search$", v1=True) @@ -931,6 +953,7 @@ def register_servlets(hs, http_server): JoinedRoomsRestServlet(hs).register(http_server) RoomEventServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) + RoomAliasListServlet(hs).register(http_server) def 
register_deprecated_servlets(hs, http_server): diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index fb681a1db9..fb08a45d27 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -28,8 +28,9 @@ from twisted.internet import defer import synapse.rest.admin from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.handlers.pagination import PurgeStatus -from synapse.rest.client.v1 import login, profile, room +from synapse.rest.client.v1 import directory, login, profile, room from synapse.rest.client.v2_alpha import account +from synapse.types import JsonDict, RoomAlias from synapse.util.stringutils import random_string from tests import unittest @@ -1726,3 +1727,70 @@ class ContextTestCase(unittest.HomeserverTestCase): self.assertEqual(len(events_after), 2, events_after) self.assertDictEqual(events_after[0].get("content"), {}, events_after[0]) self.assertEqual(events_after[1].get("content"), {}, events_after[1]) + + +class DirectoryTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + directory.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.room_owner = self.register_user("room_owner", "test") + self.room_owner_tok = self.login("room_owner", "test") + + self.room_id = self.helper.create_room_as( + self.room_owner, tok=self.room_owner_tok + ) + + def test_no_aliases(self): + res = self._get_aliases(self.room_owner_tok) + self.assertEqual(res["aliases"], []) + + def test_not_in_room(self): + self.register_user("user", "test") + user_tok = self.login("user", "test") + res = self._get_aliases(user_tok, expected_code=403) + self.assertEqual(res["errcode"], "M_FORBIDDEN") + + def test_with_aliases(self): + alias1 = self._random_alias() + alias2 = self._random_alias() + + self._set_alias_via_directory(alias1) + self._set_alias_via_directory(alias2) + + res = self._get_aliases(self.room_owner_tok) + self.assertEqual(set(res["aliases"]), {alias1, alias2}) + + def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict: + """Calls the endpoint under test. 
returns the json response object.""" + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,), + access_token=access_token, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + res = channel.json_body + self.assertIsInstance(res, dict) + if expected_code == 200: + self.assertIsInstance(res["aliases"], list) + return res + + def _random_alias(self) -> str: + return RoomAlias(random_string(5), self.hs.hostname).to_string() + + def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + url = "/_matrix/client/r0/directory/room/" + alias + data = {"room_id": self.room_id} + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.room_owner_tok + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index 98bf27d39c..8816a4d152 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -21,6 +21,7 @@ import hmac import inspect import logging import time +from typing import Optional, Tuple, Type, TypeVar, Union from mock import Mock @@ -42,7 +43,13 @@ from synapse.server import HomeServer from synapse.types import Requester, UserID, create_requester from synapse.util.ratelimitutils import FederationRateLimiter -from tests.server import get_clock, make_request, render, setup_test_homeserver +from tests.server import ( + FakeChannel, + get_clock, + make_request, + render, + setup_test_homeserver, +) from tests.test_utils.logging_setup import setup_logging from tests.utils import default_config, setupdb @@ -71,6 +78,9 @@ def around(target): return _around +T = TypeVar("T") + + class TestCase(unittest.TestCase): """A subclass of twisted.trial's TestCase which looks for 'loglevel' attributes on both itself and its individual test methods, to override the @@ -334,14 +344,14 @@ class HomeserverTestCase(TestCase): def make_request( self, - method, - path, - content=b"", - access_token=None, - request=SynapseRequest, - shorthand=True, - federation_auth_origin=None, - ): + method: Union[bytes, str], + path: Union[bytes, str], + content: Union[bytes, dict] = b"", + access_token: Optional[str] = None, + request: Type[T] = SynapseRequest, + shorthand: bool = True, + federation_auth_origin: str = None, + ) -> Tuple[T, FakeChannel]: """ Create a SynapseRequest at the path using the method and containing the given content. From 61b457e3ec72ec005605b8569eae8b8e547101ee Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 17:20:03 +0000 Subject: [PATCH 144/382] Incorporate review --- docs/delegate.md | 22 +++++++++------------- docs/federate.md | 16 ++++++++-------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/docs/delegate.md b/docs/delegate.md index 7b55827cec..57ae2c4c70 100644 --- a/docs/delegate.md +++ b/docs/delegate.md @@ -1,14 +1,13 @@ # Delegation -For a more flexible configuration, you can have `server_name` -resources (eg: `@user:example.com`) served by a different host and -port (eg: `synapse.example.com:443`). +Without configuring delegation, homeservers will expect the server +responsible for resources using e.g. `example.com` as their `server_name` +(e.g. `@user:example.com`) to be served at `example.com:8448`. -Without configuring delegation, the matrix federation will -expect to find your server via `example.com:8448`. 
The following methods -allow you retain a `server_name` of `example.com` so that your user IDs, room -aliases, etc continue to look like `*:example.com`, whilst having your -federation traffic routed to a different server (e.g. `synapse.example.com`). +Delegation is a Matrix feature allowing a homeserver admin to retain a +`server_name` of `example.com` so that your user IDs, room aliases, etc +continue to look like `*:example.com`, whilst having your federation +traffic routed to a different server and/or port (e.g. `synapse.example.com:443`). ## .well-known delegation @@ -38,11 +37,8 @@ should return: Note, specifying a port is optional. If no port is specified, then it defaults to 8448. -Most installations will not need to configure .well-known. However, it can be -useful in cases where the admin is hosting on behalf of someone else and -therefore cannot gain access to the necessary certificate. With .well-known, -federation servers will check for a valid TLS certificate for the delegated -hostname (in our example: `synapse.example.com`). +With .well-known, federation servers will check for a valid TLS certificate +for the delegated hostname (in our example: `synapse.example.com`). ## SRV DNS record delegation diff --git a/docs/federate.md b/docs/federate.md index 55f2a6d0ee..5fc839b58b 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -17,13 +17,13 @@ on port 8448. The preferred way to do that is by using a reverse proxy, see [reverse_proxy.md]() for instructions on how to correctly set one up. -In some cases you might not want Synapse to be running on the machine that -has the `server_name` as its public DNS hostname, or federation traffic -to use port than 8448 (e.g. you want to use `example.com` as your `server_name` -but want Synapse to be reachable on `synapse.example.com:443`). This can -be done using delegation, which allows an admin to dictate where federation -traffic should be sent, see [delegate.md]() for instructions on -how to set this up. +In some cases you might not want to run Synapse on the machine that has +the `server_name` as its public DNS hostname, or you might want federation +traffic to use a different port than 8448. For example, you might want to +have your user names look like `@user:example.com`, but you want to run +Synapse on `synapse.example.com` on port 443. This can be done using +delegation, which allows an admin to control where federation traffic should +be sent. See [delegate.md](delegate.md) for instructions on how to set this up. Once federation has been configured, you should be able to join a room over federation. A good place to start is `#synapse:matrix.org` - a room for @@ -31,7 +31,7 @@ Synapse admins. ## Troubleshooting -You can use the [federation tester]() +You can use the [federation tester](https://matrix.org/federationtester) to check if your homeserver is configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN). 
Note that you'll have to modify this URL to replace `DOMAIN` with your From f31a94a6dd1d27cf53fd7865057ac86b669e910f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 17:29:57 +0000 Subject: [PATCH 145/382] Fix log in message retention purge jobs --- synapse/handlers/pagination.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index caf841a643..9bf6d39668 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -133,7 +133,7 @@ class PaginationHandler(object): include_null = False logger.info( - "[purge] Running purge job for %d < max_lifetime <= %d (include NULLs = %s)", + "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)", min_ms, max_ms, include_null, From 771d70e89cde1645650bce0b3ec5d1ac4b8bd8f5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 18 Feb 2020 17:31:02 +0000 Subject: [PATCH 146/382] Changelog --- changelog.d/6945.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6945.bugfix diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix new file mode 100644 index 0000000000..78470a0ef6 --- /dev/null +++ b/changelog.d/6945.bugfix @@ -0,0 +1 @@ +Fix bogus log in the purge jobs related to the message retention policies support. From b58d17e44f9d9ff7e70578e0f4e328bb9113ec7e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Feb 2020 23:13:29 +0000 Subject: [PATCH 147/382] Refactor the membership check methods in Auth these were getting a bit unwieldy, so let's combine `check_joined_room` and `check_user_was_in_room` into a single `check_user_in_room`. --- synapse/api/auth.py | 80 +++++++++++++++----------------- synapse/handlers/initial_sync.py | 31 ++----------- synapse/handlers/typing.py | 4 +- tests/handlers/test_typing.py | 4 +- 4 files changed, 46 insertions(+), 73 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 8b1277ad02..de7b75ca36 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +from typing import Optional from six import itervalues @@ -35,6 +36,7 @@ from synapse.api.errors import ( ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.server import is_threepid_reserved +from synapse.events import EventBase from synapse.types import StateMap, UserID from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache from synapse.util.caches.lrucache import LruCache @@ -92,20 +94,34 @@ class Auth(object): ) @defer.inlineCallbacks - def check_joined_room(self, room_id, user_id, current_state=None): - """Check if the user is currently joined in the room + def check_user_in_room( + self, + room_id: str, + user_id: str, + current_state: Optional[StateMap[EventBase]] = None, + allow_departed_users: bool = False, + ): + """Check if the user is in the room, or was at some point. Args: - room_id(str): The room to check. - user_id(str): The user to check. - current_state(dict): Optional map of the current state of the room. + room_id: The room to check. + + user_id: The user to check. + + current_state: Optional map of the current state of the room. If provided then that map is used to check whether they are a member of the room. Otherwise the current membership is loaded from the database. + + allow_departed_users: if True, accept users that were previously + members but have now departed. + Raises: - AuthError if the user is not in the room. 
+ AuthError if the user is/was not in the room. Returns: - A deferred membership event for the user if the user is in - the room. + Deferred[Optional[EventBase]]: + Membership event for the user if the user was in the + room. This will be the join event if they are currently joined to + the room. This will be the leave event if they have left the room. """ if current_state: member = current_state.get((EventTypes.Member, user_id), None) @@ -113,37 +129,19 @@ class Auth(object): member = yield self.state.get_current_state( room_id=room_id, event_type=EventTypes.Member, state_key=user_id ) - - self._check_joined_room(member, user_id, room_id) - return member - - @defer.inlineCallbacks - def check_user_was_in_room(self, room_id, user_id): - """Check if the user was in the room at some point. - Args: - room_id(str): The room to check. - user_id(str): The user to check. - Raises: - AuthError if the user was never in the room. - Returns: - A deferred membership event for the user if the user was in the - room. This will be the join event if they are currently joined to - the room. This will be the leave event if they have left the room. - """ - member = yield self.state.get_current_state( - room_id=room_id, event_type=EventTypes.Member, state_key=user_id - ) membership = member.membership if member else None - if membership not in (Membership.JOIN, Membership.LEAVE): - raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) + if membership == Membership.JOIN: + return member - if membership == Membership.LEAVE: + # XXX this looks totally bogus. Why do we not allow users who have been banned, + # or those who were members previously and have been re-invited? + if allow_departed_users and membership == Membership.LEAVE: forgot = yield self.store.did_forget(user_id, room_id) - if forgot: - raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) + if not forgot: + return member - return member + raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) @defer.inlineCallbacks def check_host_in_room(self, room_id, host): @@ -151,12 +149,6 @@ class Auth(object): latest_event_ids = yield self.store.is_host_joined(room_id, host) return latest_event_ids - def _check_joined_room(self, member, user_id, room_id): - if not member or member.membership != Membership.JOIN: - raise AuthError( - 403, "User %s not in room %s (%s)" % (user_id, room_id, repr(member)) - ) - def can_federate(self, event, auth_events): creation_event = auth_events.get((EventTypes.Create, "")) @@ -560,7 +552,7 @@ class Auth(object): return True user_id = user.to_string() - yield self.check_joined_room(room_id, user_id) + yield self.check_user_in_room(room_id, user_id) # We currently require the user is a "moderator" in the room. We do this # by checking if they would (theoretically) be able to change the @@ -645,12 +637,14 @@ class Auth(object): """ try: - # check_user_was_in_room will return the most recent membership + # check_user_in_room will return the most recent membership # event for the user if: # * The user is a non-guest user, and was ever in the room # * The user is a guest user, and has joined the room # else it will throw. 
- member_event = yield self.check_user_was_in_room(room_id, user_id) + member_event = yield self.check_user_in_room( + room_id, user_id, allow_departed_users=True + ) return member_event.membership, member_event.event_id except AuthError: visibility = yield self.state.get_current_state( diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 2e6755f19c..b7c6a921d9 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -18,7 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, SynapseError +from synapse.api.errors import SynapseError from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -274,9 +274,10 @@ class InitialSyncHandler(BaseHandler): user_id = requester.user.to_string() - membership, member_event_id = await self._check_in_room_or_world_readable( - room_id, user_id - ) + ( + membership, + member_event_id, + ) = await self.auth.check_user_in_room_or_world_readable(room_id, user_id) is_peeking = member_event_id is None if membership == Membership.JOIN: @@ -433,25 +434,3 @@ class InitialSyncHandler(BaseHandler): ret["membership"] = membership return ret - - async def _check_in_room_or_world_readable(self, room_id, user_id): - try: - # check_user_was_in_room will return the most recent membership - # event for the user if: - # * The user is a non-guest user, and was ever in the room - # * The user is a guest user, and has joined the room - # else it will throw. - member_event = await self.auth.check_user_was_in_room(room_id, user_id) - return member_event.membership, member_event.event_id - except AuthError: - visibility = await self.state_handler.get_current_state( - room_id, EventTypes.RoomHistoryVisibility, "" - ) - if ( - visibility - and visibility.content["history_visibility"] == "world_readable" - ): - return Membership.JOIN, None - raise AuthError( - 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN - ) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index d5ca9cb07b..5406618431 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -125,7 +125,7 @@ class TypingHandler(object): if target_user_id != auth_user_id: raise AuthError(400, "Cannot set another user's typing state") - yield self.auth.check_joined_room(room_id, target_user_id) + yield self.auth.check_user_in_room(room_id, target_user_id) logger.debug("%s has started typing in %s", target_user_id, room_id) @@ -155,7 +155,7 @@ class TypingHandler(object): if target_user_id != auth_user_id: raise AuthError(400, "Cannot set another user's typing state") - yield self.auth.check_joined_room(room_id, target_user_id) + yield self.auth.check_user_in_room(room_id, target_user_id) logger.debug("%s has stopped typing in %s", target_user_id, room_id) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 2767b0497a..140cc0a3c2 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -122,11 +122,11 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.room_members = [] - def check_joined_room(room_id, user_id): + def check_user_in_room(room_id, user_id): if user_id not in [u.to_string() for u in self.room_members]: raise AuthError(401, "User is not in the room") - 
hs.get_auth().check_joined_room = check_joined_room + hs.get_auth().check_user_in_room = check_user_in_room def get_joined_hosts_for_room(room_id): return set(member.domain for member in self.room_members) From a0a1fd0bec5cb596cc41c8f052a4aa0e8c01cf08 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Feb 2020 23:14:57 +0000 Subject: [PATCH 148/382] Add `allow_departed_users` param to `check_in_room_or_world_readable` ... and set it everywhere it's called. while we're here, rename it for consistency with `check_user_in_room` (and to help check that I haven't missed any instances) --- synapse/api/auth.py | 16 +++++++++++++--- synapse/handlers/initial_sync.py | 4 +++- synapse/handlers/message.py | 12 ++++++++---- synapse/handlers/pagination.py | 4 +++- synapse/rest/client/v2_alpha/relations.py | 12 ++++++------ 5 files changed, 33 insertions(+), 15 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index de7b75ca36..f576d65388 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -625,10 +625,18 @@ class Auth(object): return query_params[0].decode("ascii") @defer.inlineCallbacks - def check_in_room_or_world_readable(self, room_id, user_id): + def check_user_in_room_or_world_readable( + self, room_id: str, user_id: str, allow_departed_users: bool = False + ): """Checks that the user is or was in the room or the room is world readable. If it isn't then an exception is raised. + Args: + room_id: room to check + user_id: user to check + allow_departed_users: if True, accept users that were previously + members but have now departed + Returns: Deferred[tuple[str, str|None]]: Resolves to the current membership of the user in the room and the membership event ID of the user. If @@ -643,7 +651,7 @@ class Auth(object): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.check_user_in_room( - room_id, user_id, allow_departed_users=True + room_id, user_id, allow_departed_users=allow_departed_users ) return member_event.membership, member_event.event_id except AuthError: @@ -656,7 +664,9 @@ class Auth(object): ): return Membership.JOIN, None raise AuthError( - 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN + 403, + "User %s not in room %s, and room previews are disabled" + % (user_id, room_id), ) @defer.inlineCallbacks diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index b7c6a921d9..b116500c7d 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -277,7 +277,9 @@ class InitialSyncHandler(BaseHandler): ( membership, member_event_id, - ) = await self.auth.check_user_in_room_or_world_readable(room_id, user_id) + ) = await self.auth.check_user_in_room_or_world_readable( + room_id, user_id, allow_departed_users=True, + ) is_peeking = member_event_id is None if membership == Membership.JOIN: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index be6ae18a92..d6be280952 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -99,7 +99,9 @@ class MessageHandler(object): ( membership, membership_event_id, - ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id) + ) = yield self.auth.check_user_in_room_or_world_readable( + room_id, user_id, allow_departed_users=True + ) if membership == Membership.JOIN: data = yield self.state.get_current_state(room_id, event_type, state_key) @@ -177,7 +179,9 @@ class MessageHandler(object): ( membership, membership_event_id, - ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id) + ) = yield self.auth.check_user_in_room_or_world_readable( + room_id, user_id, allow_departed_users=True + ) if membership == Membership.JOIN: state_ids = yield self.store.get_filtered_current_state_ids( @@ -216,8 +220,8 @@ class MessageHandler(object): if not requester.app_service: # We check AS auth after fetching the room membership, as it # requires us to pull out all joined members anyway. - membership, _ = yield self.auth.check_in_room_or_world_readable( - room_id, user_id + membership, _ = yield self.auth.check_user_in_room_or_world_readable( + room_id, user_id, allow_departed_users=True ) if membership != Membership.JOIN: raise NotImplementedError( diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index caf841a643..254a9f6856 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -335,7 +335,9 @@ class PaginationHandler(object): ( membership, member_event_id, - ) = await self.auth.check_in_room_or_world_readable(room_id, user_id) + ) = await self.auth.check_user_in_room_or_world_readable( + room_id, user_id, allow_departed_users=True + ) if source_config.direction == "b": # if we're going backwards, we might need to backfill. 
This diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 9be9a34b91..63f07b63da 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -142,8 +142,8 @@ class RelationPaginationServlet(RestServlet): ): requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_in_room_or_world_readable( - room_id, requester.user.to_string() + await self.auth.check_user_in_room_or_world_readable( + room_id, requester.user.to_string(), allow_departed_users=True ) # This gets the original event and checks that a) the event exists and @@ -235,8 +235,8 @@ class RelationAggregationPaginationServlet(RestServlet): ): requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_in_room_or_world_readable( - room_id, requester.user.to_string() + await self.auth.check_user_in_room_or_world_readable( + room_id, requester.user.to_string(), allow_departed_users=True, ) # This checks that a) the event exists and b) the user is allowed to @@ -313,8 +313,8 @@ class RelationAggregationGroupPaginationServlet(RestServlet): async def on_GET(self, request, room_id, parent_id, relation_type, event_type, key): requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_in_room_or_world_readable( - room_id, requester.user.to_string() + await self.auth.check_user_in_room_or_world_readable( + room_id, requester.user.to_string(), allow_departed_users=True, ) # This checks that a) the event exists and b) the user is allowed to From 709e81f5183d8ff67d86f4569234cb4a8be7a8d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Feb 2020 23:15:54 +0000 Subject: [PATCH 149/382] Make room alias lists peekable As per https://github.com/matrix-org/matrix-doc/pull/2432#pullrequestreview-360566830, make room alias lists accessible to users outside world_readable rooms. 
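
The rule being extended here is the usual peekability check: a user who is not in the room may still read state if the room's current `m.room.history_visibility` is `world_readable`. A minimal standalone sketch of that rule (the `may_peek` helper and the plain-dict event shape are illustrative, not Synapse's actual API):

```python
def may_peek(history_visibility_event):
    # A room is peekable iff its m.room.history_visibility content is
    # exactly "world_readable"; a missing event means no peeking.
    if not history_visibility_event:
        return False
    content = history_visibility_event.get("content", {})
    return content.get("history_visibility") == "world_readable"


assert may_peek({"content": {"history_visibility": "world_readable"}})
assert not may_peek({"content": {"history_visibility": "shared"}})
assert not may_peek(None)
```
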
--- synapse/handlers/directory.py | 4 +++- tests/rest/client/v1/test_rooms.py | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 3f8c792149..db2104c5f6 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -463,7 +463,9 @@ class DirectoryHandler(BaseHandler): # allow access to server admins and current members of the room is_admin = await self.auth.is_server_admin(requester.user) if not is_admin: - await self.auth.check_joined_room(room_id, requester.user.to_string()) + await self.auth.check_user_in_room_or_world_readable( + room_id, requester.user.to_string() + ) aliases = await self.store.get_aliases_for_room(room_id) return aliases diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index fb08a45d27..8e389eb6c9 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1766,6 +1766,23 @@ class DirectoryTestCase(unittest.HomeserverTestCase): res = self._get_aliases(self.room_owner_tok) self.assertEqual(set(res["aliases"]), {alias1, alias2}) + def test_peekable_room(self): + alias1 = self._random_alias() + self._set_alias_via_directory(alias1) + + self.helper.send_state( + self.room_id, + EventTypes.RoomHistoryVisibility, + body={"history_visibility": "world_readable"}, + tok=self.room_owner_tok, + ) + + self.register_user("user", "test") + user_tok = self.login("user", "test") + + res = self._get_aliases(user_tok) + self.assertEqual(res["aliases"], [alias1]) + def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict: """Calls the endpoint under test. returns the json response object.""" request, channel = self.make_request( From 603618c002eaf0b763f376e27477792b38ea00ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 18 Feb 2020 23:20:16 +0000 Subject: [PATCH 150/382] changelog --- changelog.d/6949.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6949.feature diff --git a/changelog.d/6949.feature b/changelog.d/6949.feature new file mode 100644 index 0000000000..40fe7fc9a9 --- /dev/null +++ b/changelog.d/6949.feature @@ -0,0 +1 @@ +Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). From 5a5abd55e8b47a7c1620c298a72817ccf73f90b0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Feb 2020 09:39:27 +0000 Subject: [PATCH 151/382] Limit size of get_auth_chain_ids query (#6947) --- changelog.d/6947.misc | 1 + .../data_stores/main/event_federation.py | 39 +++++++++++-------- 2 files changed, 23 insertions(+), 17 deletions(-) create mode 100644 changelog.d/6947.misc diff --git a/changelog.d/6947.misc b/changelog.d/6947.misc new file mode 100644 index 0000000000..6d00e58654 --- /dev/null +++ b/changelog.d/6947.misc @@ -0,0 +1 @@ +Increase perf of `get_auth_chain_ids` used in state res v2. 
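
The change below keeps the recursive CTE but feeds it the input event IDs in fixed-size batches, since very large `ANY(?)` arrays were observed to perform badly. As a rough standalone sketch of the batching step (the `chunk` helper is hypothetical, not part of Synapse; 500 is the batch size the patch picks):

```python
def chunk(items, size=500):
    # Split `items` into consecutive slices of at most `size` elements,
    # preserving order; the final slice may be shorter.
    items = list(items)
    return [items[i : i + size] for i in range(0, len(items), size)]


batches = chunk(["$event{}:example.com".format(i) for i in range(1200)])
assert [len(b) for b in batches] == [500, 500, 200]
```
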
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 1746f40adf..dcc375b840 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -62,32 +62,37 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         )
 
     def _get_auth_chain_ids_txn(self, txn, event_ids, include_given):
+        if include_given:
+            results = set(event_ids)
+        else:
+            results = set()
+
         if isinstance(self.database_engine, PostgresEngine):
             # For efficiency we make the database do this if we can.
-            sql = """
-                WITH RECURSIVE auth_chain(event_id) AS (
-                    SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
-                    UNION
-                    SELECT auth_id FROM event_auth
-                    INNER JOIN auth_chain USING (event_id)
-                )
-                SELECT event_id FROM auth_chain
-            """
-            txn.execute(sql, (list(event_ids),))
 
-            results = set(event_id for event_id, in txn)
+            # We need to be a little careful with querying large amounts at
+            # once, for some reason postgres really doesn't like it. We do this
+            # by only asking for auth chain of 500 events at a time.
+            event_ids = list(event_ids)
+            chunks = [event_ids[x : x + 500] for x in range(0, len(event_ids), 500)]
+            for chunk in chunks:
+                sql = """
+                    WITH RECURSIVE auth_chain(event_id) AS (
+                        SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
+                        UNION
+                        SELECT auth_id FROM event_auth
+                        INNER JOIN auth_chain USING (event_id)
+                    )
+                    SELECT event_id FROM auth_chain
+                """
+                txn.execute(sql, (chunk,))
 
-            if include_given:
-                results.update(event_ids)
+                results.update(event_id for event_id, in txn)
 
             return list(results)
 
         # Database doesn't necessarily support recursive CTE, so we fall
         # back to doing it manually.
-        if include_given:
-            results = set(event_ids)
-        else:
-            results = set()
 
         base_sql = "SELECT auth_id FROM event_auth WHERE "

From fa64f836ec661a234cfb240afcb0a65bcae4cbf5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 19 Feb 2020 09:54:13 +0000
Subject: [PATCH 152/382] Update changelog.d/6945.bugfix

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6945.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix
index 78470a0ef6..8561be16a4 100644
--- a/changelog.d/6945.bugfix
+++ b/changelog.d/6945.bugfix
@@ -1 +1 @@
-Fix bogus log in the purge jobs related to the message retention policies support.
+Fix errors from logging in the purge jobs related to the message retention policies support.

From 71d65407e7b09ff630c5388122608629fe733e5d Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 19 Feb 2020 10:03:19 +0000
Subject: [PATCH 153/382] Incorporate review

---
 docs/delegate.md | 17 +++++++++--------
 docs/federate.md |  8 ++++----
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/docs/delegate.md b/docs/delegate.md
index 57ae2c4c70..208ddb6277 100644
--- a/docs/delegate.md
+++ b/docs/delegate.md
@@ -1,13 +1,14 @@
 # Delegation
 
-Without configuring delegation, homeservers will expect the server
-responsible for resources using e.g. `example.com` as their `server_name`
-(e.g. `@user:example.com`) to be served at `example.com:8448`.
+By default, other homeservers will expect to be able to reach yours via
+your `server_name`, on port 8448. For example, if you set your `server_name`
+to `example.com` (so that your user names look like `@user:example.com`),
+other servers will try to connect to yours at `https://example.com:8448/`.
 
 Delegation is a Matrix feature allowing a homeserver admin to retain a
-`server_name` of `example.com` so that your user IDs, room aliases, etc
-continue to look like `*:example.com`, whilst having your federation
-traffic routed to a different server and/or port (e.g. `synapse.example.com:443`).
+`server_name` of `example.com` so that user IDs, room aliases, etc continue
+to look like `*:example.com`, whilst having federation traffic routed
+to a different server and/or port (e.g. `synapse.example.com:443`).
 
 ## .well-known delegation
 
@@ -37,8 +38,8 @@ should return:
 Note, specifying a port is optional. If no port is specified, then it defaults
 to 8448.
 
-With .well-known, federation servers will check for a valid TLS certificate
-for the delegated hostname (in our example: `synapse.example.com`).
+With .well-known delegation, federating servers will check for a valid TLS
+certificate for the delegated hostname (in our example: `synapse.example.com`).
 
 ## SRV DNS record delegation
 
diff --git a/docs/federate.md b/docs/federate.md
index 5fc839b58b..a0786b9cf7 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -12,10 +12,10 @@ it is also the domain that other servers will use to try to reach your
 server (via port 8448). This is easy to set up and will work provided
 you set the `server_name` to match your machine's public DNS hostname.
 
-You will also need a valid TLS certificate for this `server_name` served
-on port 8448. The preferred way to do that is by using a reverse proxy,
-see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to
-correctly set one up.
+For this default configuration to work, you will need to listen for TLS
+connections on port 8448. The preferred way to do that is by using a
+reverse proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions
+on how to correctly set one up.
 
 In some cases you might not want to run Synapse on the machine that has
 the `server_name` as its public DNS hostname, or you might want federation

From 0d0bc35792aac0490e35cd3514b76d7aada7c8e0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Feb 2020 10:15:49 +0000
Subject: [PATCH 154/382] Increase DB/CPU perf of `_is_server_still_joined`
 check. (#6936)

* Increase DB/CPU perf of `_is_server_still_joined` check.

For rooms with a large amount of state, a single user leaving could cause
us to go and load a lot of membership events and then pull out
membership state in a large number of batches.

* Newsfile

* Update synapse/storage/persist_events.py

Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>

* Fix adding if too soon

* Update docstring

* Review comments

* Woops typo

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/6936.misc                         |  1 +
 .../storage/data_stores/main/roommember.py    | 31 +++++++++++++
 synapse/storage/persist_events.py             | 43 ++++++++++++-------
 3 files changed, 60 insertions(+), 15 deletions(-)
 create mode 100644 changelog.d/6936.misc

diff --git a/changelog.d/6936.misc b/changelog.d/6936.misc
new file mode 100644
index 0000000000..9400725017
--- /dev/null
+++ b/changelog.d/6936.misc
@@ -0,0 +1 @@
+Increase DB/CPU perf of `_is_server_still_joined` check.
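
The new storage method below reduces the membership check to a single indexed lookup: is there any local member of the room other than the users whose state just changed? A hedged sketch of how such a parameterised `NOT (... IN ...)` clause can be assembled (this only mimics the spirit of `make_in_list_sql_clause`; the real helper also special-cases engines with native array support):

```python
def make_in_list_clause(column, values):
    # Build "column IN (?, ?, ...)" plus its parameter list. A sketch only:
    # it assumes `values` is non-empty.
    placeholders = ", ".join("?" for _ in values)
    return "%s IN (%s)" % (column, placeholders), list(values)


clause, args = make_in_list_clause("user_id", ["@alice:hs", "@bob:hs"])
sql = (
    "SELECT 1 FROM local_current_membership"
    " WHERE room_id = ? AND membership = ? AND NOT (%s) LIMIT 1" % clause
)
params = ["!room:hs", "join"] + args
# Executing `sql` with `params` and getting a row back means at least one
# other local user is still joined, so the server need not leave the room.
```
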
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 042289f0e0..d5ced05701 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -868,6 +868,37 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             desc="get_membership_from_event_ids",
         )
 
+    async def is_local_host_in_room_ignoring_users(
+        self, room_id: str, ignore_users: Collection[str]
+    ) -> bool:
+        """Check if there are any local users, excluding those in the given
+        list, in the room.
+        """
+
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "user_id", ignore_users
+        )
+
+        sql = """
+            SELECT 1 FROM local_current_membership
+            WHERE
+                room_id = ? AND membership = ?
+                AND NOT (%s)
+            LIMIT 1
+        """ % (
+            clause,
+        )
+
+        def _is_local_host_in_room_ignoring_users_txn(txn):
+            txn.execute(sql, (room_id, Membership.JOIN, *args))
+
+            return bool(txn.fetchone())
+
+        return await self.db.runInteraction(
+            "is_local_host_in_room_ignoring_users",
+            _is_local_host_in_room_ignoring_users_txn,
+        )
+
 
 class RoomMemberBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index a5370ed527..b950550f23 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -727,6 +727,7 @@ class EventsPersistenceStorage(object):
 
         # Check if any of the given events are a local join that appear in the
         # current state
+        events_to_check = []  # Event IDs that aren't an event we're persisting
         for (typ, state_key), event_id in delta.to_insert.items():
             if typ != EventTypes.Member or not self.is_mine_id(state_key):
                 continue
@@ -736,8 +737,33 @@ class EventsPersistenceStorage(object):
             if event.membership == Membership.JOIN:
                 return True
 
-        # There's been a change of membership but we don't have a local join
-        # event in the new events, so we need to check the full state.
+            # The event is not in `ev_ctx_rm`, so we need to pull it out of
+            # the DB.
+            events_to_check.append(event_id)
+
+        # Check if any of the changes that we don't have events for are joins.
+        if events_to_check:
+            rows = await self.main_store.get_membership_from_event_ids(events_to_check)
+            is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
+            if is_still_joined:
+                return True
+
+        # None of the new state events are local joins, so we check the database
+        # to see if there are any other local users in the room. We ignore users
+        # whose state has changed as we've already checked their new state above.
+        users_to_ignore = [
+            state_key
+            for _, state_key in itertools.chain(delta.to_insert, delta.to_delete)
+            if self.is_mine_id(state_key)
+        ]
+
+        if await self.main_store.is_local_host_in_room_ignoring_users(
+            room_id, users_to_ignore
+        ):
+            return True
+
+        # The server will leave the room, so we go and find out which remote
+        # users will still be joined when we leave.
if current_state is None: current_state = await self.main_store.get_current_state_ids(room_id) current_state = dict(current_state) @@ -746,19 +772,6 @@ class EventsPersistenceStorage(object): current_state.update(delta.to_insert) - event_ids = [ - event_id - for (typ, state_key,), event_id in current_state.items() - if typ == EventTypes.Member and self.is_mine_id(state_key) - ] - - rows = await self.main_store.get_membership_from_event_ids(event_ids) - is_still_joined = any(row["membership"] == Membership.JOIN for row in rows) - if is_still_joined: - return True - - # The server will leave the room, so we go and find out which remote - # users will still be joined when we leave. remote_event_ids = [ event_id for (typ, state_key,), event_id in current_state.items() From abf1e5c52669bd41ad803d4645809b6efdfcd61d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 19 Feb 2020 10:38:20 +0000 Subject: [PATCH 155/382] Tiny optimisation for _get_handler_for_request (#6950) we have hundreds of path_regexes (see #5118), so let's not convert the same bytes to str for each of them. --- changelog.d/6950.misc | 1 + synapse/http/server.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6950.misc diff --git a/changelog.d/6950.misc b/changelog.d/6950.misc new file mode 100644 index 0000000000..1c88936b8b --- /dev/null +++ b/changelog.d/6950.misc @@ -0,0 +1 @@ +Tiny optimisation for incoming HTTP request dispatch. diff --git a/synapse/http/server.py b/synapse/http/server.py index 04bc2385a2..042a605198 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -353,10 +353,12 @@ class JsonResource(HttpServer, resource.Resource): if request.method == b"OPTIONS": return _options_handler, "options_request_handler", {} + request_path = request.path.decode("ascii") + # Loop through all the registered callbacks to check if the method # and path regex match for path_entry in self.path_regexs.get(request.method, []): - m = path_entry.pattern.match(request.path.decode("ascii")) + m = path_entry.pattern.match(request_path) if m: # We found a match! return path_entry.callback, path_entry.servlet_classname, m.groupdict() From 880aaac1d82695b1a89f22f1f86c7f295ca205e0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 19 Feb 2020 10:40:27 +0000 Subject: [PATCH 156/382] Move MSC2432 stuff onto unstable prefix (#6948) it's not in the spec yet, so needs to be unstable. Also add a feature flag for it. Also add a test for admin users. --- changelog.d/6948.feature | 1 + synapse/rest/client/v1/room.py | 8 +++++++- synapse/rest/client/versions.py | 2 ++ tests/rest/client/v1/test_rooms.py | 16 +++++++++++++--- 4 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6948.feature diff --git a/changelog.d/6948.feature b/changelog.d/6948.feature new file mode 100644 index 0000000000..40fe7fc9a9 --- /dev/null +++ b/changelog.d/6948.feature @@ -0,0 +1 @@ +Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). 
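
Because the endpoint moves under an unstable prefix, the servlet below swaps `client_patterns` for a hand-written regex. A quick standalone check of what that pattern is expected to accept (the room ID here is made up for illustration):

```python
import re

pattern = re.compile(
    r"^/_matrix/client/unstable/org\.matrix\.msc2432"
    r"/rooms/(?P<room_id>[^/]*)/aliases"
)

m = pattern.match(
    "/_matrix/client/unstable/org.matrix.msc2432/rooms/!abc:example.com/aliases"
)
assert m is not None
assert m.group("room_id") == "!abc:example.com"

# The old stable-prefix path no longer matches:
assert pattern.match("/_matrix/client/r0/rooms/!abc:example.com/aliases") is None
```
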
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 143dc738c6..64f51406fb 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -16,6 +16,7 @@
 """ This module contains REST servlets to do with rooms: /rooms/<paths> """
 import logging
+import re
 from typing import List, Optional
 
 from six.moves.urllib import parse as urlparse
@@ -848,7 +849,12 @@ class RoomTypingRestServlet(RestServlet):
 
 
 class RoomAliasListServlet(RestServlet):
-    PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/aliases", unstable=False)
+    PATTERNS = [
+        re.compile(
+            r"^/_matrix/client/unstable/org\.matrix\.msc2432"
+            r"/rooms/(?P<room_id>[^/]*)/aliases"
+        ),
+    ]
 
     def __init__(self, hs: "synapse.server.HomeServer"):
         super().__init__()
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 3eeb3607f4..d90a6a890b 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -72,6 +72,8 @@ class VersionsRestServlet(RestServlet):
                     "org.matrix.label_based_filtering": True,
                     # Implements support for cross signing as described in MSC1756
                     "org.matrix.e2e_cross_signing": True,
+                    # Implements additional endpoints as described in MSC2432
+                    "org.matrix.msc2432": True,
                 },
             },
         )
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index fb08a45d27..f82655677c 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1729,8 +1729,7 @@ class ContextTestCase(unittest.HomeserverTestCase):
         self.assertEqual(events_after[1].get("content"), {}, events_after[1])
 
 
-class DirectoryTestCase(unittest.HomeserverTestCase):
-
+class RoomAliasListTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         directory.register_servlets,
@@ -1756,6 +1755,16 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         res = self._get_aliases(user_tok, expected_code=403)
         self.assertEqual(res["errcode"], "M_FORBIDDEN")
 
+    def test_admin_user(self):
+        alias1 = self._random_alias()
+        self._set_alias_via_directory(alias1)
+
+        self.register_user("user", "test", admin=True)
+        user_tok = self.login("user", "test")
+
+        res = self._get_aliases(user_tok)
+        self.assertEqual(res["aliases"], [alias1])
+
     def test_with_aliases(self):
         alias1 = self._random_alias()
         alias2 = self._random_alias()
@@ -1770,7 +1779,8 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         """Calls the endpoint under test. returns the json response object."""
         request, channel = self.make_request(
             "GET",
-            "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,),
+            "/_matrix/client/unstable/org.matrix.msc2432/rooms/%s/aliases"
+            % (self.room_id,),
             access_token=access_token,
         )
         self.render(request)

From 099c96b89b54b58ecb9b6b6ed781f66f97dea112 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 19 Feb 2020 11:37:35 +0000
Subject: [PATCH 157/382] Revert `get_auth_chain_ids` changes (#6951)

---
 changelog.d/6951.misc                         |  1 +
 .../data_stores/main/event_federation.py      | 28 ------------------
 2 files changed, 1 insertion(+), 28 deletions(-)
 create mode 100644 changelog.d/6951.misc

diff --git a/changelog.d/6951.misc b/changelog.d/6951.misc
new file mode 100644
index 0000000000..378f52f0a7
--- /dev/null
+++ b/changelog.d/6951.misc
@@ -0,0 +1 @@
+Revert #6937.
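
With the Postgres fast path reverted below, all database engines fall back to the iterative frontier walk over `event_auth`. A pure-Python sketch of that traversal, using an in-memory dict in place of the table:

```python
def get_auth_chain_ids(event_auth, event_ids, include_given=False):
    # `event_auth` maps event_id -> iterable of auth event IDs. Walk the
    # graph breadth-first, expanding the frontier until nothing new appears.
    results = set(event_ids) if include_given else set()
    front = set(event_ids)
    while front:
        new_front = set()
        for event_id in front:
            new_front.update(event_auth.get(event_id, ()))
        new_front -= results
        results |= new_front
        front = new_front
    return list(results)


graph = {"$c": ["$b"], "$b": ["$a"], "$a": []}
assert sorted(get_auth_chain_ids(graph, ["$c"])) == ["$a", "$b"]
```
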
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index dcc375b840..60c67457b4 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -26,7 +26,6 @@ from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.data_stores.main.signatures import SignatureWorkerStore
 from synapse.storage.database import Database
-from synapse.storage.engines import PostgresEngine
 from synapse.util.caches.descriptors import cached
 
 logger = logging.getLogger(__name__)
@@ -67,33 +66,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         else:
             results = set()
 
-        if isinstance(self.database_engine, PostgresEngine):
-            # For efficiency we make the database do this if we can.
-
-            # We need to be a little careful with querying large amounts at
-            # once, for some reason postgres really doesn't like it. We do this
-            # by only asking for auth chain of 500 events at a time.
-            event_ids = list(event_ids)
-            chunks = [event_ids[x : x + 500] for x in range(0, len(event_ids), 500)]
-            for chunk in chunks:
-                sql = """
-                    WITH RECURSIVE auth_chain(event_id) AS (
-                        SELECT auth_id FROM event_auth WHERE event_id = ANY(?)
-                        UNION
-                        SELECT auth_id FROM event_auth
-                        INNER JOIN auth_chain USING (event_id)
-                    )
-                    SELECT event_id FROM auth_chain
-                """
-                txn.execute(sql, (chunk,))
-
-                results.update(event_id for event_id, in txn)
-
-            return list(results)
-
-        # Database doesn't necessarily support recursive CTE, so we fall
-        # back to doing it manually.
-
         base_sql = "SELECT auth_id FROM event_auth WHERE "
 
         front = set(event_ids)

From 197b08de35a7cca7e45deec70312c36aa70a1662 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 19 Feb 2020 13:48:32 +0000
Subject: [PATCH 158/382] 1.11.0rc1

---
 CHANGES.md               | 73 ++++++++++++++++++++++++++++++++++++++++
 changelog.d/6769.feature |  1 -
 changelog.d/6781.bugfix  |  1 -
 changelog.d/6821.misc    |  1 -
 changelog.d/6823.misc    |  1 -
 changelog.d/6825.bugfix  |  1 -
 changelog.d/6827.misc    |  1 -
 changelog.d/6833.misc    |  1 -
 changelog.d/6834.misc    |  1 -
 changelog.d/6836.misc    |  1 -
 changelog.d/6837.misc    |  1 -
 changelog.d/6840.misc    |  1 -
 changelog.d/6844.bugfix  |  1 -
 changelog.d/6846.doc     |  1 -
 changelog.d/6847.misc    |  1 -
 changelog.d/6849.bugfix  |  1 -
 changelog.d/6854.misc    |  1 -
 changelog.d/6855.misc    |  1 -
 changelog.d/6856.misc    |  1 -
 changelog.d/6857.misc    |  1 -
 changelog.d/6858.misc    |  1 -
 changelog.d/6862.misc    |  1 -
 changelog.d/6864.misc    |  1 -
 changelog.d/6866.feature |  1 -
 changelog.d/6869.misc    |  1 -
 changelog.d/6871.misc    |  1 -
 changelog.d/6872.misc    |  1 -
 changelog.d/6873.feature |  1 -
 changelog.d/6877.removal |  1 -
 changelog.d/6882.misc    |  1 -
 changelog.d/6883.misc    |  1 -
 changelog.d/6887.misc    |  1 -
 changelog.d/6888.feature |  1 -
 changelog.d/6891.doc     |  1 -
 changelog.d/6901.misc    |  1 -
 changelog.d/6904.removal |  1 -
 changelog.d/6905.doc     |  1 -
 changelog.d/6906.doc     |  1 -
 changelog.d/6907.doc     |  1 -
 changelog.d/6909.doc     |  1 -
 changelog.d/6915.misc    |  1 -
 changelog.d/6918.docker  |  1 -
 changelog.d/6919.misc    |  1 -
 changelog.d/6920.misc    |  1 -
 changelog.d/6921.docker  |  1 -
 changelog.d/6936.misc    |  1 -
 changelog.d/6937.misc    |  1 -
 changelog.d/6938.doc     |  1 -
 changelog.d/6939.feature |  1 -
 changelog.d/6940.doc     |  1 -
 changelog.d/6945.bugfix  |  1 -
 changelog.d/6947.misc    |  1 -
 changelog.d/6948.feature |  1 -
 changelog.d/6949.feature |  1 -
changelog.d/6950.misc | 1 - changelog.d/6951.misc | 1 - synapse/__init__.py | 2 +- 57 files changed, 74 insertions(+), 56 deletions(-) delete mode 100644 changelog.d/6769.feature delete mode 100644 changelog.d/6781.bugfix delete mode 100644 changelog.d/6821.misc delete mode 100644 changelog.d/6823.misc delete mode 100644 changelog.d/6825.bugfix delete mode 100644 changelog.d/6827.misc delete mode 100644 changelog.d/6833.misc delete mode 100644 changelog.d/6834.misc delete mode 100644 changelog.d/6836.misc delete mode 100644 changelog.d/6837.misc delete mode 100644 changelog.d/6840.misc delete mode 100644 changelog.d/6844.bugfix delete mode 100644 changelog.d/6846.doc delete mode 100644 changelog.d/6847.misc delete mode 100644 changelog.d/6849.bugfix delete mode 100644 changelog.d/6854.misc delete mode 100644 changelog.d/6855.misc delete mode 100644 changelog.d/6856.misc delete mode 100644 changelog.d/6857.misc delete mode 100644 changelog.d/6858.misc delete mode 100644 changelog.d/6862.misc delete mode 100644 changelog.d/6864.misc delete mode 100644 changelog.d/6866.feature delete mode 100644 changelog.d/6869.misc delete mode 100644 changelog.d/6871.misc delete mode 100644 changelog.d/6872.misc delete mode 100644 changelog.d/6873.feature delete mode 100644 changelog.d/6877.removal delete mode 100644 changelog.d/6882.misc delete mode 100644 changelog.d/6883.misc delete mode 100644 changelog.d/6887.misc delete mode 100644 changelog.d/6888.feature delete mode 100644 changelog.d/6891.doc delete mode 100644 changelog.d/6901.misc delete mode 100644 changelog.d/6904.removal delete mode 100644 changelog.d/6905.doc delete mode 100644 changelog.d/6906.doc delete mode 100644 changelog.d/6907.doc delete mode 100644 changelog.d/6909.doc delete mode 100644 changelog.d/6915.misc delete mode 100644 changelog.d/6918.docker delete mode 100644 changelog.d/6919.misc delete mode 100644 changelog.d/6920.misc delete mode 100644 changelog.d/6921.docker delete mode 100644 changelog.d/6936.misc delete mode 100644 changelog.d/6937.misc delete mode 100644 changelog.d/6938.doc delete mode 100644 changelog.d/6939.feature delete mode 100644 changelog.d/6940.doc delete mode 100644 changelog.d/6945.bugfix delete mode 100644 changelog.d/6947.misc delete mode 100644 changelog.d/6948.feature delete mode 100644 changelog.d/6949.feature delete mode 100644 changelog.d/6950.misc delete mode 100644 changelog.d/6951.misc diff --git a/CHANGES.md b/CHANGES.md index 37b650a848..4032db792e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,76 @@ +Synapse 1.11.0rc1 (2020-02-19) +============================== + +Features +-------- + +- Admin API to add or modify threepids of user accounts. ([\#6769](https://github.com/matrix-org/synapse/issues/6769)) +- Limit the number of events that can be requested by the backfill federation API to 100. ([\#6864](https://github.com/matrix-org/synapse/issues/6864)) +- Add ability to run some group APIs on workers. ([\#6866](https://github.com/matrix-org/synapse/issues/6866)) +- Reject device display names over 100 characters in length. ([\#6882](https://github.com/matrix-org/synapse/issues/6882)) +- Add ability to route federation user device queries to workers. ([\#6873](https://github.com/matrix-org/synapse/issues/6873)) +- The result of a user directory search can now be filtered via the spam checker. 
([\#6888](https://github.com/matrix-org/synapse/issues/6888))
+- Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
+- Stop sending `m.room.alias` events when adding / removing aliases. Check `alt_aliases` in the latest `m.room.canonical_alias` event when deleting an alias. ([\#6904](https://github.com/matrix-org/synapse/issues/6904))
+
+
+Bugfixes
+--------
+
+- Fixed third party event rules function `on_create_room`'s return value being ignored. ([\#6781](https://github.com/matrix-org/synapse/issues/6781))
+- Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting. ([\#6825](https://github.com/matrix-org/synapse/issues/6825))
+- Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. ([\#6849](https://github.com/matrix-org/synapse/issues/6849))
+- Fix errors from logging in the purge jobs related to the message retention policies support. ([\#6945](https://github.com/matrix-org/synapse/issues/6945))
+- Return a 404 instead of 200 for querying information of a non-existent user through the admin API. ([\#6901](https://github.com/matrix-org/synapse/issues/6901))
+
+
+Updates to the Docker image
+---------------------------
+
+- The deprecated "generate-config-on-the-fly" mode is no longer supported. ([\#6918](https://github.com/matrix-org/synapse/issues/6918))
+
+
+Improved Documentation
+----------------------
+
+- Add details of PR merge strategy to contributing docs. ([\#6846](https://github.com/matrix-org/synapse/issues/6846))
+- Spell out that the last event sent to a room won't be deleted by a purge. ([\#6891](https://github.com/matrix-org/synapse/issues/6891))
+- Update Synapse's documentation to warn about the deprecation of ACME v1. ([\#6905](https://github.com/matrix-org/synapse/issues/6905), [\#6907](https://github.com/matrix-org/synapse/issues/6907), [\#6909](https://github.com/matrix-org/synapse/issues/6909))
+- Add documentation for the spam checker. ([\#6906](https://github.com/matrix-org/synapse/issues/6906))
+- Fix worker docs to point `/publicised_groups` API correctly. ([\#6938](https://github.com/matrix-org/synapse/issues/6938))
+- Clean up and update docs on setting up federation. ([\#6940](https://github.com/matrix-org/synapse/issues/6940))
+- Add a warning about indentation to generated configuration files. ([\#6920](https://github.com/matrix-org/synapse/issues/6920))
+- Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. ([\#6921](https://github.com/matrix-org/synapse/issues/6921))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. ([\#6877](https://github.com/matrix-org/synapse/issues/6877))
+
+
+Internal Changes
+----------------
+
+- Add type hints to `SyncHandler`. ([\#6821](https://github.com/matrix-org/synapse/issues/6821))
+- Refactoring work in preparation for changing the event redaction algorithm.
([\#6823](https://github.com/matrix-org/synapse/issues/6823), [\#6827](https://github.com/matrix-org/synapse/issues/6827), [\#6854](https://github.com/matrix-org/synapse/issues/6854), [\#6856](https://github.com/matrix-org/synapse/issues/6856), [\#6857](https://github.com/matrix-org/synapse/issues/6857), [\#6858](https://github.com/matrix-org/synapse/issues/6858)) +- Change the default power levels of invites, tombstones and server ACLs for new rooms. ([\#6834](https://github.com/matrix-org/synapse/issues/6834)) +- Fix stacktraces when using `ObservableDeferred` and async/await. ([\#6836](https://github.com/matrix-org/synapse/issues/6836)) +- Port much of `synapse.handlers.federation` to async/await. ([\#6837](https://github.com/matrix-org/synapse/issues/6837), [\#6840](https://github.com/matrix-org/synapse/issues/6840)) +- Populate `rooms.room_version` database column at startup, rather than in a background update. ([\#6847](https://github.com/matrix-org/synapse/issues/6847)) +- Update pip install directions in readme to avoid error when using zsh. ([\#6855](https://github.com/matrix-org/synapse/issues/6855)) +- Reduce amount we log at `INFO` level. ([\#6833](https://github.com/matrix-org/synapse/issues/6833), [\#6862](https://github.com/matrix-org/synapse/issues/6862)) +- Remove unused `get_room_stats_state` method. ([\#6869](https://github.com/matrix-org/synapse/issues/6869)) +- Add typing to `synapse.federation.sender` and port to async/await. ([\#6871](https://github.com/matrix-org/synapse/issues/6871)) +- Refactor _EventInternalMetadata object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872)) +- Add an additional entry to the SyTest blacklist for worker mode. ([\#6883](https://github.com/matrix-org/synapse/issues/6883)) +- Fix the use of sed in the linting scripts when using BSD sed. ([\#6887](https://github.com/matrix-org/synapse/issues/6887)) +- Add type hints to the spam checker module. ([\#6915](https://github.com/matrix-org/synapse/issues/6915)) +- Convert the directory handler tests to use HomeserverTestCase. ([\#6919](https://github.com/matrix-org/synapse/issues/6919)) +- Increase DB/CPU perf of `_is_server_still_joined` check. ([\#6936](https://github.com/matrix-org/synapse/issues/6936)) +- Tiny optimisation for incoming HTTP request dispatch. ([\#6950](https://github.com/matrix-org/synapse/issues/6950)) + + Synapse 1.10.1 (2020-02-17) =========================== diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature deleted file mode 100644 index 8a60e12907..0000000000 --- a/changelog.d/6769.feature +++ /dev/null @@ -1 +0,0 @@ -Admin API to add or modify threepids of user accounts. \ No newline at end of file diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix deleted file mode 100644 index 47cd671bff..0000000000 --- a/changelog.d/6781.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed third party event rules function `on_create_room`'s return value being ignored. diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc deleted file mode 100644 index 1d5265d5e2..0000000000 --- a/changelog.d/6821.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `SyncHandler`. diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6823.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. 
diff --git a/changelog.d/6825.bugfix b/changelog.d/6825.bugfix
deleted file mode 100644
index d3cacd6d9a..0000000000
--- a/changelog.d/6825.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting.
\ No newline at end of file
diff --git a/changelog.d/6827.misc b/changelog.d/6827.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6827.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc
deleted file mode 100644
index 8a0605f90b..0000000000
--- a/changelog.d/6833.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reducing log level to DEBUG for synapse.storage.TIME.
diff --git a/changelog.d/6834.misc b/changelog.d/6834.misc
deleted file mode 100644
index 79acebe516..0000000000
--- a/changelog.d/6834.misc
+++ /dev/null
@@ -1 +0,0 @@
-Change the default power levels of invites, tombstones and server ACLs for new rooms.
\ No newline at end of file
diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc
deleted file mode 100644
index 232488e1e5..0000000000
--- a/changelog.d/6836.misc
+++ /dev/null
@@ -1 +0,0 @@
-Fix stacktraces when using `ObservableDeferred` and async/await.
diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc
deleted file mode 100644
index 0496f12de8..0000000000
--- a/changelog.d/6837.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc
deleted file mode 100644
index 0496f12de8..0000000000
--- a/changelog.d/6840.misc
+++ /dev/null
@@ -1 +0,0 @@
-Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
deleted file mode 100644
index e84aa1029f..0000000000
--- a/changelog.d/6844.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc
deleted file mode 100644
index ad69d608c0..0000000000
--- a/changelog.d/6846.doc
+++ /dev/null
@@ -1 +0,0 @@
-Add details of PR merge strategy to contributing docs.
\ No newline at end of file
diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc
deleted file mode 100644
index 094e911adb..0000000000
--- a/changelog.d/6847.misc
+++ /dev/null
@@ -1 +0,0 @@
-Populate `rooms.room_version` database column at startup, rather than in a background update.
diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix
deleted file mode 100644
index d928a26ec6..0000000000
--- a/changelog.d/6849.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank.
diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6854.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc
deleted file mode 100644
index 904361ddfb..0000000000
--- a/changelog.d/6855.misc
+++ /dev/null
@@ -1 +0,0 @@
-Update pip install directions in readme to avoid error when using zsh.
diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
deleted file mode 100644
index 08aa80bcd9..0000000000
--- a/changelog.d/6856.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6857.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6858.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc deleted file mode 100644 index 83626d2939..0000000000 --- a/changelog.d/6862.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce amount we log at `INFO` level. diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc deleted file mode 100644 index d24eb68460..0000000000 --- a/changelog.d/6864.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the number of events that can be requested by the backfill federation API to 100. diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature deleted file mode 100644 index 256feab6ff..0000000000 --- a/changelog.d/6866.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to run some group APIs on workers. diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc deleted file mode 100644 index 14f88f9bb7..0000000000 --- a/changelog.d/6869.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused `get_room_stats_state` method. diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc deleted file mode 100644 index 5161af9983..0000000000 --- a/changelog.d/6871.misc +++ /dev/null @@ -1 +0,0 @@ -Add typing to `synapse.federation.sender` and port to async/await. diff --git a/changelog.d/6872.misc b/changelog.d/6872.misc deleted file mode 100644 index 215a0c82c3..0000000000 --- a/changelog.d/6872.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor _EventInternalMetadata object to improve type safety. diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature deleted file mode 100644 index bbedf8f7f0..0000000000 --- a/changelog.d/6873.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to route federation user device queries to workers. diff --git a/changelog.d/6877.removal b/changelog.d/6877.removal deleted file mode 100644 index 9545e31fbe..0000000000 --- a/changelog.d/6877.removal +++ /dev/null @@ -1 +0,0 @@ -Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc deleted file mode 100644 index e8382e36ae..0000000000 --- a/changelog.d/6882.misc +++ /dev/null @@ -1 +0,0 @@ -Reject device display names over 100 characters in length. diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc deleted file mode 100644 index e0837d7987..0000000000 --- a/changelog.d/6883.misc +++ /dev/null @@ -1 +0,0 @@ -Add an additional entry to the SyTest blacklist for worker mode. diff --git a/changelog.d/6887.misc b/changelog.d/6887.misc deleted file mode 100644 index b351d47c7b..0000000000 --- a/changelog.d/6887.misc +++ /dev/null @@ -1 +0,0 @@ -Fix the use of sed in the linting scripts when using BSD sed. diff --git a/changelog.d/6888.feature b/changelog.d/6888.feature deleted file mode 100644 index 1b7ac0c823..0000000000 --- a/changelog.d/6888.feature +++ /dev/null @@ -1 +0,0 @@ -The result of a user directory search can now be filtered via the spam checker. 
diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc deleted file mode 100644 index 2f46c385b7..0000000000 --- a/changelog.d/6891.doc +++ /dev/null @@ -1 +0,0 @@ -Spell out that the last event sent to a room won't be deleted by a purge. diff --git a/changelog.d/6901.misc b/changelog.d/6901.misc deleted file mode 100644 index b2f12bbe86..0000000000 --- a/changelog.d/6901.misc +++ /dev/null @@ -1 +0,0 @@ -Return a 404 instead of 200 for querying information of a non-existant user through the admin API. \ No newline at end of file diff --git a/changelog.d/6904.removal b/changelog.d/6904.removal deleted file mode 100644 index a5cc0c3605..0000000000 --- a/changelog.d/6904.removal +++ /dev/null @@ -1 +0,0 @@ -Stop sending alias events during adding / removing aliases. Check alt_aliases in the latest canonical aliases event when deleting an alias. diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc deleted file mode 100644 index be0e698af8..0000000000 --- a/changelog.d/6905.doc +++ /dev/null @@ -1 +0,0 @@ -Update Synapse's documentation to warn about the deprecation of ACME v1. diff --git a/changelog.d/6906.doc b/changelog.d/6906.doc deleted file mode 100644 index 053b2436ae..0000000000 --- a/changelog.d/6906.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation for the spam checker. diff --git a/changelog.d/6907.doc b/changelog.d/6907.doc deleted file mode 100644 index be0e698af8..0000000000 --- a/changelog.d/6907.doc +++ /dev/null @@ -1 +0,0 @@ -Update Synapse's documentation to warn about the deprecation of ACME v1. diff --git a/changelog.d/6909.doc b/changelog.d/6909.doc deleted file mode 100644 index be0e698af8..0000000000 --- a/changelog.d/6909.doc +++ /dev/null @@ -1 +0,0 @@ -Update Synapse's documentation to warn about the deprecation of ACME v1. diff --git a/changelog.d/6915.misc b/changelog.d/6915.misc deleted file mode 100644 index 3a181ef243..0000000000 --- a/changelog.d/6915.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to the spam checker module. diff --git a/changelog.d/6918.docker b/changelog.d/6918.docker deleted file mode 100644 index cc2db5e071..0000000000 --- a/changelog.d/6918.docker +++ /dev/null @@ -1 +0,0 @@ -The deprecated "generate-config-on-the-fly" mode is no longer supported. diff --git a/changelog.d/6919.misc b/changelog.d/6919.misc deleted file mode 100644 index aa2cd89998..0000000000 --- a/changelog.d/6919.misc +++ /dev/null @@ -1 +0,0 @@ -Convert the directory handler tests to use HomeserverTestCase. diff --git a/changelog.d/6920.misc b/changelog.d/6920.misc deleted file mode 100644 index d333add990..0000000000 --- a/changelog.d/6920.misc +++ /dev/null @@ -1 +0,0 @@ -Add a warning about indentation to generated configuration files. diff --git a/changelog.d/6921.docker b/changelog.d/6921.docker deleted file mode 100644 index 152e723339..0000000000 --- a/changelog.d/6921.docker +++ /dev/null @@ -1 +0,0 @@ -Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. diff --git a/changelog.d/6936.misc b/changelog.d/6936.misc deleted file mode 100644 index 9400725017..0000000000 --- a/changelog.d/6936.misc +++ /dev/null @@ -1 +0,0 @@ -Increase DB/CPU perf of `_is_server_still_joined` check. diff --git a/changelog.d/6937.misc b/changelog.d/6937.misc deleted file mode 100644 index 6d00e58654..0000000000 --- a/changelog.d/6937.misc +++ /dev/null @@ -1 +0,0 @@ -Increase perf of `get_auth_chain_ids` used in state res v2. 
diff --git a/changelog.d/6938.doc b/changelog.d/6938.doc deleted file mode 100644 index 117f76f48a..0000000000 --- a/changelog.d/6938.doc +++ /dev/null @@ -1 +0,0 @@ -Fix worker docs to point `/publicised_groups` API correctly. diff --git a/changelog.d/6939.feature b/changelog.d/6939.feature deleted file mode 100644 index 40fe7fc9a9..0000000000 --- a/changelog.d/6939.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/changelog.d/6940.doc b/changelog.d/6940.doc deleted file mode 100644 index 8c75f48d3d..0000000000 --- a/changelog.d/6940.doc +++ /dev/null @@ -1 +0,0 @@ -Clean up and update docs on setting up federation. diff --git a/changelog.d/6945.bugfix b/changelog.d/6945.bugfix deleted file mode 100644 index 8561be16a4..0000000000 --- a/changelog.d/6945.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix errors from logging in the purge jobs related to the message retention policies support. diff --git a/changelog.d/6947.misc b/changelog.d/6947.misc deleted file mode 100644 index 6d00e58654..0000000000 --- a/changelog.d/6947.misc +++ /dev/null @@ -1 +0,0 @@ -Increase perf of `get_auth_chain_ids` used in state res v2. diff --git a/changelog.d/6948.feature b/changelog.d/6948.feature deleted file mode 100644 index 40fe7fc9a9..0000000000 --- a/changelog.d/6948.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/changelog.d/6949.feature b/changelog.d/6949.feature deleted file mode 100644 index 40fe7fc9a9..0000000000 --- a/changelog.d/6949.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/changelog.d/6950.misc b/changelog.d/6950.misc deleted file mode 100644 index 1c88936b8b..0000000000 --- a/changelog.d/6950.misc +++ /dev/null @@ -1 +0,0 @@ -Tiny optimisation for incoming HTTP request dispatch. diff --git a/changelog.d/6951.misc b/changelog.d/6951.misc deleted file mode 100644 index 378f52f0a7..0000000000 --- a/changelog.d/6951.misc +++ /dev/null @@ -1 +0,0 @@ -Revert #6937. diff --git a/synapse/__init__.py b/synapse/__init__.py index 8313f177d2..076a297b87 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.1" +__version__ = "1.11.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 0001e8397e58906f317361f1548be61f41064962 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 19 Feb 2020 13:54:05 +0000 Subject: [PATCH 159/382] update changes.md --- CHANGES.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4032db792e..fabf909fa3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,12 +7,12 @@ Features - Admin API to add or modify threepids of user accounts. ([\#6769](https://github.com/matrix-org/synapse/issues/6769)) - Limit the number of events that can be requested by the backfill federation API to 100. ([\#6864](https://github.com/matrix-org/synapse/issues/6864)) - Add ability to run some group APIs on workers. ([\#6866](https://github.com/matrix-org/synapse/issues/6866)) -- Reject device display names over 100 characters in length. 
([\#6882](https://github.com/matrix-org/synapse/issues/6882))
+- Reject device display names over 100 characters in length to prevent abuse. ([\#6882](https://github.com/matrix-org/synapse/issues/6882))
 - Add ability to route federation user device queries to workers. ([\#6873](https://github.com/matrix-org/synapse/issues/6873))
 - The result of a user directory search can now be filtered via the spam checker. ([\#6888](https://github.com/matrix-org/synapse/issues/6888))
-- Implement `GET /_matrix/client/r0/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
+- Implement new `GET /_matrix/client/unstable/org.matrix.msc2432/rooms/{roomId}/aliases` endpoint as per [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#6939](https://github.com/matrix-org/synapse/issues/6939), [\#6948](https://github.com/matrix-org/synapse/issues/6948), [\#6949](https://github.com/matrix-org/synapse/issues/6949))
 - Stop sending `m.room.alias` events when adding / removing aliases. Check `alt_aliases` in the latest `m.room.canonical_alias` event when deleting an alias. ([\#6904](https://github.com/matrix-org/synapse/issues/6904))
-
+- Change the default power levels of invites, tombstones and server ACLs for new rooms. ([\#6834](https://github.com/matrix-org/synapse/issues/6834))
 
 Bugfixes
 --------
@@ -41,6 +41,7 @@ Improved Documentation
 - Clean up and update docs on setting up federation. ([\#6940](https://github.com/matrix-org/synapse/issues/6940))
 - Add a warning about indentation to generated configuration files. ([\#6920](https://github.com/matrix-org/synapse/issues/6920))
 - Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. ([\#6921](https://github.com/matrix-org/synapse/issues/6921))
+- Update pip install directions in readme to avoid error when using zsh.
([\#6855](https://github.com/matrix-org/synapse/issues/6855)) - Reduce amount we log at `INFO` level. ([\#6833](https://github.com/matrix-org/synapse/issues/6833), [\#6862](https://github.com/matrix-org/synapse/issues/6862)) - Remove unused `get_room_stats_state` method. ([\#6869](https://github.com/matrix-org/synapse/issues/6869)) - Add typing to `synapse.federation.sender` and port to async/await. ([\#6871](https://github.com/matrix-org/synapse/issues/6871)) -- Refactor _EventInternalMetadata object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872)) +- Refactor `_EventInternalMetadata` object to improve type safety. ([\#6872](https://github.com/matrix-org/synapse/issues/6872)) - Add an additional entry to the SyTest blacklist for worker mode. ([\#6883](https://github.com/matrix-org/synapse/issues/6883)) - Fix the use of sed in the linting scripts when using BSD sed. ([\#6887](https://github.com/matrix-org/synapse/issues/6887)) - Add type hints to the spam checker module. ([\#6915](https://github.com/matrix-org/synapse/issues/6915)) From 2b37eabca1e9355e2e2ab8f65bbdda12431ecc28 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Feb 2020 15:04:47 +0000 Subject: [PATCH 160/382] Reduce auth chains fetched during v2 state res. (#6952) The state res v2 algorithm only cares about the difference between auth chains, so we can pass in the known common state to the `get_auth_chain` storage function so that it can ignore those events. --- changelog.d/6952.misc | 1 + synapse/state/__init__.py | 15 ++++++---- synapse/state/v2.py | 2 +- .../data_stores/main/event_federation.py | 28 +++++++++++++++---- tests/state/test_v2.py | 6 ++-- 5 files changed, 39 insertions(+), 13 deletions(-) create mode 100644 changelog.d/6952.misc diff --git a/changelog.d/6952.misc b/changelog.d/6952.misc new file mode 100644 index 0000000000..e26dc5cab8 --- /dev/null +++ b/changelog.d/6952.misc @@ -0,0 +1 @@ +Improve perf of v2 state res for large rooms. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fdd6bef6b4..df7a4f6a89 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -16,7 +16,7 @@ import logging from collections import namedtuple -from typing import Dict, Iterable, List, Optional +from typing import Dict, Iterable, List, Optional, Set from six import iteritems, itervalues @@ -662,7 +662,7 @@ class StateResolutionStore(object): allow_rejected=allow_rejected, ) - def get_auth_chain(self, event_ids): + def get_auth_chain(self, event_ids: List[str], ignore_events: Set[str]): """Gets the full auth chain for a set of events (including rejected events). @@ -674,11 +674,16 @@ class StateResolutionStore(object): presence of rejected events Args: - event_ids (list): The event IDs of the events to fetch the auth - chain for. Must be state events. + event_ids: The event IDs of the events to fetch the auth chain for. + Must be state events. + ignore_events: Set of events to exclude from the returned auth + chain. + Returns: Deferred[list[str]]: List of event IDs of the auth chain. 
""" - return self.store.get_auth_chain_ids(event_ids, include_given=True) + return self.store.get_auth_chain_ids( + event_ids, include_given=True, ignore_events=ignore_events, + ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 531018c6a5..75fe58305a 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -248,7 +248,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store): and eid not in common ) - auth_chain = yield state_res_store.get_auth_chain(auth_ids) + auth_chain = yield state_res_store.get_auth_chain(auth_ids, common) auth_ids.update(auth_chain) auth_sets.append(auth_ids) diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py index 60c67457b4..e16da2577d 100644 --- a/synapse/storage/data_stores/main/event_federation.py +++ b/synapse/storage/data_stores/main/event_federation.py @@ -14,6 +14,7 @@ # limitations under the License. import itertools import logging +from typing import List, Optional, Set from six.moves import range from six.moves.queue import Empty, PriorityQueue @@ -46,21 +47,37 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas event_ids, include_given=include_given ).addCallback(self.get_events_as_list) - def get_auth_chain_ids(self, event_ids, include_given=False): + def get_auth_chain_ids( + self, + event_ids: List[str], + include_given: bool = False, + ignore_events: Optional[Set[str]] = None, + ): """Get auth events for given event_ids. The events *must* be state events. Args: - event_ids (list): state events - include_given (bool): include the given events in result + event_ids: state events + include_given: include the given events in result + ignore_events: Set of events to exclude from the returned auth + chain. This is useful if the caller will just discard the + given events anyway, and saves us from figuring out their auth + chains if not required. Returns: list of event_ids """ return self.db.runInteraction( - "get_auth_chain_ids", self._get_auth_chain_ids_txn, event_ids, include_given + "get_auth_chain_ids", + self._get_auth_chain_ids_txn, + event_ids, + include_given, + ignore_events, ) - def _get_auth_chain_ids_txn(self, txn, event_ids, include_given): + def _get_auth_chain_ids_txn(self, txn, event_ids, include_given, ignore_events): + if ignore_events is None: + ignore_events = set() + if include_given: results = set(event_ids) else: @@ -80,6 +97,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas txn.execute(base_sql + clause, list(args)) new_front.update([r[0] for r in txn]) + new_front -= ignore_events new_front -= results front = new_front diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 5bafad9f19..5059ade850 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -603,7 +603,7 @@ class TestStateResolutionStore(object): return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} - def get_auth_chain(self, event_ids): + def get_auth_chain(self, event_ids, ignore_events): """Gets the full auth chain for a set of events (including rejected events). @@ -617,6 +617,8 @@ class TestStateResolutionStore(object): Args: event_ids (list): The event IDs of the events to fetch the auth chain for. Must be state events. + ignore_events: Set of events to exclude from the returned auth + chain. Returns: Deferred[list[str]]: List of event IDs of the auth chain. 
@@ -627,7 +629,7 @@ class TestStateResolutionStore(object): stack = list(event_ids) while stack: event_id = stack.pop() - if event_id in result: + if event_id in result or event_id in ignore_events: continue result.add(event_id) From fc87d2ffb39ca17065e19bd42ef25f1c84862d2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Feb 2020 15:09:00 +0000 Subject: [PATCH 161/382] Freeze allocated objects on startup. (#6953) This may make gc go a bit faster as the gc will know things like caches/data stores etc. are frozen without having to check. --- changelog.d/6953.misc | 1 + synapse/app/_base.py | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelog.d/6953.misc diff --git a/changelog.d/6953.misc b/changelog.d/6953.misc new file mode 100644 index 0000000000..0ab52041cf --- /dev/null +++ b/changelog.d/6953.misc @@ -0,0 +1 @@ +Reduce time spent doing GC by freezing objects on startup. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 0e8b467a3e..109b1e2fb5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -279,6 +279,15 @@ def start(hs, listeners=None): setup_sentry(hs) setup_sdnotify(hs) + + # We now freeze all allocated objects in the hopes that (almost) + # everything currently allocated are things that will be used for the + # rest of time. Doing so means less work each GC (hopefully). + # + # This only works on Python 3.7 + if sys.version_info >= (3, 7): + gc.collect() + gc.freeze() except Exception: traceback.print_exc(file=sys.stderr) reactor = hs.get_reactor() From 7b7c3cedf2fdc0d0c05bbc651e0ff5b59921c3a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 Feb 2020 15:47:11 +0000 Subject: [PATCH 162/382] Minor perf fixes to `get_auth_chain_ids`. --- changelog.d/6954.misc | 1 + synapse/storage/data_stores/main/event_federation.py | 10 ++++------ synapse/storage/database.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6954.misc diff --git a/changelog.d/6954.misc b/changelog.d/6954.misc new file mode 100644 index 0000000000..8b84ce2f19 --- /dev/null +++ b/changelog.d/6954.misc @@ -0,0 +1 @@ +Minor perf fixes to `get_auth_chain_ids`. 
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py index e16da2577d..750ec1b70d 100644 --- a/synapse/storage/data_stores/main/event_federation.py +++ b/synapse/storage/data_stores/main/event_federation.py @@ -16,7 +16,6 @@ import itertools import logging from typing import List, Optional, Set -from six.moves import range from six.moves.queue import Empty, PriorityQueue from twisted.internet import defer @@ -28,6 +27,7 @@ from synapse.storage.data_stores.main.events_worker import EventsWorkerStore from synapse.storage.data_stores.main.signatures import SignatureWorkerStore from synapse.storage.database import Database from synapse.util.caches.descriptors import cached +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) @@ -88,14 +88,12 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas front = set(event_ids) while front: new_front = set() - front_list = list(front) - chunks = [front_list[x : x + 100] for x in range(0, len(front), 100)] - for chunk in chunks: + for chunk in batch_iter(front, 100): clause, args = make_in_list_sql_clause( txn.database_engine, "event_id", chunk ) - txn.execute(base_sql + clause, list(args)) - new_front.update([r[0] for r in txn]) + txn.execute(base_sql + clause, args) + new_front.update(r[0] for r in txn) new_front -= ignore_events new_front -= results diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 3eeb2f7c04..6dcb5c04da 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1504,7 +1504,7 @@ class Database(object): def make_in_list_sql_clause( database_engine, column: str, iterable: Iterable -) -> Tuple[str, Iterable]: +) -> Tuple[str, list]: """Returns an SQL clause that checks the given column is in the iterable. On SQLite this expands to `column IN (?, ?, ...)`, whereas on Postgres From 4fb5f4d0ce0444d1d3c2f0b9576b5a91b6307372 Mon Sep 17 00:00:00 2001 From: Ruben Barkow-Kuder Date: Thu, 20 Feb 2020 11:37:57 +0100 Subject: [PATCH 163/382] Add some clarifications to README.md in the database schema directory. (#6615) Signed-off-by: Ruben Barkow-Kuder --- changelog.d/6615.misc | 1 + .../main/schema/full_schemas/README.md | 24 ++++++++++++------- 2 files changed, 17 insertions(+), 8 deletions(-) create mode 100644 changelog.d/6615.misc diff --git a/changelog.d/6615.misc b/changelog.d/6615.misc new file mode 100644 index 0000000000..9f93152565 --- /dev/null +++ b/changelog.d/6615.misc @@ -0,0 +1 @@ +Add some clarifications to `README.md` in the database schema directory. diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/data_stores/main/schema/full_schemas/README.md index bbd3f18604..c00f287190 100644 --- a/synapse/storage/data_stores/main/schema/full_schemas/README.md +++ b/synapse/storage/data_stores/main/schema/full_schemas/README.md @@ -1,13 +1,21 @@ -# Building full schema dumps +# Synapse Database Schemas -These schemas need to be made from a database that has had all background updates run. +These schemas are used as a basis to create brand new Synapse databases, on both +SQLite3 and Postgres. -To do so, use `scripts-dev/make_full_schema.sh`. This will produce -`full.sql.postgres ` and `full.sql.sqlite` files. +## Building full schema dumps + +If you want to recreate these schemas, they need to be made from a database that +has had all background updates run. + +To do so, use `scripts-dev/make_full_schema.sh`. 
This will produce new +`full.sql.postgres ` and `full.sql.sqlite` files. Ensure postgres is installed and your user has the ability to run bash commands -such as `createdb`. +such as `createdb`, then call -``` -./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ -``` + ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ + +There are currently two folders with full-schema snapshots. `16` is a snapshot +from 2015, for historical reference. The other contains the most recent full +schema snapshot. From a90d0dc5c2650eea298f8d554ca74c2cf4c097eb Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Thu, 20 Feb 2020 09:59:00 -0500 Subject: [PATCH 164/382] don't insert into the device table for remote cross-signing keys (#6956) --- changelog.d/6956.misc | 1 + .../data_stores/main/end_to_end_keys.py | 33 ++++++++++--------- 2 files changed, 19 insertions(+), 15 deletions(-) create mode 100644 changelog.d/6956.misc diff --git a/changelog.d/6956.misc b/changelog.d/6956.misc new file mode 100644 index 0000000000..5cb0894182 --- /dev/null +++ b/changelog.d/6956.misc @@ -0,0 +1 @@ +Don't record remote cross-signing keys in the `devices` table. diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py index e551606f9d..001a53f9b4 100644 --- a/synapse/storage/data_stores/main/end_to_end_keys.py +++ b/synapse/storage/data_stores/main/end_to_end_keys.py @@ -680,11 +680,6 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): 'user_signing' for a user-signing key key (dict): the key data """ - # the cross-signing keys need to occupy the same namespace as devices, - # since signatures are identified by device ID. So add an entry to the - # device table to make sure that we don't have a collision with device - # IDs - # the 'key' dict will look something like: # { # "user_id": "@alice:example.com", @@ -701,16 +696,24 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): # The "keys" property must only have one entry, which will be the public # key, so we just grab the first value in there pubkey = next(iter(key["keys"].values())) - self.db.simple_insert_txn( - txn, - "devices", - values={ - "user_id": user_id, - "device_id": pubkey, - "display_name": key_type + " signing key", - "hidden": True, - }, - ) + + # The cross-signing keys need to occupy the same namespace as devices, + # since signatures are identified by device ID. So add an entry to the + # device table to make sure that we don't have a collision with device + # IDs. + # We only need to do this for local users, since remote servers should be + # responsible for checking this for their own users. + if self.hs.is_mine_id(user_id): + self.db.simple_insert_txn( + txn, + "devices", + values={ + "user_id": user_id, + "device_id": pubkey, + "display_name": key_type + " signing key", + "hidden": True, + }, + ) # and finally, store the key itself with self._cross_signing_id_gen.get_next() as stream_id: From 99eed85a77acdc25b68f4a7b6447a5ffaecebb0d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 20 Feb 2020 16:24:04 -0500 Subject: [PATCH 165/382] Do not send alias events when creating / upgrading a room (#6941) Stop emitting room alias update events during room creation/upgrade. 
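For context, the `m.room.canonical_alias` content that the updated alias-handling code inspects has roughly this shape under MSC2432 (an illustrative sketch only; the alias values are made up):

```python
# Illustrative m.room.canonical_alias event content (example values):
content = {
    "alias": "#room:example.com",    # the canonical alias, if one is set
    "alt_aliases": [                 # alternative aliases, per MSC2432
        "#room:other.example.com",
    ],
}

# Deleting "#room:other.example.com" now means re-sending this state event
# with the entry filtered out of alt_aliases, rather than emitting a
# (now-abandoned) m.room.aliases event:
alias_str = "#room:other.example.com"
content["alt_aliases"] = [a for a in content["alt_aliases"] if a != alias_str]
```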
---
 changelog.d/6941.removal      |  1 +
 synapse/handlers/directory.py | 19 ++----------------
 synapse/handlers/room.py      | 36 +++++++++++++----------------------
 3 files changed, 16 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/6941.removal

diff --git a/changelog.d/6941.removal b/changelog.d/6941.removal
new file mode 100644
index 0000000000..8573be84b3
--- /dev/null
+++ b/changelog.d/6941.removal
@@ -0,0 +1 @@
+Stop sending m.room.aliases events during room creation and upgrade.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index db2104c5f6..921d887b24 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
+import collections.abc
 import logging
 import string
 from typing import List
@@ -282,22 +283,6 @@ class DirectoryHandler(BaseHandler):
                 Codes.NOT_FOUND,
             )
 
-    @defer.inlineCallbacks
-    def send_room_alias_update_event(self, requester, room_id):
-        aliases = yield self.store.get_aliases_for_room(room_id)
-
-        yield self.event_creation_handler.create_and_send_nonmember_event(
-            requester,
-            {
-                "type": EventTypes.Aliases,
-                "state_key": self.hs.hostname,
-                "room_id": room_id,
-                "sender": requester.user.to_string(),
-                "content": {"aliases": aliases},
-            },
-            ratelimit=False,
-        )
-
     @defer.inlineCallbacks
     def _update_canonical_alias(self, requester, user_id, room_id, room_alias):
         """
@@ -326,7 +311,7 @@ class DirectoryHandler(BaseHandler):
         alt_aliases = content.pop("alt_aliases", None)
         # If the aliases are not a list (or not found) do not attempt to modify
         # the list.
-        if isinstance(alt_aliases, list):
+        if isinstance(alt_aliases, collections.abc.Sequence):
             send_update = True
             alt_aliases = [alias for alias in alt_aliases if alias != alias_str]
             if alt_aliases:
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 49ec2f48bc..76e8f61b74 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -149,7 +149,9 @@ class RoomCreationHandler(BaseHandler):
         return ret
 
     @defer.inlineCallbacks
-    def _upgrade_room(self, requester, old_room_id, new_version):
+    def _upgrade_room(
+        self, requester: Requester, old_room_id: str, new_version: RoomVersion
+    ):
         user_id = requester.user.to_string()
 
         # start by allocating a new room id
@@ -448,19 +450,21 @@ class RoomCreationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _move_aliases_to_new_room(
-        self, requester, old_room_id, new_room_id, old_room_state
+        self,
+        requester: Requester,
+        old_room_id: str,
+        new_room_id: str,
+        old_room_state: StateMap[str],
     ):
         directory_handler = self.hs.get_handlers().directory_handler
 
         aliases = yield self.store.get_aliases_for_room(old_room_id)
 
         # check to see if we have a canonical alias.
-        canonical_alias = None
+        canonical_alias_event = None
         canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
         if canonical_alias_event_id:
             canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
-            if canonical_alias_event:
-                canonical_alias = canonical_alias_event.content.get("alias", "")
 
         # first we try to remove the aliases from the old room (we suppress sending
         # the room_aliases event until the end).
@@ -488,19 +492,6 @@ class RoomCreationHandler(BaseHandler):
         if not removed_aliases:
             return
 
-        try:
-            # this can fail if, for some reason, our user doesn't have perms to send
-            # m.room.aliases events in the old room (note that we've already checked that
-            # they have perms to send a tombstone event, so that's not terribly likely).
-            #
-            # If that happens, it's regrettable, but we should carry on: it's the same
-            # as when you remove an alias from the directory normally - it just means that
-            # the aliases event gets out of sync with the directory
-            # (cf https://github.com/vector-im/riot-web/issues/2369)
-            yield directory_handler.send_room_alias_update_event(requester, old_room_id)
-        except AuthError as e:
-            logger.warning("Failed to send updated alias event on old room: %s", e)
-
         # we can now add any aliases we successfully removed to the new room.
         for alias in removed_aliases:
             try:
@@ -517,8 +508,10 @@ class RoomCreationHandler(BaseHandler):
                 # checking module decides it shouldn't, or similar.
                 logger.error("Error adding alias %s to new room: %s", alias, e)
 
+        # If a canonical alias event existed for the old room, fire a canonical
+        # alias event for the new room with a copy of the information.
         try:
-            if canonical_alias and (canonical_alias in removed_aliases):
+            if canonical_alias_event:
                 yield self.event_creation_handler.create_and_send_nonmember_event(
                     requester,
                     {
@@ -526,12 +519,10 @@ class RoomCreationHandler(BaseHandler):
                         "state_key": "",
                         "room_id": new_room_id,
                         "sender": requester.user.to_string(),
-                        "content": {"alias": canonical_alias},
+                        "content": canonical_alias_event.content,
                     },
                     ratelimit=False,
                 )
-
-            yield directory_handler.send_room_alias_update_event(requester, new_room_id)
         except SynapseError as e:
             # again I'm not really expecting this to fail, but if it does, I'd rather
             # we returned the new room to the client at this point.
@@ -757,7 +748,6 @@ class RoomCreationHandler(BaseHandler):
 
         if room_alias:
             result["room_alias"] = room_alias.to_string()
-            yield directory_handler.send_room_alias_update_event(requester, room_id)
 
         return result
 

From 8f6d9c4cf0c36180ad26bb84cdbb55b503a942e2 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 21 Feb 2020 08:53:01 +0000
Subject: [PATCH 166/382] Small grammar fixes to the ACME v1 deprecation notice (#6944)

Some small fixes to the copy in #6907.
---
 INSTALL.md               | 9 ++++-----
 changelog.d/6944.doc     | 1 +
 synapse/handlers/acme.py | 10 +++++-----
 3 files changed, 10 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/6944.doc

diff --git a/INSTALL.md b/INSTALL.md
index 9fe767704b..aa5eb882bb 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -420,11 +420,10 @@ so, you will need to edit `homeserver.yaml`, as follows:
   Note that, as pointed out in that document, this feature will not
   work with installs set up after November 2019.
 
-  If you are using your
-  own certificate, be sure to use a `.pem` file that includes the full
-  certificate chain including any intermediate certificates (for
-  instance, if using certbot, use `fullchain.pem` as your certificate,
-  not `cert.pem`).
+  If you are using your own certificate, be sure to use a `.pem` file that
+  includes the full certificate chain including any intermediate certificates
+  (for instance, if using certbot, use `fullchain.pem` as your certificate, not
+  `cert.pem`).
 
 For a more detailed guide to configuring your server for federation, see
 [federate.md](docs/federate.md)
diff --git a/changelog.d/6944.doc b/changelog.d/6944.doc
new file mode 100644
index 0000000000..eb0c534b56
--- /dev/null
+++ b/changelog.d/6944.doc
@@ -0,0 +1 @@
+Small grammatical fixes to the ACME v1 deprecation notice.
\ No newline at end of file diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index 250faa997b..a2d7959abe 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -27,11 +27,11 @@ logger = logging.getLogger(__name__) ACME_REGISTER_FAIL_ERROR = """ -------------------------------------------------------------------------------- -Failed to register with the ACME provider. This is likely happening because the install -is new, and ACME v1 has been deprecated by Let's Encrypt and is disabled for installs set -up after November 2019. -At the moment, Synapse doesn't support ACME v2. For more info and alternative solution, -check out https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 +Failed to register with the ACME provider. This is likely happening because the installation +is new, and ACME v1 has been deprecated by Let's Encrypt and disabled for +new installations since November 2019. +At the moment, Synapse doesn't support ACME v2. For more information and alternative +solutions, please read https://github.com/matrix-org/synapse/blob/master/docs/ACME.md#deprecation-of-acme-v1 --------------------------------------------------------------------------------""" From 9c1b83b0078aa9cc1bb902e14d3f7302625ba099 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Feb 2020 08:56:04 +0000 Subject: [PATCH 167/382] 1.11.0 --- CHANGES.md | 9 +++++++++ changelog.d/6944.doc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6944.doc diff --git a/CHANGES.md b/CHANGES.md index fabf909fa3..ff681762cd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.11.0 (2020-02-21) +=========================== + +Improved Documentation +---------------------- + +- Small grammatical fixes to the ACME v1 deprecation notice. ([\#6944](https://github.com/matrix-org/synapse/issues/6944)) + + Synapse 1.11.0rc1 (2020-02-19) ============================== diff --git a/changelog.d/6944.doc b/changelog.d/6944.doc deleted file mode 100644 index eb0c534b56..0000000000 --- a/changelog.d/6944.doc +++ /dev/null @@ -1 +0,0 @@ -Small grammatical fixes to the ACME v1 deprecation notice. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 90314d36af..fbb44cb94b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.11.0) stable; urgency=medium + + * New synapse release 1.11.0. + + -- Synapse Packaging team Fri, 21 Feb 2020 08:54:34 +0000 + matrix-synapse-py3 (1.10.1) stable; urgency=medium * New synapse release 1.10.1. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 076a297b87..3406ce634f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.11.0rc1" +__version__ = "1.11.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 0bd8cf435e307a72c91667caff746fec7c233f16 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Feb 2018 13:53:02 +0000 Subject: [PATCH 168/382] Increase MAX_EVENTS_BEHIND for replication clients --- synapse/replication/tcp/streams/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index a8d568b14a..208e8a667b 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -24,7 +24,7 @@ import attr logger = logging.getLogger(__name__) -MAX_EVENTS_BEHIND = 10000 +MAX_EVENTS_BEHIND = 500000 BackfillStreamRow = namedtuple( "BackfillStreamRow", From 1fcb9a1a7ab2cb4833ea6c823e8250199a0b3d95 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 21 Feb 2020 09:06:18 +0000 Subject: [PATCH 169/382] changelog --- changelog.d/6967.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6967.bugfix diff --git a/changelog.d/6967.bugfix b/changelog.d/6967.bugfix new file mode 100644 index 0000000000..b65f80cf1d --- /dev/null +++ b/changelog.d/6967.bugfix @@ -0,0 +1 @@ +Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. From 509e381afa8c656e72f5fef3d651a9819794174a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 21 Feb 2020 07:15:07 -0500 Subject: [PATCH 170/382] Clarify list/set/dict/tuple comprehensions and enforce via flake8 (#6957) Ensure good comprehension hygiene using flake8-comprehensions. 
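The changes in this patch are mechanical; a small before/after sketch of the rewrites that flake8-comprehensions flags (sample data is made up, and the rule codes are approximate):

```python
rows = [("a.example", 1), ("b.example", 2), ("a.example", 3)]

# Before: generators passed to the set()/dict()/list() constructors,
# plus a needless intermediate list inside sum():
hosts = set(host for host, _ in rows)
by_host = dict((host, n) for host, n in rows)
counts = list(n for _, n in rows)
total = sum([n for _, n in rows])

# After: comprehension syntax (or a bare generator) used directly,
# mirroring the diff below:
hosts = {host for host, _ in rows}
by_host = {host: n for host, n in rows}
counts = [n for _, n in rows]
total = sum(n for _, n in rows)

assert hosts == {"a.example", "b.example"} and total == 6
```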
--- CONTRIBUTING.md | 2 +- changelog.d/6957.misc | 1 + docs/code_style.md | 2 +- scripts-dev/convert_server_keys.py | 2 +- synapse/app/_base.py | 2 +- synapse/app/federation_sender.py | 4 +- synapse/app/pusher.py | 2 +- synapse/config/server.py | 4 +- synapse/config/tls.py | 2 +- synapse/crypto/keyring.py | 6 +-- synapse/federation/send_queue.py | 4 +- synapse/groups/groups_server.py | 2 +- synapse/handlers/device.py | 2 +- synapse/handlers/directory.py | 4 +- synapse/handlers/federation.py | 18 ++++----- synapse/handlers/presence.py | 6 +-- synapse/handlers/receipts.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/search.py | 8 ++-- synapse/handlers/sync.py | 22 +++++----- synapse/handlers/typing.py | 4 +- synapse/logging/utils.py | 2 +- synapse/metrics/__init__.py | 2 +- synapse/metrics/background_process_metrics.py | 4 +- synapse/push/bulk_push_rule_evaluator.py | 8 ++-- synapse/push/emailpusher.py | 2 +- synapse/push/mailer.py | 20 ++++------ synapse/push/pusherpool.py | 2 +- synapse/rest/admin/_base.py | 4 +- synapse/rest/client/v1/push_rule.py | 6 +-- synapse/rest/client/v1/pusher.py | 4 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v1/_base.py | 40 +++++++++---------- synapse/state/v1.py | 10 ++--- synapse/state/v2.py | 8 ++-- synapse/storage/_base.py | 2 +- synapse/storage/background_updates.py | 2 +- .../storage/data_stores/main/appservice.py | 14 ++++--- .../storage/data_stores/main/client_ips.py | 4 +- synapse/storage/data_stores/main/devices.py | 13 +++--- .../data_stores/main/event_federation.py | 2 +- synapse/storage/data_stores/main/events.py | 8 ++-- .../data_stores/main/events_bg_updates.py | 2 +- .../storage/data_stores/main/events_worker.py | 6 +-- synapse/storage/data_stores/main/push_rule.py | 8 ++-- synapse/storage/data_stores/main/receipts.py | 4 +- .../storage/data_stores/main/roommember.py | 4 +- synapse/storage/data_stores/main/state.py | 8 ++-- synapse/storage/data_stores/main/stream.py | 8 ++-- .../data_stores/main/user_erasure_store.py | 4 +- synapse/storage/data_stores/state/store.py | 4 +- synapse/storage/database.py | 4 +- synapse/storage/persist_events.py | 8 ++-- synapse/storage/prepare_database.py | 6 +-- synapse/util/frozenutils.py | 2 +- synapse/visibility.py | 4 +- tests/config/test_generate.py | 2 +- tests/federation/test_federation_server.py | 2 +- tests/handlers/test_presence.py | 4 +- tests/handlers/test_typing.py | 6 +-- tests/handlers/test_user_directory.py | 12 +++--- tests/push/test_email.py | 6 +-- tests/push/test_http.py | 8 ++-- tests/rest/client/v2_alpha/test_sync.py | 28 +++++++------ tests/storage/test__base.py | 4 +- tests/storage/test_appservice.py | 36 ++++++++--------- tests/storage/test_cleanup_extrems.py | 10 ++--- tests/storage/test_event_metrics.py | 36 ++++++++--------- tests/storage/test_state.py | 2 +- tests/test_state.py | 18 +++------ tests/util/test_stream_change_cache.py | 18 +++------ tox.ini | 1 + 73 files changed, 251 insertions(+), 276 deletions(-) create mode 100644 changelog.d/6957.misc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4b01b6ac8c..253a0ca648 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,7 +60,7 @@ python 3.6 and to install each tool: ``` # Install the dependencies -pip install -U black flake8 isort +pip install -U black flake8 flake8-comprehensions isort # Run the linter script ./scripts-dev/lint.sh diff --git a/changelog.d/6957.misc b/changelog.d/6957.misc new file mode 100644 index 0000000000..4f98030110 
--- /dev/null +++ b/changelog.d/6957.misc @@ -0,0 +1 @@ +Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. diff --git a/docs/code_style.md b/docs/code_style.md index 71aecd41f7..6ef6f80290 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -30,7 +30,7 @@ The necessary tools are detailed below. Install `flake8` with: - pip install --upgrade flake8 + pip install --upgrade flake8 flake8-comprehensions Check all application and test code with: diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py index 179be61c30..06b4c1e2ff 100644 --- a/scripts-dev/convert_server_keys.py +++ b/scripts-dev/convert_server_keys.py @@ -103,7 +103,7 @@ def main(): yaml.safe_dump(result, sys.stdout, default_flow_style=False) - rows = list(row for server, json in result.items() for row in rows_v2(server, json)) + rows = [row for server, json in result.items() for row in rows_v2(server, json)] cursor = connection.cursor() cursor.executemany( diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 109b1e2fb5..9ffd23c6df 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -141,7 +141,7 @@ def start_reactor( def quit_with_error(error_string): message_lines = error_string.split("\n") - line_length = max([len(l) for l in message_lines if len(l) < 80]) + 2 + line_length = max(len(l) for l in message_lines if len(l) < 80) + 2 sys.stderr.write("*" * line_length + "\n") for line in message_lines: sys.stderr.write(" %s\n" % (line.rstrip(),)) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 63a91f1177..b7fcf80ddc 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -262,7 +262,7 @@ class FederationSenderHandler(object): # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: - hosts = set(row.destination for row in rows) + hosts = {row.destination for row in rows} for host in hosts: self.federation_sender.send_device_messages(host) @@ -270,7 +270,7 @@ class FederationSenderHandler(object): # The to_device stream includes stuff to be pushed to both local # clients and remote servers, so we ignore entities that start with # '@' (since they'll be local users rather than destinations). 
- hosts = set(row.entity for row in rows if not row.entity.startswith("@")) + hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index e46b6ac598..84e9f8d5e2 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -158,7 +158,7 @@ class PusherReplicationHandler(ReplicationClientHandler): yield self.pusher_pool.on_new_notifications(token, token) elif stream_name == "receipts": yield self.pusher_pool.on_new_receipts( - token, token, set(row.room_id for row in rows) + token, token, {row.room_id for row in rows} ) except Exception: logger.exception("Error poking pushers") diff --git a/synapse/config/server.py b/synapse/config/server.py index 0ec1b0fadd..7525765fee 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -1066,12 +1066,12 @@ KNOWN_RESOURCES = ( def _check_resource_config(listeners): - resource_names = set( + resource_names = { res_name for listener in listeners for res in listener.get("resources", []) for res_name in res.get("names", []) - ) + } for resource in resource_names: if resource not in KNOWN_RESOURCES: diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 97a12d51f6..a65538562b 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -260,7 +260,7 @@ class TlsConfig(Config): crypto.FILETYPE_ASN1, self.tls_certificate ) sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest()) - sha256_fingerprints = set(f["sha256"] for f in self.tls_fingerprints) + sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints} if sha256_fingerprint not in sha256_fingerprints: self.tls_fingerprints.append({"sha256": sha256_fingerprint}) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6fe5a6a26a..983f0ead8c 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -326,9 +326,7 @@ class Keyring(object): verify_requests (list[VerifyJsonRequest]): list of verify requests """ - remaining_requests = set( - (rq for rq in verify_requests if not rq.key_ready.called) - ) + remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called} @defer.inlineCallbacks def do_iterations(): @@ -396,7 +394,7 @@ class Keyring(object): results = yield fetcher.get_keys(missing_keys) - completed = list() + completed = [] for verify_request in remaining_requests: server_name = verify_request.server_name diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 001bb304ae..876fb0e245 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -129,9 +129,9 @@ class FederationRemoteSendQueue(object): for key in keys[:i]: del self.presence_changed[key] - user_ids = set( + user_ids = { user_id for uids in self.presence_changed.values() for user_id in uids - ) + } keys = self.presence_destinations.keys() i = self.presence_destinations.bisect_left(position_to_delete) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index c106abae21..4f0dc0a209 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -608,7 +608,7 @@ class GroupsServerHandler(GroupsServerWorkerHandler): user_results = yield self.store.get_users_in_group( group_id, include_private=True ) - if user_id in [user_result["user_id"] for user_result in user_results]: + if user_id in (user_result["user_id"] for user_result in user_results): raise SynapseError(400, "User already in 
group") content = { diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 50cea3f378..a514c30714 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -742,6 +742,6 @@ class DeviceListUpdater(object): # We clobber the seen updates since we've re-synced from a given # point. - self._seen_updates[user_id] = set([stream_id]) + self._seen_updates[user_id] = {stream_id} defer.returnValue(result) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 921d887b24..0b23ca919a 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -72,7 +72,7 @@ class DirectoryHandler(BaseHandler): # TODO(erikj): Check if there is a current association. if not servers: users = yield self.state.get_current_users_in_room(room_id) - servers = set(get_domain_from_id(u) for u in users) + servers = {get_domain_from_id(u) for u in users} if not servers: raise SynapseError(400, "Failed to get server list") @@ -255,7 +255,7 @@ class DirectoryHandler(BaseHandler): ) users = yield self.state.get_current_users_in_room(room_id) - extra_servers = set(get_domain_from_id(u) for u in users) + extra_servers = {get_domain_from_id(u) for u in users} servers = set(extra_servers) | set(servers) # If this server is in the list of servers, return it first. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index eb20ef4aec..a689065f89 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -659,11 +659,11 @@ class FederationHandler(BaseHandler): # this can happen if a remote server claims that the state or # auth_events at an event in room A are actually events in room B - bad_events = list( + bad_events = [ (event_id, event.room_id) for event_id, event in fetched_events.items() if event.room_id != room_id - ) + ] for bad_event_id, bad_room_id in bad_events: # This is a bogus situation, but since we may only discover it a long time @@ -856,7 +856,7 @@ class FederationHandler(BaseHandler): # Don't bother processing events we already have. seen_events = await self.store.have_events_in_timeline( - set(e.event_id for e in events) + {e.event_id for e in events} ) events = [e for e in events if e.event_id not in seen_events] @@ -866,7 +866,7 @@ class FederationHandler(BaseHandler): event_map = {e.event_id: e for e in events} - event_ids = set(e.event_id for e in events) + event_ids = {e.event_id for e in events} # build a list of events whose prev_events weren't in the batch. # (XXX: this will include events whose prev_events we already have; that doesn't @@ -892,13 +892,13 @@ class FederationHandler(BaseHandler): state_events.update({s.event_id: s for s in state}) events_to_state[e_id] = state - required_auth = set( + required_auth = { a_id for event in events + list(state_events.values()) + list(auth_events.values()) for a_id in event.auth_event_ids() - ) + } auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) @@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler): async def on_event_auth(self, event_id: str) -> List[EventBase]: event = await self.store.get_event(event_id) auth = await self.store.get_auth_chain( - [auth_id for auth_id in event.auth_event_ids()], include_given=True + list(event.auth_event_ids()), include_given=True ) return list(auth) @@ -2152,7 +2152,7 @@ class FederationHandler(BaseHandler): # Now get the current auth_chain for the event. 
local_auth_chain = await self.store.get_auth_chain( - [auth_id for auth_id in event.auth_event_ids()], include_given=True + list(event.auth_event_ids()), include_given=True ) # TODO: Check if we would now reject event_id. If so we need to tell @@ -2654,7 +2654,7 @@ class FederationHandler(BaseHandler): member_handler = self.hs.get_room_member_handler() yield member_handler.send_membership_event(None, event, context) else: - destinations = set(x.split(":", 1)[-1] for x in (sender_user_id, room_id)) + destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)} yield self.federation_client.forward_third_party_invite( destinations, room_id, event_dict ) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 202aa9294f..0d6cf2b008 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -313,7 +313,7 @@ class PresenceHandler(object): notified_presence_counter.inc(len(to_notify)) yield self._persist_and_notify(list(to_notify.values())) - self.unpersisted_users_changes |= set(s.user_id for s in new_states) + self.unpersisted_users_changes |= {s.user_id for s in new_states} self.unpersisted_users_changes -= set(to_notify.keys()) to_federation_ping = { @@ -698,7 +698,7 @@ class PresenceHandler(object): updates = yield self.current_state_for_users(target_user_ids) updates = list(updates.values()) - for user_id in set(target_user_ids) - set(u.user_id for u in updates): + for user_id in set(target_user_ids) - {u.user_id for u in updates}: updates.append(UserPresenceState.default(user_id)) now = self.clock.time_msec() @@ -886,7 +886,7 @@ class PresenceHandler(object): hosts = yield self.state.get_current_hosts_in_room(room_id) # Filter out ourselves. - hosts = set(host for host in hosts if host != self.server_name) + hosts = {host for host in hosts if host != self.server_name} self.federation.send_presence_to_destinations( states=[state], destinations=hosts diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 9283c039e3..8bc100db42 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -94,7 +94,7 @@ class ReceiptsHandler(BaseHandler): # no new receipts return False - affected_room_ids = list(set([r.room_id for r in receipts])) + affected_room_ids = list({r.room_id for r in receipts}) self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids) # Note that the min here shouldn't be relied upon to be accurate. 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 76e8f61b74..8ee870f0bb 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -355,7 +355,7 @@ class RoomCreationHandler(BaseHandler): # If so, mark the new room as non-federatable as well creation_content["m.federate"] = False - initial_state = dict() + initial_state = {} # Replicate relevant room events types_to_copy = ( diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 110097eab9..ec1542d416 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -184,7 +184,7 @@ class SearchHandler(BaseHandler): membership_list=[Membership.JOIN], # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban], ) - room_ids = set(r.room_id for r in rooms) + room_ids = {r.room_id for r in rooms} # If doing a subset of all rooms seearch, check if any of the rooms # are from an upgraded room, and search their contents as well @@ -374,12 +374,12 @@ class SearchHandler(BaseHandler): ).to_string() if include_profile: - senders = set( + senders = { ev.sender for ev in itertools.chain( res["events_before"], [event], res["events_after"] ) - ) + } if res["events_after"]: last_event_id = res["events_after"][-1].event_id @@ -421,7 +421,7 @@ class SearchHandler(BaseHandler): state_results = {} if include_state: - rooms = set(e.room_id for e in allowed_events) + rooms = {e.room_id for e in allowed_events} for room_id in rooms: state = yield self.state_handler.get_current_state(room_id) state_results[room_id] = list(state.values()) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4324bc702e..669dbc8a48 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -682,11 +682,9 @@ class SyncHandler(object): # FIXME: order by stream ordering rather than as returned by SQL if joined_user_ids or invited_user_ids: - summary["m.heroes"] = sorted( - [user_id for user_id in (joined_user_ids + invited_user_ids)] - )[0:5] + summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5] else: - summary["m.heroes"] = sorted([user_id for user_id in gone_user_ids])[0:5] + summary["m.heroes"] = sorted(gone_user_ids)[0:5] if not sync_config.filter_collection.lazy_load_members(): return summary @@ -697,9 +695,9 @@ class SyncHandler(object): # track which members the client should already know about via LL: # Ones which are already in state... - existing_members = set( + existing_members = { user_id for (typ, user_id) in state.keys() if typ == EventTypes.Member - ) + } # ...or ones which are in the timeline... for ev in batch.events: @@ -773,10 +771,10 @@ class SyncHandler(object): # We only request state for the members needed to display the # timeline: - members_to_fetch = set( + members_to_fetch = { event.sender # FIXME: we also care about invite targets etc. 
for event in batch.events - ) + } if full_state: # always make sure we LL ourselves so we know we're in the room @@ -1993,10 +1991,10 @@ def _calculate_state( ) } - c_ids = set(e for e in itervalues(current)) - ts_ids = set(e for e in itervalues(timeline_start)) - p_ids = set(e for e in itervalues(previous)) - tc_ids = set(e for e in itervalues(timeline_contains)) + c_ids = set(itervalues(current)) + ts_ids = set(itervalues(timeline_start)) + p_ids = set(itervalues(previous)) + tc_ids = set(itervalues(timeline_contains)) # If we are lazyloading room members, we explicitly add the membership events # for the senders in the timeline into the state block returned by /sync, diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 5406618431..391bceb0c4 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -198,7 +198,7 @@ class TypingHandler(object): now=now, obj=member, then=now + FEDERATION_PING_INTERVAL ) - for domain in set(get_domain_from_id(u) for u in users): + for domain in {get_domain_from_id(u) for u in users}: if domain != self.server_name: logger.debug("sending typing update to %s", domain) self.federation.build_and_send_edu( @@ -231,7 +231,7 @@ class TypingHandler(object): return users = yield self.state.get_current_users_in_room(room_id) - domains = set(get_domain_from_id(u) for u in users) + domains = {get_domain_from_id(u) for u in users} if self.server_name in domains: logger.info("Got typing update from %s: %r", user_id, content) diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index 6073fc2725..0c2527bd86 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -148,7 +148,7 @@ def trace_function(f): pathname=pathname, lineno=lineno, msg=msg, - args=tuple(), + args=(), exc_info=None, ) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 0b45e1f52a..0dba997a23 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -240,7 +240,7 @@ class BucketCollector(object): res.append(["+Inf", sum(data.values())]) metric = HistogramMetricFamily( - self.name, "", buckets=res, sum_value=sum([x * y for x, y in data.items()]) + self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items()) ) yield metric diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index c53d2a0d40..b65bcd8806 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -80,13 +80,13 @@ _background_process_db_sched_duration = Counter( # map from description to a counter, so that we can name our logcontexts # incrementally. (It actually duplicates _background_process_start_count, but # it's much simpler to do so than to try to combine them.) -_background_process_counts = dict() # type: dict[str, int] +_background_process_counts = {} # type: dict[str, int] # map from description to the currently running background processes. # # it's kept as a dict of sets rather than a big set so that we can keep track # of process descriptions that no longer have any active processes. 
-_background_processes = dict() # type: dict[str, set[_BackgroundProcess]] +_background_processes = {} # type: dict[str, set[_BackgroundProcess]] # A lock that covers the above dicts _bg_metrics_lock = threading.Lock() diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 7d9f5a38d9..433ca2f416 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -400,11 +400,11 @@ class RulesForRoom(object): if logger.isEnabledFor(logging.DEBUG): logger.debug("Found members %r: %r", self.room_id, members.values()) - interested_in_user_ids = set( + interested_in_user_ids = { user_id for user_id, membership in itervalues(members) if membership == Membership.JOIN - ) + } logger.debug("Joined: %r", interested_in_user_ids) @@ -412,9 +412,9 @@ class RulesForRoom(object): interested_in_user_ids, on_invalidate=self.invalidate_all_cb ) - user_ids = set( + user_ids = { uid for uid, have_pusher in iteritems(if_users_with_pushers) if have_pusher - ) + } logger.debug("With pushers: %r", user_ids) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 8c818a86bf..ba4551d619 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -204,7 +204,7 @@ class EmailPusher(object): yield self.send_notification(unprocessed, reason) yield self.save_last_stream_ordering_and_success( - max([ea["stream_ordering"] for ea in unprocessed]) + max(ea["stream_ordering"] for ea in unprocessed) ) # we update the throttle on all the possible unprocessed push actions diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index b13b646bfd..4ccaf178ce 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -526,12 +526,10 @@ class Mailer(object): # If the room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" sender_ids = list( - set( - [ - notif_events[n["event_id"]].sender - for n in notifs_by_room[room_id] - ] - ) + { + notif_events[n["event_id"]].sender + for n in notifs_by_room[room_id] + } ) member_events = yield self.store.get_events( @@ -558,12 +556,10 @@ class Mailer(object): # If the reason room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" sender_ids = list( - set( - [ - notif_events[n["event_id"]].sender - for n in notifs_by_room[reason["room_id"]] - ] - ) + { + notif_events[n["event_id"]].sender + for n in notifs_by_room[reason["room_id"]] + } ) member_events = yield self.store.get_events( diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index b9dca5bc63..01789a9fb4 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -191,7 +191,7 @@ class PusherPool: min_stream_id - 1, max_stream_id ) # This returns a tuple, user_id is at index 3 - users_affected = set([r[3] for r in updated_receipts]) + users_affected = {r[3] for r in updated_receipts} for u in users_affected: if u in self.pushers: diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index 459482eb6d..a96f75ce26 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -29,7 +29,7 @@ def historical_admin_path_patterns(path_regex): Note that this should only be used for existing endpoints: new ones should just register for the /_synapse/admin path. 
""" - return list( + return [ re.compile(prefix + path_regex) for prefix in ( "^/_synapse/admin/v1", @@ -37,7 +37,7 @@ def historical_admin_path_patterns(path_regex): "^/_matrix/client/unstable/admin", "^/_matrix/client/r0/admin", ) - ) + ] def admin_patterns(path_regex: str): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 4f74600239..9fd4908136 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -49,7 +49,7 @@ class PushRuleRestServlet(RestServlet): if self._is_worker: raise Exception("Cannot handle PUT /push_rules on worker") - spec = _rule_spec_from_path([x for x in path.split("/")]) + spec = _rule_spec_from_path(path.split("/")) try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: @@ -110,7 +110,7 @@ class PushRuleRestServlet(RestServlet): if self._is_worker: raise Exception("Cannot handle DELETE /push_rules on worker") - spec = _rule_spec_from_path([x for x in path.split("/")]) + spec = _rule_spec_from_path(path.split("/")) requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -138,7 +138,7 @@ class PushRuleRestServlet(RestServlet): rules = format_push_rules_for_user(requester.user, rules) - path = [x for x in path.split("/")][1:] + path = path.split("/")[1:] if path == []: # we're a reference impl: pedantry is our job. diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 6f6b7aed6e..550a2f1b44 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -54,9 +54,9 @@ class PushersRestServlet(RestServlet): pushers = await self.hs.get_datastore().get_pushers_by_user_id(user.to_string()) - filtered_pushers = list( + filtered_pushers = [ {k: v for k, v in p.items() if k in ALLOWED_KEYS} for p in pushers - ) + ] return 200, {"pushers": filtered_pushers} diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index d8292ce29f..8fa68dd37f 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -72,7 +72,7 @@ class SyncRestServlet(RestServlet): """ PATTERNS = client_patterns("/sync$") - ALLOWED_PRESENCE = set(["online", "offline", "unavailable"]) + ALLOWED_PRESENCE = {"online", "offline", "unavailable"} def __init__(self, hs): super(SyncRestServlet, self).__init__() diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 9d6813a047..4b6d030a57 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -149,7 +149,7 @@ class RemoteKey(DirectServeResource): time_now_ms = self.clock.time_msec() - cache_misses = dict() # type: Dict[str, Set[str]] + cache_misses = {} # type: Dict[str, Set[str]] for (server_name, key_id, from_server), results in cached.items(): results = [(result["ts_added_ms"], result) for result in results] diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 65bbf00073..ba28dd089d 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -135,27 +135,25 @@ def add_file_headers(request, media_type, file_size, upload_name): # separators as defined in RFC2616. SP and HT are handled separately. # see _can_encode_filename_as_token. 
-_FILENAME_SEPARATOR_CHARS = set( - ( - "(", - ")", - "<", - ">", - "@", - ",", - ";", - ":", - "\\", - '"', - "/", - "[", - "]", - "?", - "=", - "{", - "}", - ) -) +_FILENAME_SEPARATOR_CHARS = { + "(", + ")", + "<", + ">", + "@", + ",", + ";", + ":", + "\\", + '"', + "/", + "[", + "]", + "?", + "=", + "{", + "}", +} def _can_encode_filename_as_token(x): diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 24b7c0faef..9bf98d06f2 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -69,9 +69,9 @@ def resolve_events_with_store( unconflicted_state, conflicted_state = _seperate(state_sets) - needed_events = set( + needed_events = { event_id for event_ids in itervalues(conflicted_state) for event_id in event_ids - ) + } needed_event_count = len(needed_events) if event_map is not None: needed_events -= set(iterkeys(event_map)) @@ -261,11 +261,11 @@ def _resolve_state_events(conflicted_state, auth_events): def _resolve_auth_events(events, auth_events): - reverse = [i for i in reversed(_ordered_events(events))] + reverse = list(reversed(_ordered_events(events))) - auth_keys = set( + auth_keys = { key for event in events for key in event_auth.auth_types_for_event(event) - ) + } new_auth_events = {} for key in auth_keys: diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 75fe58305a..0ffe6d8c14 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -105,7 +105,7 @@ def resolve_events_with_store( % (room_id, event.event_id, event.room_id,) ) - full_conflicted_set = set(eid for eid in full_conflicted_set if eid in event_map) + full_conflicted_set = {eid for eid in full_conflicted_set if eid in event_map} logger.debug("%d full_conflicted_set entries", len(full_conflicted_set)) @@ -233,7 +233,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store): auth_sets = [] for state_set in state_sets: - auth_ids = set( + auth_ids = { eid for key, eid in iteritems(state_set) if ( @@ -246,7 +246,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store): ) ) and eid not in common - ) + } auth_chain = yield state_res_store.get_auth_chain(auth_ids, common) auth_ids.update(auth_chain) @@ -275,7 +275,7 @@ def _seperate(state_sets): conflicted_state = {} for key in set(itertools.chain.from_iterable(state_sets)): - event_ids = set(state_set.get(key) for state_set in state_sets) + event_ids = {state_set.get(key) for state_set in state_sets} if len(event_ids) == 1: unconflicted_state[key] = event_ids.pop() else: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index da3b99f93d..13de5f1f62 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta): members_changed (iterable[str]): The user_ids of members that have changed """ - for host in set(get_domain_from_id(u) for u in members_changed): + for host in {get_domain_from_id(u) for u in members_changed}: self._attempt_to_invalidate_cache("is_host_joined", (room_id, host)) self._attempt_to_invalidate_cache("was_host_joined", (room_id, host)) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index bd547f35cf..eb1a7e5002 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -189,7 +189,7 @@ class BackgroundUpdater(object): keyvalues=None, retcols=("update_name", "depends_on"), ) - in_flight = set(update["update_name"] for update in updates) + in_flight = {update["update_name"] for update in updates} for update in updates: if 
update["depends_on"] not in in_flight: self._background_update_queue.append(update["update_name"]) diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py index b2f39649fd..efbc06c796 100644 --- a/synapse/storage/data_stores/main/appservice.py +++ b/synapse/storage/data_stores/main/appservice.py @@ -135,7 +135,7 @@ class ApplicationServiceTransactionWorkerStore( may be empty. """ results = yield self.db.simple_select_list( - "application_services_state", dict(state=state), ["as_id"] + "application_services_state", {"state": state}, ["as_id"] ) # NB: This assumes this class is linked with ApplicationServiceStore as_list = self.get_app_services() @@ -158,7 +158,7 @@ class ApplicationServiceTransactionWorkerStore( """ result = yield self.db.simple_select_one( "application_services_state", - dict(as_id=service.id), + {"as_id": service.id}, ["state"], allow_none=True, desc="get_appservice_state", @@ -177,7 +177,7 @@ class ApplicationServiceTransactionWorkerStore( A Deferred which resolves when the state was set successfully. """ return self.db.simple_upsert( - "application_services_state", dict(as_id=service.id), dict(state=state) + "application_services_state", {"as_id": service.id}, {"state": state} ) def create_appservice_txn(self, service, events): @@ -253,13 +253,15 @@ class ApplicationServiceTransactionWorkerStore( self.db.simple_upsert_txn( txn, "application_services_state", - dict(as_id=service.id), - dict(last_txn=txn_id), + {"as_id": service.id}, + {"last_txn": txn_id}, ) # Delete txn self.db.simple_delete_txn( - txn, "application_services_txns", dict(txn_id=txn_id, as_id=service.id) + txn, + "application_services_txns", + {"txn_id": txn_id, "as_id": service.id}, ) return self.db.runInteraction( diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py index 13f4c9c72e..e1ccb27142 100644 --- a/synapse/storage/data_stores/main/client_ips.py +++ b/synapse/storage/data_stores/main/client_ips.py @@ -530,7 +530,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"])) for row in rows ) - return list( + return [ { "access_token": access_token, "ip": ip, @@ -538,7 +538,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): "last_seen": last_seen, } for (access_token, ip), (user_agent, last_seen) in iteritems(results) - ) + ] @wrap_as_background_process("prune_old_user_ips") async def _prune_old_user_ips(self): diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index b7617efb80..d55733a4cd 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -137,7 +137,7 @@ class DeviceWorkerStore(SQLBaseStore): # get the cross-signing keys of the users in the list, so that we can # determine which of the device changes were cross-signing keys - users = set(r[0] for r in updates) + users = {r[0] for r in updates} master_key_by_user = {} self_signing_key_by_user = {} for user in users: @@ -446,7 +446,7 @@ class DeviceWorkerStore(SQLBaseStore): a set of user_ids and results_map is a mapping of user_id -> device_id -> device_info """ - user_ids = set(user_id for user_id, _ in query_list) + user_ids = {user_id for user_id, _ in query_list} user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids)) # We go and check if any of the users need to have their device lists @@ -454,10 +454,9 @@ class 
DeviceWorkerStore(SQLBaseStore):
         users_needing_resync = yield self.get_user_ids_requiring_device_list_resync(
             user_ids
         )
-        user_ids_in_cache = (
-            set(user_id for user_id, stream_id in user_map.items() if stream_id)
-            - users_needing_resync
-        )
+        user_ids_in_cache = {
+            user_id for user_id, stream_id in user_map.items() if stream_id
+        } - users_needing_resync
         user_ids_not_in_cache = user_ids - user_ids_in_cache
 
         results = {}
@@ -604,7 +603,7 @@ class DeviceWorkerStore(SQLBaseStore):
             rows = yield self.db.execute(
                 "get_users_whose_signatures_changed", None, sql, user_id, from_key
             )
-            return set(user for row in rows for user in json.loads(row[0]))
+            return {user for row in rows for user in json.loads(row[0])}
         else:
             return set()
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 750ec1b70d..49a7b8b433 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -426,7 +426,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
                 query, (room_id, event_id, False, limit - len(event_results))
             )
 
-            new_results = set(t[0] for t in txn) - seen_events
+            new_results = {t[0] for t in txn} - seen_events
 
             new_front |= new_results
             seen_events |= new_results
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index c9d0d68c3a..8ae23df00a 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -145,7 +145,7 @@ class EventsStore(
             return txn.fetchall()
 
         res = yield self.db.runInteraction("read_forward_extremities", fetch)
-        self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
+        self._current_forward_extremities_amount = c_counter([x[0] for x in res])
 
     @_retry_on_integrity_error
     @defer.inlineCallbacks
@@ -598,11 +598,11 @@ class EventsStore(
             # We find out which membership events we may have deleted
             # and which we have added, then we invalidate the caches for all
             # those users.
- members_changed = set( + members_changed = { state_key for ev_type, state_key in itertools.chain(to_delete, to_insert) if ev_type == EventTypes.Member - ) + } for member in members_changed: txn.call_after( @@ -1615,7 +1615,7 @@ class EventsStore( """ ) - referenced_state_groups = set(sg for sg, in txn) + referenced_state_groups = {sg for sg, in txn} logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups) ) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 5177b71016..f54c8b1ee0 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -402,7 +402,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): keyvalues={}, retcols=("room_id",), ) - room_ids = set(row["room_id"] for row in rows) + room_ids = {row["room_id"] for row in rows} for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, (room_id,) diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 7251e819f5..47a3a26072 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -494,9 +494,9 @@ class EventsWorkerStore(SQLBaseStore): """ with Measure(self._clock, "_fetch_event_list"): try: - events_to_fetch = set( + events_to_fetch = { event_id for events, _ in event_list for event_id in events - ) + } row_dict = self.db.new_transaction( conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch @@ -804,7 +804,7 @@ class EventsWorkerStore(SQLBaseStore): desc="have_events_in_timeline", ) - return set(r["event_id"] for r in rows) + return {r["event_id"] for r in rows} @defer.inlineCallbacks def have_seen_events(self, event_ids): diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py index e2673ae073..62ac88d9f2 100644 --- a/synapse/storage/data_stores/main/push_rule.py +++ b/synapse/storage/data_stores/main/push_rule.py @@ -276,21 +276,21 @@ class PushRulesWorkerStore( # We ignore app service users for now. This is so that we don't fill # up the `get_if_users_have_pushers` cache with AS entries that we # know don't have pushers, nor even read receipts. 
- local_users_in_room = set( + local_users_in_room = { u for u in users_in_room if self.hs.is_mine_id(u) and not self.get_if_app_services_interested_in_user(u) - ) + } # users in the room who have pushers need to get push rules run because # that's how their pushers work if_users_with_pushers = yield self.get_if_users_have_pushers( local_users_in_room, on_invalidate=cache_context.invalidate ) - user_ids = set( + user_ids = { uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher - ) + } users_with_receipts = yield self.get_users_with_read_receipts_in_room( room_id, on_invalidate=cache_context.invalidate diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py index 96e54d145e..0d932a0672 100644 --- a/synapse/storage/data_stores/main/receipts.py +++ b/synapse/storage/data_stores/main/receipts.py @@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore): @cachedInlineCallbacks() def get_users_with_read_receipts_in_room(self, room_id): receipts = yield self.get_receipts_for_room(room_id, "m.read") - return set(r["user_id"] for r in receipts) + return {r["user_id"] for r in receipts} @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -283,7 +283,7 @@ class ReceiptsWorkerStore(SQLBaseStore): args.append(limit) txn.execute(sql, args) - return list(r[0:5] + (json.loads(r[5]),) for r in txn) + return [r[0:5] + (json.loads(r[5]),) for r in txn] return self.db.runInteraction( "get_all_updated_receipts", get_all_updated_receipts_txn diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py index d5ced05701..d5bd0cb5cf 100644 --- a/synapse/storage/data_stores/main/roommember.py +++ b/synapse/storage/data_stores/main/roommember.py @@ -465,7 +465,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): txn.execute(sql % (clause,), args) - return set(row[0] for row in txn) + return {row[0] for row in txn} return await self.db.runInteraction( "get_users_server_still_shares_room_with", @@ -826,7 +826,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): GROUP BY room_id, user_id; """ txn.execute(sql, (user_id,)) - return set(row[0] for row in txn if row[1] == 0) + return {row[0] for row in txn if row[1] == 0} return self.db.runInteraction( "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 3d34103e67..3a3b9a8e72 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -321,7 +321,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): desc="get_referenced_state_groups", ) - return set(row["state_group"] for row in rows) + return {row["state_group"] for row in rows} class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): @@ -367,7 +367,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): """ txn.execute(sql, (last_room_id, batch_size)) - room_ids = list(row[0] for row in txn) + room_ids = [row[0] for row in txn] if not room_ids: return True, set() @@ -384,7 +384,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name)) - joined_room_ids = set(row[0] for row in txn) + joined_room_ids = {row[0] for row in txn} left_rooms = set(room_ids) - joined_room_ids @@ -404,7 +404,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): retcols=("state_key",), ) - potentially_left_users = 
set(row["state_key"] for row in rows) + potentially_left_users = {row["state_key"] for row in rows} # Now lets actually delete the rooms from the DB. self.db.simple_delete_many_txn( diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 056b25b13a..ada5cce6c2 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -346,11 +346,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): from_key (str): The room_key portion of a StreamToken """ from_key = RoomStreamToken.parse_stream_token(from_key).stream - return set( + return { room_id for room_id in room_ids if self._events_stream_cache.has_entity_changed(room_id, from_key) - ) + } @defer.inlineCallbacks def get_room_events_stream_for_room( @@ -679,11 +679,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events_before = yield self.get_events_as_list( - [e for e in results["before"]["event_ids"]], get_prev_content=True + list(results["before"]["event_ids"]), get_prev_content=True ) events_after = yield self.get_events_as_list( - [e for e in results["after"]["event_ids"]], get_prev_content=True + list(results["after"]["event_ids"]), get_prev_content=True ) return { diff --git a/synapse/storage/data_stores/main/user_erasure_store.py b/synapse/storage/data_stores/main/user_erasure_store.py index af8025bc17..ec6b8a4ffd 100644 --- a/synapse/storage/data_stores/main/user_erasure_store.py +++ b/synapse/storage/data_stores/main/user_erasure_store.py @@ -63,9 +63,9 @@ class UserErasureWorkerStore(SQLBaseStore): retcols=("user_id",), desc="are_users_erased", ) - erased_users = set(row["user_id"] for row in rows) + erased_users = {row["user_id"] for row in rows} - res = dict((u, u in erased_users) for u in user_ids) + res = {u: u in erased_users for u in user_ids} return res diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py index c4ee9b7ccb..57a5267663 100644 --- a/synapse/storage/data_stores/state/store.py +++ b/synapse/storage/data_stores/state/store.py @@ -520,11 +520,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): retcols=("state_group",), ) - remaining_state_groups = set( + remaining_state_groups = { row["state_group"] for row in rows if row["state_group"] not in state_groups_to_delete - ) + } logger.info( "[purge] de-delta-ing %i remaining state groups", diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6dcb5c04da..1953614401 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -554,8 +554,8 @@ class Database(object): Returns: A list of dicts where the key is the column header. 
""" - col_headers = list(intern(str(column[0])) for column in cursor.description) - results = list(dict(zip(col_headers, row)) for row in cursor) + col_headers = [intern(str(column[0])) for column in cursor.description] + results = [dict(zip(col_headers, row)) for row in cursor] return results def execute(self, desc, decoder, query, *args): diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index b950550f23..0f9ac1cf09 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -602,14 +602,14 @@ class EventsPersistenceStorage(object): event_id_to_state_group.update(event_to_groups) # State groups of old_latest_event_ids - old_state_groups = set( + old_state_groups = { event_id_to_state_group[evid] for evid in old_latest_event_ids - ) + } # State groups of new_latest_event_ids - new_state_groups = set( + new_state_groups = { event_id_to_state_group[evid] for evid in new_latest_event_ids - ) + } # If they old and new groups are the same then we don't need to do # anything. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c285ef52a0..fc69c32a0a 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -345,9 +345,9 @@ def _upgrade_existing_database( "Could not open delta dir for version %d: %s" % (v, directory) ) - duplicates = set( + duplicates = { file_name for file_name, count in file_name_counter.items() if count > 1 - ) + } if duplicates: # We don't support using the same file name in the same delta version. raise PrepareDatabaseException( @@ -454,7 +454,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) ), (modname,), ) - applied_deltas = set(d for d, in cur) + applied_deltas = {d for d, in cur} for (name, stream) in names_and_streams: if name in applied_deltas: continue diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 635b897d6c..f2ccd5e7c6 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -30,7 +30,7 @@ def freeze(o): return o try: - return tuple([freeze(i) for i in o]) + return tuple(freeze(i) for i in o) except TypeError: pass diff --git a/synapse/visibility.py b/synapse/visibility.py index d0abd8f04f..e60d9756b7 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -75,7 +75,7 @@ def filter_events_for_client( """ # Filter out events that have been soft failed so that we don't relay them # to clients. 
- events = list(e for e in events if not e.internal_metadata.is_soft_failed()) + events = [e for e in events if not e.internal_metadata.is_soft_failed()] types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id)) event_id_to_state = yield storage.state.get_state_for_events( @@ -97,7 +97,7 @@ def filter_events_for_client( erased_senders = yield storage.main.are_users_erased((e.sender for e in events)) if apply_retention_policies: - room_ids = set(e.room_id for e in events) + room_ids = {e.room_id for e in events} retention_policies = {} for room_id in room_ids: diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 2684e662de..463855ecc8 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -48,7 +48,7 @@ class ConfigGenerationTestCase(unittest.TestCase): ) self.assertSetEqual( - set(["homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"]), + {"homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"}, set(os.listdir(self.dir)), ) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index e7d8699040..296dc887be 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -83,7 +83,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): ) ) - self.assertEqual(members, set(["@user:other.example.com", u1])) + self.assertEqual(members, {"@user:other.example.com", u1}) self.assertEqual(len(channel.json_body["pdus"]), 6) def test_needs_to_be_in_room(self): diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index c171038df8..64915bafcd 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -338,7 +338,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, syncing_user_ids=set([user_id]), now=now + state, is_mine=True, syncing_user_ids={user_id}, now=now ) self.assertIsNotNone(new_state) @@ -579,7 +579,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): ) self.assertEqual(expected_state.state, PresenceState.ONLINE) self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=set(("server2", "server3")), states=[expected_state] + destinations={"server2", "server3"}, states=[expected_state] ) def _add_new_user(self, room_id, user_id): diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 140cc0a3c2..07b204666e 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -129,12 +129,12 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): hs.get_auth().check_user_in_room = check_user_in_room def get_joined_hosts_for_room(room_id): - return set(member.domain for member in self.room_members) + return {member.domain for member in self.room_members} self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room def get_current_users_in_room(room_id): - return set(str(u) for u in self.room_members) + return {str(u) for u in self.room_members} hs.get_state_handler().get_current_users_in_room = get_current_users_in_room @@ -257,7 +257,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): member = RoomMember(ROOM_ID, U_APPLE.to_string()) self.handler._member_typing_until[member] = 1002000 - self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()]) + self.handler._room_typing[ROOM_ID] = {U_APPLE.to_string()} self.assertEquals(self.event_source.get_current_key(), 0) diff 
--git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 0a4765fff4..7b92bdbc47 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -114,7 +114,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): public_users = self.get_users_in_public_rooms() self.assertEqual( - self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} ) self.assertEqual(public_users, []) @@ -169,7 +169,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): public_users = self.get_users_in_public_rooms() self.assertEqual( - self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} ) self.assertEqual(public_users, []) @@ -226,7 +226,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): public_users = self.get_users_in_public_rooms() self.assertEqual( - self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + self._compress_shared(shares_private), {(u1, u2, room), (u2, u1, room)} ) self.assertEqual(public_users, []) @@ -358,12 +358,12 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): public_users = self.get_users_in_public_rooms() # User 1 and User 2 are in the same public room - self.assertEqual(set(public_users), set([(u1, room), (u2, room)])) + self.assertEqual(set(public_users), {(u1, room), (u2, room)}) # User 1 and User 3 share private rooms self.assertEqual( self._compress_shared(shares_private), - set([(u1, u3, private_room), (u3, u1, private_room)]), + {(u1, u3, private_room), (u3, u1, private_room)}, ) def test_initial_share_all_users(self): @@ -398,7 +398,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): # No users share rooms self.assertEqual(public_users, []) - self.assertEqual(self._compress_shared(shares_private), set([])) + self.assertEqual(self._compress_shared(shares_private), set()) # Despite not sharing a room, search_all_users means we get a search # result. 
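For context: the rewrites throughout this patch are mechanical applications of a few flake8-comprehensions rules (such as C401 and C408), enforced by the new lint dependency added to tox.ini at the end of this patch. A minimal, self-contained sketch of the before/after shapes, using illustrative inputs:

    # illustrative inputs
    members_changed = ["@a:one.example", "@b:two.example"]

    def get_domain_from_id(user_id):
        # toy stand-in for synapse.types.get_domain_from_id
        return user_id.split(":", 1)[1]

    # before: a set() call wrapping a generator, a dict() call with kwargs
    hosts = set(get_domain_from_id(u) for u in members_changed)
    as_yaml = dict(url="https://example.com", as_token="secret")

    # after: a set comprehension and a dict literal, as rewritten throughout
    # this patch; same results, without the intermediate generator or the
    # extra constructor call
    hosts = {get_domain_from_id(u) for u in members_changed}
    as_yaml = {"url": "https://example.com", "as_token": "secret"}
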
diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 80187406bc..83032cc9ea 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -163,7 +163,7 @@ class EmailPusherTests(HomeserverTestCase): # Get the stream ordering before it gets sent pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": self.user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) @@ -174,7 +174,7 @@ class EmailPusherTests(HomeserverTestCase): # It hasn't succeeded yet, so the stream ordering shouldn't have moved pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": self.user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) @@ -192,7 +192,7 @@ class EmailPusherTests(HomeserverTestCase): # The stream ordering has increased pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": self.user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) diff --git a/tests/push/test_http.py b/tests/push/test_http.py index fe3441f081..baf9c785f4 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -102,7 +102,7 @@ class HTTPPusherTests(HomeserverTestCase): # Get the stream ordering before it gets sent pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) @@ -113,7 +113,7 @@ class HTTPPusherTests(HomeserverTestCase): # It hasn't succeeded yet, so the stream ordering shouldn't have moved pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) @@ -132,7 +132,7 @@ class HTTPPusherTests(HomeserverTestCase): # The stream ordering has increased pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) @@ -152,7 +152,7 @@ class HTTPPusherTests(HomeserverTestCase): # The stream ordering has increased, again pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by({"user_name": user_id}) ) pushers = list(pushers) self.assertEqual(len(pushers), 1) diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 9c13a13786..fa3a3ec1bd 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -40,16 +40,14 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertTrue( - set( - [ - "next_batch", - "rooms", - "presence", - "account_data", - "to_device", - "device_lists", - ] - ).issubset(set(channel.json_body.keys())) + { + "next_batch", + "rooms", + "presence", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) ) def test_sync_presence_disabled(self): @@ -63,9 +61,13 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertTrue( - set( - ["next_batch", "rooms", "account_data", "to_device", 
"device_lists"] - ).issubset(set(channel.json_body.keys())) + { + "next_batch", + "rooms", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) ) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index d491ea2924..e37260a820 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -373,7 +373,7 @@ class UpsertManyTests(unittest.HomeserverTestCase): ) self.assertEqual( set(self._dump_to_tuple(res)), - set([(1, "user1", "hello"), (2, "user2", "there")]), + {(1, "user1", "hello"), (2, "user2", "there")}, ) # Update only user2 @@ -400,5 +400,5 @@ class UpsertManyTests(unittest.HomeserverTestCase): ) self.assertEqual( set(self._dump_to_tuple(res)), - set([(1, "user1", "hello"), (2, "user2", "bleb")]), + {(1, "user1", "hello"), (2, "user2", "bleb")}, ) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index fd52512696..31710949a8 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -69,14 +69,14 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): pass def _add_appservice(self, as_token, id, url, hs_token, sender): - as_yaml = dict( - url=url, - as_token=as_token, - hs_token=hs_token, - id=id, - sender_localpart=sender, - namespaces={}, - ) + as_yaml = { + "url": url, + "as_token": as_token, + "hs_token": hs_token, + "id": id, + "sender_localpart": sender, + "namespaces": {}, + } # use the token as the filename with open(as_token, "w") as outfile: outfile.write(yaml.dump(as_yaml)) @@ -135,14 +135,14 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): ) def _add_service(self, url, as_token, id): - as_yaml = dict( - url=url, - as_token=as_token, - hs_token="something", - id=id, - sender_localpart="a_sender", - namespaces={}, - ) + as_yaml = { + "url": url, + "as_token": as_token, + "hs_token": "something", + "id": id, + "sender_localpart": "a_sender", + "namespaces": {}, + } # use the token as the filename with open(as_token, "w") as outfile: outfile.write(yaml.dump(as_yaml)) @@ -384,8 +384,8 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): ) self.assertEquals(2, len(services)) self.assertEquals( - set([self.as_list[2]["id"], self.as_list[0]["id"]]), - set([services[0].id, services[1].id]), + {self.as_list[2]["id"], self.as_list[0]["id"]}, + {services[0].id, services[1].id}, ) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 029ac26454..0e04b2cf92 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -134,7 +134,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -172,7 +172,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b}) # Run the background update and check it did the right thing self.run_background_update() @@ -227,9 +227,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): 
latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual( - set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) - ) + self.assertEqual(set(latest_event_ids), {event_id_a, event_id_b, event_id_c}) # Run the background update and check it did the right thing self.run_background_update() @@ -237,7 +235,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) ) - self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) + self.assertEqual(set(latest_event_ids), {event_id_b, event_id_c}) class CleanupExtremDummyEventsTestCase(HomeserverTestCase): diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index f26ff57a18..a7b7fd36d3 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -59,24 +59,22 @@ class ExtremStatisticsTestCase(HomeserverTestCase): ) ) - expected = set( - [ - b'synapse_forward_extremities_bucket{le="1.0"} 0.0', - b'synapse_forward_extremities_bucket{le="2.0"} 2.0', - b'synapse_forward_extremities_bucket{le="3.0"} 2.0', - b'synapse_forward_extremities_bucket{le="5.0"} 2.0', - b'synapse_forward_extremities_bucket{le="7.0"} 3.0', - b'synapse_forward_extremities_bucket{le="10.0"} 3.0', - b'synapse_forward_extremities_bucket{le="15.0"} 3.0', - b'synapse_forward_extremities_bucket{le="20.0"} 3.0', - b'synapse_forward_extremities_bucket{le="50.0"} 3.0', - b'synapse_forward_extremities_bucket{le="100.0"} 3.0', - b'synapse_forward_extremities_bucket{le="200.0"} 3.0', - b'synapse_forward_extremities_bucket{le="500.0"} 3.0', - b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', - b"synapse_forward_extremities_count 3.0", - b"synapse_forward_extremities_sum 10.0", - ] - ) + expected = { + b'synapse_forward_extremities_bucket{le="1.0"} 0.0', + b'synapse_forward_extremities_bucket{le="2.0"} 2.0', + b'synapse_forward_extremities_bucket{le="3.0"} 2.0', + b'synapse_forward_extremities_bucket{le="5.0"} 2.0', + b'synapse_forward_extremities_bucket{le="7.0"} 3.0', + b'synapse_forward_extremities_bucket{le="10.0"} 3.0', + b'synapse_forward_extremities_bucket{le="15.0"} 3.0', + b'synapse_forward_extremities_bucket{le="20.0"} 3.0', + b'synapse_forward_extremities_bucket{le="50.0"} 3.0', + b'synapse_forward_extremities_bucket{le="100.0"} 3.0', + b'synapse_forward_extremities_bucket{le="200.0"} 3.0', + b'synapse_forward_extremities_bucket{le="500.0"} 3.0', + b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', + b"synapse_forward_extremities_count 3.0", + b"synapse_forward_extremities_sum 10.0", + } self.assertEqual(items, expected) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 04d58fbf24..0b88308ff4 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -394,7 +394,7 @@ class StateStoreTestCase(tests.unittest.TestCase): ) = self.state_datastore._state_group_cache.get(group) self.assertEqual(is_all, False) - self.assertEqual(known_absent, set([(e1.type, e1.state_key)])) + self.assertEqual(known_absent, {(e1.type, e1.state_key)}) self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) ############################################ diff --git a/tests/test_state.py b/tests/test_state.py index d1578fe581..66f22f6813 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -254,9 +254,7 @@ class StateTestCase(unittest.TestCase): ctx_d = context_store["D"] prev_state_ids 
= yield ctx_d.get_prev_state_ids() - self.assertSetEqual( - {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()} - ) + self.assertSetEqual({"START", "A", "C"}, set(prev_state_ids.values())) self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event) self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) @@ -313,9 +311,7 @@ class StateTestCase(unittest.TestCase): ctx_e = context_store["E"] prev_state_ids = yield ctx_e.get_prev_state_ids() - self.assertSetEqual( - {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()} - ) + self.assertSetEqual({"START", "A", "B", "C"}, set(prev_state_ids.values())) self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event) self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group) @@ -388,9 +384,7 @@ class StateTestCase(unittest.TestCase): ctx_d = context_store["D"] prev_state_ids = yield ctx_d.get_prev_state_ids() - self.assertSetEqual( - {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()} - ) + self.assertSetEqual({"A1", "A2", "A3", "A5", "B"}, set(prev_state_ids.values())) self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event) self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) @@ -482,7 +476,7 @@ class StateTestCase(unittest.TestCase): current_state_ids = yield context.get_current_state_ids() self.assertEqual( - set([e.event_id for e in old_state]), set(current_state_ids.values()) + {e.event_id for e in old_state}, set(current_state_ids.values()) ) self.assertEqual(group_name, context.state_group) @@ -513,9 +507,7 @@ class StateTestCase(unittest.TestCase): prev_state_ids = yield context.get_prev_state_ids() - self.assertEqual( - set([e.event_id for e in old_state]), set(prev_state_ids.values()) - ) + self.assertEqual({e.event_id for e in old_state}, set(prev_state_ids.values())) self.assertIsNotNone(context.state_group) diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py index f2be63706b..72a9de5370 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py @@ -67,7 +67,7 @@ class StreamChangeCacheTests(unittest.TestCase): # If we update an existing entity, it keeps the two existing entities cache.entity_has_changed("bar@baz.net", 5) self.assertEqual( - set(["bar@baz.net", "user@elsewhere.org"]), set(cache._entity_to_key) + {"bar@baz.net", "user@elsewhere.org"}, set(cache._entity_to_key) ) def test_get_all_entities_changed(self): @@ -137,7 +137,7 @@ class StreamChangeCacheTests(unittest.TestCase): cache.get_entities_changed( ["user@foo.com", "bar@baz.net", "user@elsewhere.org"], stream_pos=2 ), - set(["bar@baz.net", "user@elsewhere.org"]), + {"bar@baz.net", "user@elsewhere.org"}, ) # Query all the entries mid-way through the stream, but include one @@ -153,7 +153,7 @@ class StreamChangeCacheTests(unittest.TestCase): ], stream_pos=2, ), - set(["bar@baz.net", "user@elsewhere.org"]), + {"bar@baz.net", "user@elsewhere.org"}, ) # Query all the entries, but before the first known point. We will get @@ -168,21 +168,13 @@ class StreamChangeCacheTests(unittest.TestCase): ], stream_pos=0, ), - set( - [ - "user@foo.com", - "bar@baz.net", - "user@elsewhere.org", - "not@here.website", - ] - ), + {"user@foo.com", "bar@baz.net", "user@elsewhere.org", "not@here.website"}, ) # Query a subset of the entries mid-way through the stream. We should # only get back the subset. 
self.assertEqual( - cache.get_entities_changed(["bar@baz.net"], stream_pos=2), - set(["bar@baz.net"]), + cache.get_entities_changed(["bar@baz.net"], stream_pos=2), {"bar@baz.net"}, ) def test_max_pos(self): diff --git a/tox.ini b/tox.ini index b9132a3177..b715ea0bff 100644 --- a/tox.ini +++ b/tox.ini @@ -123,6 +123,7 @@ skip_install = True basepython = python3.6 deps = flake8 + flake8-comprehensions black==19.10b0 # We pin so that our tests don't start failing on new releases of black. commands = python -m black --check --diff . From 7936d2a96e4781ad7d1ae27f78b65c8eb8d5c3f5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 21 Feb 2020 07:18:33 -0500 Subject: [PATCH 171/382] Publishing/removing from the directory requires a power level greater than canonical aliases. --- changelog.d/6965.feature | 1 + synapse/api/auth.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6965.feature diff --git a/changelog.d/6965.feature b/changelog.d/6965.feature new file mode 100644 index 0000000000..6ad9956e40 --- /dev/null +++ b/changelog.d/6965.feature @@ -0,0 +1 @@ +Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index f576d65388..5ca18b4301 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -538,13 +538,13 @@ class Auth(object): return defer.succeed(auth_ids) @defer.inlineCallbacks - def check_can_change_room_list(self, room_id, user): + def check_can_change_room_list(self, room_id: str, user: UserID): """Check if the user is allowed to edit the room's entry in the published room list. Args: - room_id (str) - user (UserID) + room_id + user """ is_admin = yield self.is_server_admin(user) @@ -556,7 +556,7 @@ class Auth(object): # We currently require the user is a "moderator" in the room. We do this # by checking if they would (theoretically) be able to change the - # m.room.aliases events + # m.room.canonical_alias events power_level_event = yield self.state.get_current_state( room_id, EventTypes.PowerLevels, "" ) @@ -566,7 +566,7 @@ class Auth(object): auth_events[(EventTypes.PowerLevels, "")] = power_level_event send_level = event_auth.get_send_level( - EventTypes.Aliases, "", power_level_event + EventTypes.CanonicalAlias, "", power_level_event ) user_level = event_auth.get_user_power_level(user_id, auth_events) From fcf45994881d652571d965547b2287d796f798fc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 21 Feb 2020 12:40:23 -0500 Subject: [PATCH 172/382] Stop returning aliases as part of the room list. (#6970) --- changelog.d/6970.removal | 1 + synapse/handlers/room_list.py | 9 --------- 2 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/6970.removal diff --git a/changelog.d/6970.removal b/changelog.d/6970.removal new file mode 100644 index 0000000000..89bd363b95 --- /dev/null +++ b/changelog.d/6970.removal @@ -0,0 +1 @@ +The room list endpoint no longer returns a list of aliases. 
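For context: each entry in the published room list previously grew an "aliases" field, populated by one extra get_aliases_for_room lookup per result row (the loop removed in the hunk below). Assuming the usual /publicRooms response shape, an entry in the returned chunk now looks roughly like this — illustrative values, with the other fields unchanged and "aliases" simply absent:

    {
        "room_id": "!abcdefg:example.com",
        "name": "Example room",
        "canonical_alias": "#example:example.com",
        "num_joined_members": 42,
    }
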
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index c615206df1..0b7d3da680 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -216,15 +216,6 @@ class RoomListHandler(BaseHandler): direction_is_forward=False, ).to_token() - for room in results: - # populate search result entries with additional fields, namely - # 'aliases' - room_id = room["room_id"] - - aliases = yield self.store.get_aliases_for_room(room_id) - if aliases: - room["aliases"] = aliases - response["chunk"] = results response["total_room_count_estimate"] = yield self.store.count_public_rooms( From 7b0e2d961ce9b70ed2d41f27f624ab752af26400 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 21 Feb 2020 18:44:03 +0100 Subject: [PATCH 173/382] Change displayname of user as admin in rooms (#6876) --- changelog.d/6572.bugfix | 1 + synapse/handlers/profile.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6572.bugfix diff --git a/changelog.d/6572.bugfix b/changelog.d/6572.bugfix new file mode 100644 index 0000000000..4f708f409f --- /dev/null +++ b/changelog.d/6572.bugfix @@ -0,0 +1 @@ +When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index f9579d69ee..50ce0c585b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -28,7 +28,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import UserID, get_domain_from_id +from synapse.types import UserID, create_requester, get_domain_from_id from ._base import BaseHandler @@ -165,6 +165,12 @@ class BaseProfileHandler(BaseHandler): if new_displayname == "": new_displayname = None + # If the admin changes the display name of a user, the requesting user cannot send + # the join event to update the displayname in the rooms. + # This must be done by the target user himself. + if by_admin: + requester = create_requester(target_user) + yield self.store.set_profile_displayname(target_user.localpart, new_displayname) if self.hs.config.user_directory_search_all_users: @@ -217,6 +223,10 @@ class BaseProfileHandler(BaseHandler): 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) ) + # Same like set_displayname + if by_admin: + requester = create_requester(target_user) + yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url) if self.hs.config.user_directory_search_all_users: From af6c3895015580d04c5affd95321c75802c3cb62 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 21 Feb 2020 12:50:48 -0500 Subject: [PATCH 174/382] No longer use room alias events to calculate room names for push notifications. (#6966) --- changelog.d/6966.removal | 1 + synapse/push/presentable_names.py | 36 ++++++++++++------------------- 2 files changed, 15 insertions(+), 22 deletions(-) create mode 100644 changelog.d/6966.removal diff --git a/changelog.d/6966.removal b/changelog.d/6966.removal new file mode 100644 index 0000000000..69673d9139 --- /dev/null +++ b/changelog.d/6966.removal @@ -0,0 +1 @@ +Synapse no longer uses room alias events to calculate room names for email notifications. 
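The hunks below drop the m.room.aliases fallback from calculate_room_name and replace the remaining event-type string literals with EventTypes constants. Those constants are plain string aliases defined in synapse.api.constants, roughly:

    class EventTypes:
        Name = "m.room.name"
        CanonicalAlias = "m.room.canonical_alias"
        Member = "m.room.member"
        ThirdPartyInvite = "m.room.third_party_invite"

so the remaining name calculation falls back through m.room.name, then m.room.canonical_alias, then the member-based heuristics.
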
diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 16a7e8e31d..0644a13cfc 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -18,6 +18,8 @@ import re from twisted.internet import defer +from synapse.api.constants import EventTypes + logger = logging.getLogger(__name__) # intentionally looser than what aliases we allow to be registered since @@ -50,17 +52,17 @@ def calculate_room_name( (string or None) A human readable name for the room. """ # does it have a name? - if ("m.room.name", "") in room_state_ids: + if (EventTypes.Name, "") in room_state_ids: m_room_name = yield store.get_event( - room_state_ids[("m.room.name", "")], allow_none=True + room_state_ids[(EventTypes.Name, "")], allow_none=True ) if m_room_name and m_room_name.content and m_room_name.content["name"]: return m_room_name.content["name"] # does it have a canonical alias? - if ("m.room.canonical_alias", "") in room_state_ids: + if (EventTypes.CanonicalAlias, "") in room_state_ids: canon_alias = yield store.get_event( - room_state_ids[("m.room.canonical_alias", "")], allow_none=True + room_state_ids[(EventTypes.CanonicalAlias, "")], allow_none=True ) if ( canon_alias @@ -74,32 +76,22 @@ def calculate_room_name( # for an event type, so rearrange the data structure room_state_bytype_ids = _state_as_two_level_dict(room_state_ids) - # right then, any aliases at all? - if "m.room.aliases" in room_state_bytype_ids: - m_room_aliases = room_state_bytype_ids["m.room.aliases"] - for alias_id in m_room_aliases.values(): - alias_event = yield store.get_event(alias_id, allow_none=True) - if alias_event and alias_event.content.get("aliases"): - the_aliases = alias_event.content["aliases"] - if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]): - return the_aliases[0] - if not fallback_to_members: return None my_member_event = None - if ("m.room.member", user_id) in room_state_ids: + if (EventTypes.Member, user_id) in room_state_ids: my_member_event = yield store.get_event( - room_state_ids[("m.room.member", user_id)], allow_none=True + room_state_ids[(EventTypes.Member, user_id)], allow_none=True ) if ( my_member_event is not None and my_member_event.content["membership"] == "invite" ): - if ("m.room.member", my_member_event.sender) in room_state_ids: + if (EventTypes.Member, my_member_event.sender) in room_state_ids: inviter_member_event = yield store.get_event( - room_state_ids[("m.room.member", my_member_event.sender)], + room_state_ids[(EventTypes.Member, my_member_event.sender)], allow_none=True, ) if inviter_member_event: @@ -114,9 +106,9 @@ def calculate_room_name( # we're going to have to generate a name based on who's in the room, # so find out who is in the room that isn't the user. - if "m.room.member" in room_state_bytype_ids: + if EventTypes.Member in room_state_bytype_ids: member_events = yield store.get_events( - list(room_state_bytype_ids["m.room.member"].values()) + list(room_state_bytype_ids[EventTypes.Member].values()) ) all_members = [ ev @@ -138,9 +130,9 @@ def calculate_room_name( # self-chat, peeked room with 1 participant, # or inbound invite, or outbound 3PID invite. 
if all_members[0].sender == user_id: - if "m.room.third_party_invite" in room_state_bytype_ids: + if EventTypes.ThirdPartyInvite in room_state_bytype_ids: third_party_invites = room_state_bytype_ids[ - "m.room.third_party_invite" + EventTypes.ThirdPartyInvite ].values() if len(third_party_invites) > 0: From 4c2ed3f20ef5361ea04da9c678d157d8735ca120 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 Feb 2020 15:18:38 +0000 Subject: [PATCH 175/382] Fix minor issues with email config (#6962) * Give `notif_template_html`, `notif_template_text` default values (fixes #6960) * Don't complain if `smtp_host` and `smtp_port` are unset, since they have sensible defaults (fixes #6961) * Set the example for `enable_notifs` to `True`, for consistency and because it's more useful * Raise errors as ConfigError rather than RuntimeError for nicer formatting --- changelog.d/6962.bugfix | 1 + docs/sample_config.yaml | 9 ++--- synapse/config/emailconfig.py | 66 ++++++++++++++++------------------- 3 files changed, 36 insertions(+), 40 deletions(-) create mode 100644 changelog.d/6962.bugfix diff --git a/changelog.d/6962.bugfix b/changelog.d/6962.bugfix new file mode 100644 index 0000000000..9f5229d400 --- /dev/null +++ b/changelog.d/6962.bugfix @@ -0,0 +1 @@ +Fix a couple of bugs in email configuration handling. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8a036071e1..54cbe840d5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1409,10 +1409,6 @@ email: # #require_transport_security: true - # Enable sending emails for messages that the user has missed - # - #enable_notifs: false - # notif_from defines the "From" address to use when sending emails. # It must be set if email sending is enabled. # @@ -1430,6 +1426,11 @@ email: # #app_name: my_branded_matrix_server + # Uncomment the following to enable sending emails for messages that the user + # has missed. Disabled by default. + # + #enable_notifs: true + # Uncomment the following to disable automatic subscription to email # notifications for new users. Enabled by default. # diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 74853f9faa..f31fc85ec8 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -27,6 +27,12 @@ import pkg_resources from ._base import Config, ConfigError +MISSING_PASSWORD_RESET_CONFIG_ERROR = """\ +Password reset emails are enabled on this homeserver due to a partial +'email' block. However, the following required keys are missing: + %s +""" + class EmailConfig(Config): section = "email" @@ -142,24 +148,18 @@ class EmailConfig(Config): bleach if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - required = ["smtp_host", "smtp_port", "notif_from"] - missing = [] - for k in required: - if k not in email_config: - missing.append("email." + k) + if not self.email_notif_from: + missing.append("email.notif_from") # public_baseurl is required to build password reset and validation links that # will be emailed to users if config.get("public_baseurl") is None: missing.append("public_baseurl") - if len(missing) > 0: - raise RuntimeError( - "Password resets emails are configured to be sent from " - "this homeserver due to a partial 'email' block. 
" - "However, the following required keys are missing: %s" - % (", ".join(missing),) + if missing: + raise ConfigError( + MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),) ) # These email templates have placeholders in them, and thus must be @@ -245,32 +245,25 @@ class EmailConfig(Config): ) if self.email_enable_notifs: - required = [ - "smtp_host", - "smtp_port", - "notif_from", - "notif_template_html", - "notif_template_text", - ] - missing = [] - for k in required: - if k not in email_config: - missing.append(k) - - if len(missing) > 0: - raise RuntimeError( - "email.enable_notifs is True but required keys are missing: %s" - % (", ".join(["email." + k for k in missing]),) - ) + if not self.email_notif_from: + missing.append("email.notif_from") if config.get("public_baseurl") is None: - raise RuntimeError( - "email.enable_notifs is True but no public_baseurl is set" + missing.append("public_baseurl") + + if missing: + raise ConfigError( + "email.enable_notifs is True but required keys are missing: %s" + % (", ".join(missing),) ) - self.email_notif_template_html = email_config["notif_template_html"] - self.email_notif_template_text = email_config["notif_template_text"] + self.email_notif_template_html = email_config.get( + "notif_template_html", "notif_mail.html" + ) + self.email_notif_template_text = email_config.get( + "notif_template_text", "notif_mail.txt" + ) for f in self.email_notif_template_text, self.email_notif_template_html: p = os.path.join(self.email_template_dir, f) @@ -323,10 +316,6 @@ class EmailConfig(Config): # #require_transport_security: true - # Enable sending emails for messages that the user has missed - # - #enable_notifs: false - # notif_from defines the "From" address to use when sending emails. # It must be set if email sending is enabled. # @@ -344,6 +333,11 @@ class EmailConfig(Config): # #app_name: my_branded_matrix_server + # Uncomment the following to enable sending emails for messages that the user + # has missed. Disabled by default. + # + #enable_notifs: true + # Uncomment the following to disable automatic subscription to email # notifications for new users. Enabled by default. # From a301934f4610ffce490fbb925aaa898aac2829bc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 Feb 2020 15:46:41 +0000 Subject: [PATCH 176/382] Upsert room version when we join over federation (#6968) This is intended as a precursor to storing room versions when we receive an invite over federation, but has the happy side-effect of fixing #3374 at last. In short: change the store_room with try/except to a proper upsert which updates the right columns. --- changelog.d/6968.bugfix | 1 + synapse/handlers/federation.py | 22 ++++++++++++---------- synapse/storage/data_stores/main/room.py | 17 +++++++++++++++++ 3 files changed, 30 insertions(+), 10 deletions(-) create mode 100644 changelog.d/6968.bugfix diff --git a/changelog.d/6968.bugfix b/changelog.d/6968.bugfix new file mode 100644 index 0000000000..9965bfc0c3 --- /dev/null +++ b/changelog.d/6968.bugfix @@ -0,0 +1 @@ +Fix `duplicate key` error which was logged when rejoining a room over federation. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a689065f89..fb0a586eaa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1323,16 +1323,18 @@ class FederationHandler(BaseHandler): logger.debug("do_invite_join event: %s", event) - try: - await self.store.store_room( - room_id=room_id, - room_creator_user_id="", - is_public=False, - room_version=room_version_obj, - ) - except Exception: - # FIXME - pass + # if this is the first time we've joined this room, it's time to add + # a row to `rooms` with the correct room version. If there's already a + # row there, we should override it, since it may have been populated + # based on an invite request which lied about the room version. + # + # federation_client.send_join has already checked that the room + # version in the received create event is the same as room_version_obj, + # so we can rely on it now. + # + await self.store.upsert_room_on_join( + room_id=room_id, room_version=room_version_obj, + ) await self._persist_auth_tree( origin, auth_chain, state, event, room_version_obj diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index 9a17e336ba..70137dfbe4 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -954,6 +954,23 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): self.config = hs.config + async def upsert_room_on_join(self, room_id: str, room_version: RoomVersion): + """Ensure that the room is stored in the table + + Called when we join a room over federation, and overwrites any room version + currently in the table. + """ + await self.db.simple_upsert( + desc="upsert_room_on_join", + table="rooms", + keyvalues={"room_id": room_id}, + values={"room_version": room_version.identifier}, + insertion_values={"is_public": False, "creator": ""}, + # rooms has a unique constraint on room_id, so no need to lock when doing an + # emulated upsert. + lock=False, + ) + @defer.inlineCallbacks def store_room( self, From 691659568fa57f6afd9918886efc72b9e7081d8f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 Feb 2020 17:20:45 +0000 Subject: [PATCH 177/382] Remove redundant store_room call (#6979) `_process_received_pdu` is only called by `on_receive_pdu`, which ignores any events for unknown rooms, so this is redundant. --- changelog.d/6979.misc | 1 + synapse/handlers/federation.py | 23 ----------------------- 2 files changed, 1 insertion(+), 23 deletions(-) create mode 100644 changelog.d/6979.misc diff --git a/changelog.d/6979.misc b/changelog.d/6979.misc new file mode 100644 index 0000000000..c57b398c2f --- /dev/null +++ b/changelog.d/6979.misc @@ -0,0 +1 @@ +Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. 
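The block deleted below could only ever run for rooms this server already knew about, because of the guard upstream. A toy model of the control flow the commit message describes — illustrative names and signatures, not Synapse's real API:

    async def on_receive_pdu(handler, origin, pdu):
        # PDUs for rooms this server is not in are dropped here...
        if not await handler.is_in_room(pdu.room_id):
            return
        # ...so by the time _process_received_pdu runs, the `rooms` row must
        # already exist, making the get_room/store_room fallback dead code
        await handler.process_received_pdu(origin, pdu)
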
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index fb0a586eaa..c2e6ee266d 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -41,7 +41,6 @@ from synapse.api.errors import (
     FederationDeniedError,
     FederationError,
     RequestSendFailed,
-    StoreError,
     SynapseError,
 )
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
@@ -707,28 +706,6 @@ class FederationHandler(BaseHandler):
         except AuthError as e:
             raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
 
-        room = await self.store.get_room(room_id)
-
-        if not room:
-            try:
-                prev_state_ids = await context.get_prev_state_ids()
-                create_event = await self.store.get_event(
-                    prev_state_ids[(EventTypes.Create, "")]
-                )
-
-                room_version_id = create_event.content.get(
-                    "room_version", RoomVersions.V1.identifier
-                )
-
-                await self.store.store_room(
-                    room_id=room_id,
-                    room_creator_user_id="",
-                    is_public=False,
-                    room_version=KNOWN_ROOM_VERSIONS[room_version_id],
-                )
-            except StoreError:
-                logger.exception("Failed to store room.")
-
         if event.type == EventTypes.Member:
             if event.membership == Membership.JOIN:
                 # Only fire user_joined_room if the user has actually

From 4aea0bd292bf9e33d166fcfd632159cfe35050dd Mon Sep 17 00:00:00 2001
From: Fridtjof Mund <2780577+fridtjof@users.noreply.github.com>
Date: Tue, 25 Feb 2020 11:48:13 +0100
Subject: [PATCH 178/382] contrib/docker: remove quotes for POSTGRES_INITDB_ARGS (#6984)

I made a mistake in https://github.com/matrix-org/synapse/pull/6921 - the
quotes break the postgres container's startup script (or docker-compose),
which makes initdb fail:
https://github.com/matrix-org/synapse/pull/6921#issuecomment-590657154

Signed-off-by: Fridtjof Mund
---
 changelog.d/6984.docker           | 1 +
 contrib/docker/docker-compose.yml | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6984.docker

diff --git a/changelog.d/6984.docker b/changelog.d/6984.docker
new file mode 100644
index 0000000000..84a55e1267
--- /dev/null
+++ b/changelog.d/6984.docker
@@ -0,0 +1 @@
+Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 5df29379c8..453b305053 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -58,7 +58,7 @@ services:
       - POSTGRES_PASSWORD=changeme
       # ensure the database gets created correctly
       # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
-      - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
+      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
     volumes:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data

From bbf8886a05be6a929556d6f09a1b6ce053a3c403 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 25 Feb 2020 16:56:55 +0000
Subject: [PATCH 179/382] Merge worker apps into one.
(#6964) --- changelog.d/6964.misc | 1 + synapse/app/appservice.py | 156 +--- synapse/app/client_reader.py | 190 +--- synapse/app/event_creator.py | 186 +--- synapse/app/federation_reader.py | 172 +--- synapse/app/federation_sender.py | 303 +------ synapse/app/frontend_proxy.py | 236 +---- synapse/app/generic_worker.py | 917 ++++++++++++++++++++ synapse/app/media_repository.py | 157 +--- synapse/app/pusher.py | 209 +---- synapse/app/synchrotron.py | 449 +--------- synapse/app/user_dir.py | 211 +---- synapse/replication/slave/storage/events.py | 20 + synapse/storage/data_stores/main/pusher.py | 156 ++-- tests/app/test_frontend_proxy.py | 12 +- tests/app/test_openid_listener.py | 4 +- 16 files changed, 1052 insertions(+), 2327 deletions(-) create mode 100644 changelog.d/6964.misc create mode 100644 synapse/app/generic_worker.py diff --git a/changelog.d/6964.misc b/changelog.d/6964.misc new file mode 100644 index 0000000000..ec5c004bbe --- /dev/null +++ b/changelog.d/6964.misc @@ -0,0 +1 @@ +Merge worker apps together. diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 2217d4a4fb..add43147b3 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -13,161 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging + import sys -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.directory import DirectoryStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.server import HomeServer -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.appservice") - - -class AppserviceSlaveStore( - DirectoryStore, - SlavedEventStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, -): - pass - - -class AppserviceServer(HomeServer): - DATASTORE_CLASS = AppserviceSlaveStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse appservice now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if 
listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ASReplicationHandler(self) - - -class ASReplicationHandler(ReplicationClientHandler): - def __init__(self, hs): - super(ASReplicationHandler, self).__init__(hs.get_datastore()) - self.appservice_handler = hs.get_application_service_handler() - - async def on_rdata(self, stream_name, token, rows): - await super(ASReplicationHandler, self).on_rdata(stream_name, token, rows) - - if stream_name == "events": - max_stream_id = self.store.get_room_max_stream_ordering() - run_in_background(self._notify_app_services, max_stream_id) - - @defer.inlineCallbacks - def _notify_app_services(self, room_stream_id): - try: - yield self.appservice_handler.notify_interested_services(room_stream_id) - except Exception: - logger.exception("Error notifying application services of event") - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse appservice", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.appservice" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - if config.notify_appservices: - sys.stderr.write( - "\nThe appservices must be disabled in the main synapse process" - "\nbefore they can be run in a separate worker." - "\nPlease add ``notify_appservices: false`` to the main config" - "\n" - ) - sys.exit(1) - - # Force the pushers to start since they will be disabled in the main config - config.notify_appservices = True - - ps = AppserviceServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ps, config, use_worker_options=True) - - ps.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ps, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-appservice", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 7fa91a3b11..add43147b3 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -13,195 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import logging + import sys -from twisted.internet import reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.server import JsonResource -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.directory import DirectoryStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore -from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.profile import SlavedProfileStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v1.login import LoginRestServlet -from synapse.rest.client.v1.push_rule import PushRuleRestServlet -from synapse.rest.client.v1.room import ( - JoinedRoomMemberListRestServlet, - PublicRoomListRestServlet, - RoomEventContextServlet, - RoomMemberListRestServlet, - RoomMessageListRestServlet, - RoomStateRestServlet, -) -from synapse.rest.client.v1.voip import VoipRestServlet -from synapse.rest.client.v2_alpha import groups -from synapse.rest.client.v2_alpha.account import ThreepidRestServlet -from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet -from synapse.rest.client.v2_alpha.register import RegisterRestServlet -from synapse.rest.client.versions import VersionsRestServlet -from synapse.server import HomeServer -from synapse.storage.data_stores.main.monthly_active_users import ( - MonthlyActiveUsersWorkerStore, -) -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.client_reader") - - -class ClientReaderSlavedStore( - SlavedDeviceInboxStore, - SlavedDeviceStore, - SlavedReceiptsStore, - SlavedPushRuleStore, - SlavedGroupServerStore, - SlavedAccountDataStore, - SlavedEventStore, - SlavedKeyStore, - RoomStore, - DirectoryStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedTransactionStore, - SlavedProfileStore, - SlavedClientIpStore, - MonthlyActiveUsersWorkerStore, - BaseSlavedStore, -): - pass - - -class ClientReaderServer(HomeServer): - DATASTORE_CLASS = ClientReaderSlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] 
- bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "client": - resource = JsonResource(self, canonical_json=False) - - PublicRoomListRestServlet(self).register(resource) - RoomMemberListRestServlet(self).register(resource) - JoinedRoomMemberListRestServlet(self).register(resource) - RoomStateRestServlet(self).register(resource) - RoomEventContextServlet(self).register(resource) - RoomMessageListRestServlet(self).register(resource) - RegisterRestServlet(self).register(resource) - LoginRestServlet(self).register(resource) - ThreepidRestServlet(self).register(resource) - KeyQueryServlet(self).register(resource) - KeyChangesServlet(self).register(resource) - VoipRestServlet(self).register(resource) - PushRuleRestServlet(self).register(resource) - VersionsRestServlet(self).register(resource) - - groups.register_servlets(self, resource) - - resources.update({"/_matrix/client": resource}) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse client reader now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ReplicationClientHandler(self.get_datastore()) - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse client reader", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.client_reader" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = ClientReaderServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-client-reader", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 58e5b354f6..e9c098c4e7 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -13,191 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
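
[Editor's note] ClientReaderSlavedStore above, and every other *SlavedStore this patch removes, is assembled the same way: list exactly the slaved-store mixins the worker needs as base classes and let Python's MRO compose a single datastore out of them. A toy illustration of that composition (the store classes here are invented for the example):

class BaseSlavedStore:
    def __init__(self, db_conn):
        self.db_conn = db_conn


class SlavedEventStoreExample(BaseSlavedStore):
    def get_event(self, event_id):
        return {"event_id": event_id}


class SlavedRegistrationStoreExample(BaseSlavedStore):
    def get_user(self, user_id):
        return {"user_id": user_id}


class WorkerSlavedStore(
    SlavedEventStoreExample, SlavedRegistrationStoreExample, BaseSlavedStore
):
    """Cooperative multiple inheritance: one class exposes every slaved API."""


store = WorkerSlavedStore(db_conn=None)
assert store.get_event("$abc")["event_id"] == "$abc"
assert store.get_user("@alice:example.com")["user_id"] == "@alice:example.com"

The cost of this per-app composition was that adding an endpoint to a worker meant auditing its mixin list; the GenericWorkerSlavedStore introduced below simply takes the union.
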
-import logging + import sys -from twisted.internet import reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.server import JsonResource -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.directory import DirectoryStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.profile import SlavedProfileStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.pushers import SlavedPusherStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v1.profile import ( - ProfileAvatarURLRestServlet, - ProfileDisplaynameRestServlet, - ProfileRestServlet, -) -from synapse.rest.client.v1.room import ( - JoinRoomAliasServlet, - RoomMembershipRestServlet, - RoomSendEventRestServlet, - RoomStateEventRestServlet, -) -from synapse.server import HomeServer -from synapse.storage.data_stores.main.monthly_active_users import ( - MonthlyActiveUsersWorkerStore, -) -from synapse.storage.data_stores.main.user_directory import UserDirectoryStore -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.event_creator") - - -class EventCreatorSlavedStore( - # FIXME(#3714): We need to add UserDirectoryStore as we write directly - # rather than going via the correct worker. 
- UserDirectoryStore, - DirectoryStore, - SlavedTransactionStore, - SlavedProfileStore, - SlavedAccountDataStore, - SlavedPusherStore, - SlavedReceiptsStore, - SlavedPushRuleStore, - SlavedDeviceStore, - SlavedClientIpStore, - SlavedApplicationServiceStore, - SlavedEventStore, - SlavedRegistrationStore, - RoomStore, - MonthlyActiveUsersWorkerStore, - BaseSlavedStore, -): - pass - - -class EventCreatorServer(HomeServer): - DATASTORE_CLASS = EventCreatorSlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "client": - resource = JsonResource(self, canonical_json=False) - RoomSendEventRestServlet(self).register(resource) - RoomMembershipRestServlet(self).register(resource) - RoomStateEventRestServlet(self).register(resource) - JoinRoomAliasServlet(self).register(resource) - ProfileAvatarURLRestServlet(self).register(resource) - ProfileDisplaynameRestServlet(self).register(resource) - ProfileRestServlet(self).register(resource) - resources.update( - { - "/_matrix/client/r0": resource, - "/_matrix/client/unstable": resource, - "/_matrix/client/v2_alpha": resource, - "/_matrix/client/api/v1": resource, - } - ) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse event creator now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" 
- ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ReplicationClientHandler(self.get_datastore()) - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse event creator", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.event_creator" - - assert config.worker_replication_http_port is not None - - # This should only be done on the user directory worker or the master - config.update_user_directory = False - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = EventCreatorServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-event-creator", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index d055d11b23..add43147b3 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -13,177 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging + import sys -from twisted.internet import reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.federation.transport.server import TransportLayerServer -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.directory import DirectoryStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore -from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.profile import SlavedProfileStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.pushers import SlavedPusherStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import 
ReplicationClientHandler -from synapse.rest.key.v2 import KeyApiV2Resource -from synapse.server import HomeServer -from synapse.storage.data_stores.main.monthly_active_users import ( - MonthlyActiveUsersWorkerStore, -) -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.federation_reader") - - -class FederationReaderSlavedStore( - SlavedAccountDataStore, - SlavedProfileStore, - SlavedApplicationServiceStore, - SlavedPusherStore, - SlavedPushRuleStore, - SlavedReceiptsStore, - SlavedEventStore, - SlavedKeyStore, - SlavedRegistrationStore, - SlavedGroupServerStore, - SlavedDeviceStore, - RoomStore, - DirectoryStore, - SlavedTransactionStore, - MonthlyActiveUsersWorkerStore, - BaseSlavedStore, -): - pass - - -class FederationReaderServer(HomeServer): - DATASTORE_CLASS = FederationReaderSlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "federation": - resources.update({FEDERATION_PREFIX: TransportLayerServer(self)}) - if name == "openid" and "federation" not in res["names"]: - # Only load the openid resource separately if federation resource - # is not specified since federation resource includes openid - # resource. - resources.update( - { - FEDERATION_PREFIX: TransportLayerServer( - self, servlet_groups=["openid"] - ) - } - ) - - if name in ["keys", "federation"]: - resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - reactor=self.get_reactor(), - ) - - logger.info("Synapse federation reader now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" 
- ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ReplicationClientHandler(self.get_datastore()) - - -def start(config_options): - try: - config = HomeServerConfig.load_config( - "Synapse federation reader", config_options - ) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.federation_reader" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = FederationReaderServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-federation-reader", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index b7fcf80ddc..add43147b3 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -13,308 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging + import sys -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.federation import send_queue -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.replication.tcp.streams._base import ( - DeviceListsStream, - ReceiptsStream, - ToDeviceStream, -) -from synapse.server import HomeServer -from synapse.storage.database import Database -from synapse.types import ReadReceipt -from synapse.util.async_helpers import Linearizer -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.federation_sender") - - -class FederationSenderSlaveStore( - SlavedDeviceInboxStore, - SlavedTransactionStore, - SlavedReceiptsStore, - SlavedEventStore, - SlavedRegistrationStore, - SlavedDeviceStore, - 
SlavedPresenceStore, -): - def __init__(self, database: Database, db_conn, hs): - super(FederationSenderSlaveStore, self).__init__(database, db_conn, hs) - - # We pull out the current federation stream position now so that we - # always have a known value for the federation position in memory so - # that we don't have to bounce via a deferred once when we start the - # replication streams. - self.federation_out_pos_startup = self._get_federation_out_pos(db_conn) - - def _get_federation_out_pos(self, db_conn): - sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?" - sql = self.database_engine.convert_param_style(sql) - - txn = db_conn.cursor() - txn.execute(sql, ("federation",)) - rows = txn.fetchall() - txn.close() - - return rows[0][0] if rows else -1 - - -class FederationSenderServer(HomeServer): - DATASTORE_CLASS = FederationSenderSlaveStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse federation_sender now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return FederationSenderReplicationHandler(self) - - -class FederationSenderReplicationHandler(ReplicationClientHandler): - def __init__(self, hs): - super(FederationSenderReplicationHandler, self).__init__(hs.get_datastore()) - self.send_handler = FederationSenderHandler(hs, self) - - async def on_rdata(self, stream_name, token, rows): - await super(FederationSenderReplicationHandler, self).on_rdata( - stream_name, token, rows - ) - self.send_handler.process_replication_rows(stream_name, token, rows) - - def get_streams_to_replicate(self): - args = super( - FederationSenderReplicationHandler, self - ).get_streams_to_replicate() - args.update(self.send_handler.stream_positions()) - return args - - def on_remote_server_up(self, server: str): - """Called when get a new REMOTE_SERVER_UP command.""" - - # Let's wake up the transaction queue for the server in case we have - # pending stuff to send to it. 
- self.send_handler.wake_destination(server) - - -def start(config_options): - try: - config = HomeServerConfig.load_config( - "Synapse federation sender", config_options - ) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.federation_sender" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - if config.send_federation: - sys.stderr.write( - "\nThe send_federation must be disabled in the main synapse process" - "\nbefore they can be run in a separate worker." - "\nPlease add ``send_federation: false`` to the main config" - "\n" - ) - sys.exit(1) - - # Force the pushers to start since they will be disabled in the main config - config.send_federation = True - - ss = FederationSenderServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-federation-sender", config) - - -class FederationSenderHandler(object): - """Processes the replication stream and forwards the appropriate entries - to the federation sender. - """ - - def __init__(self, hs: FederationSenderServer, replication_client): - self.store = hs.get_datastore() - self._is_mine_id = hs.is_mine_id - self.federation_sender = hs.get_federation_sender() - self.replication_client = replication_client - - self.federation_position = self.store.federation_out_pos_startup - self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - - self._last_ack = self.federation_position - - self._room_serials = {} - self._room_typing = {} - - def on_start(self): - # There may be some events that are persisted but haven't been sent, - # so send them now. - self.federation_sender.notify_new_events( - self.store.get_room_max_stream_ordering() - ) - - def wake_destination(self, server: str): - self.federation_sender.wake_destination(server) - - def stream_positions(self): - return {"federation": self.federation_position} - - def process_replication_rows(self, stream_name, token, rows): - # The federation stream contains things that we want to send out, e.g. - # presence, typing, etc. - if stream_name == "federation": - send_queue.process_rows_for_federation(self.federation_sender, rows) - run_in_background(self.update_token, token) - - # We also need to poke the federation sender when new events happen - elif stream_name == "events": - self.federation_sender.notify_new_events(token) - - # ... and when new receipts happen - elif stream_name == ReceiptsStream.NAME: - run_as_background_process( - "process_receipts_for_federation", self._on_new_receipts, rows - ) - - # ... as well as device updates and messages - elif stream_name == DeviceListsStream.NAME: - hosts = {row.destination for row in rows} - for host in hosts: - self.federation_sender.send_device_messages(host) - - elif stream_name == ToDeviceStream.NAME: - # The to_device stream includes stuff to be pushed to both local - # clients and remote servers, so we ignore entities that start with - # '@' (since they'll be local users rather than destinations). 
- hosts = {row.entity for row in rows if not row.entity.startswith("@")} - for host in hosts: - self.federation_sender.send_device_messages(host) - - @defer.inlineCallbacks - def _on_new_receipts(self, rows): - """ - Args: - rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]): - new receipts to be processed - """ - for receipt in rows: - # we only want to send on receipts for our own users - if not self._is_mine_id(receipt.user_id): - continue - receipt_info = ReadReceipt( - receipt.room_id, - receipt.receipt_type, - receipt.user_id, - [receipt.event_id], - receipt.data, - ) - yield self.federation_sender.send_read_receipt(receipt_info) - - @defer.inlineCallbacks - def update_token(self, token): - try: - self.federation_position = token - - # We linearize here to ensure we don't have races updating the token - with (yield self._fed_position_linearizer.queue(None)): - if self._last_ack < self.federation_position: - yield self.store.update_federation_out_pos( - "federation", self.federation_position - ) - - # We ACK this token over replication so that the master can drop - # its in memory queues - self.replication_client.send_federation_ack( - self.federation_position - ) - self._last_ack = self.federation_position - except Exception: - logger.exception("Error updating federation stream position") - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 30e435eead..add43147b3 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -13,241 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
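
[Editor's note] The update_token method deleted just above (it reappears essentially unchanged in generic_worker.py) is the subtle part of the federation sender: the position write and the replication ACK are serialised through a Linearizer so that two concurrent token updates cannot persist an older position over a newer one, and the master only drops its in-memory queues once the position is safely stored. Roughly, with an asyncio.Lock standing in for Synapse's Linearizer:

import asyncio


class FederationPositionTracker:
    def __init__(self, store_position, send_ack):
        self._store_position = store_position  # async: persist (type, stream_id)
        self._send_ack = send_ack              # sync: ACK position to the master
        self._lock = asyncio.Lock()            # stands in for the Linearizer
        self.federation_position = -1
        self._last_ack = -1

    async def update_token(self, token):
        self.federation_position = token
        async with self._lock:
            # Only persist and ACK if we actually moved forwards; a stale
            # update that lost the race becomes a no-op.
            if self._last_ack < self.federation_position:
                await self._store_position("federation", self.federation_position)
                self._send_ack(self.federation_position)
                self._last_ack = self.federation_position
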
-import logging + import sys -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.api.errors import HttpResponseException, SynapseError -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.server import JsonResource -from synapse.http.servlet import RestServlet, parse_json_object_from_request -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v2_alpha._base import client_patterns -from synapse.server import HomeServer -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.frontend_proxy") - - -class PresenceStatusStubServlet(RestServlet): - PATTERNS = client_patterns("/presence/(?P[^/]*)/status") - - def __init__(self, hs): - super(PresenceStatusStubServlet, self).__init__() - self.http_client = hs.get_simple_http_client() - self.auth = hs.get_auth() - self.main_uri = hs.config.worker_main_http_uri - - @defer.inlineCallbacks - def on_GET(self, request, user_id): - # Pass through the auth headers, if any, in case the access token - # is there. - auth_headers = request.requestHeaders.getRawHeaders("Authorization", []) - headers = {"Authorization": auth_headers} - - try: - result = yield self.http_client.get_json( - self.main_uri + request.uri.decode("ascii"), headers=headers - ) - except HttpResponseException as e: - raise e.to_synapse_error() - - return 200, result - - @defer.inlineCallbacks - def on_PUT(self, request, user_id): - yield self.auth.get_user_by_req(request) - return 200, {} - - -class KeyUploadServlet(RestServlet): - PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): server - """ - super(KeyUploadServlet, self).__init__() - self.auth = hs.get_auth() - self.store = hs.get_datastore() - self.http_client = hs.get_simple_http_client() - self.main_uri = hs.config.worker_main_http_uri - - @defer.inlineCallbacks - def on_POST(self, request, device_id): - requester = yield self.auth.get_user_by_req(request, allow_guest=True) - user_id = requester.user.to_string() - body = parse_json_object_from_request(request) - - if device_id is not None: - # passing the device_id here is deprecated; however, we allow it - # for now for compatibility with older clients. 
- if requester.device_id is not None and device_id != requester.device_id: - logger.warning( - "Client uploading keys for a different device " - "(logged in as %s, uploading for %s)", - requester.device_id, - device_id, - ) - else: - device_id = requester.device_id - - if device_id is None: - raise SynapseError( - 400, "To upload keys, you must pass device_id when authenticating" - ) - - if body: - # They're actually trying to upload something, proxy to main synapse. - # Pass through the auth headers, if any, in case the access token - # is there. - auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", []) - headers = {"Authorization": auth_headers} - result = yield self.http_client.post_json_get_json( - self.main_uri + request.uri.decode("ascii"), body, headers=headers - ) - - return 200, result - else: - # Just interested in counts. - result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - return 200, {"one_time_key_counts": result} - - -class FrontendProxySlavedStore( - SlavedDeviceStore, - SlavedClientIpStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - BaseSlavedStore, -): - pass - - -class FrontendProxyServer(HomeServer): - DATASTORE_CLASS = FrontendProxySlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "client": - resource = JsonResource(self, canonical_json=False) - KeyUploadServlet(self).register(resource) - - # If presence is disabled, use the stub servlet that does - # not allow sending presence - if not self.config.use_presence: - PresenceStatusStubServlet(self).register(resource) - - resources.update( - { - "/_matrix/client/r0": resource, - "/_matrix/client/unstable": resource, - "/_matrix/client/v2_alpha": resource, - "/_matrix/client/api/v1": resource, - } - ) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - reactor=self.get_reactor(), - ) - - logger.info("Synapse client reader now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" 
- ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ReplicationClientHandler(self.get_datastore()) - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse frontend proxy", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.frontend_proxy" - - assert config.worker_main_http_uri is not None - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = FrontendProxyServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-frontend-proxy", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py new file mode 100644 index 0000000000..30efd39092 --- /dev/null +++ b/synapse/app/generic_worker.py @@ -0,0 +1,917 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
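
[Editor's note] The new file that follows is the heart of the patch. Nothing in it is decided by which synapse.app.* module was launched: the servlets and resources a worker exposes are driven by the resource names configured on each listener, and the background roles (federation sending, pushers, appservice notification) by config flags. In miniature, the listener handling amounts to something like this (invented helper names, just to show the shape):

def build_resource_map(listener_config, factories):
    """Mount resources based on the listener's configured resource names.

    factories maps a resource name ("client", "federation", "media", ...)
    to a zero-argument factory returning {url_prefix: resource} entries.
    """
    resources = {}
    for res in listener_config["resources"]:
        for name in res["names"]:
            if name in factories:
                resources.update(factories[name]())
    return resources


# One worker process can now serve both client and federation traffic:
listener = {"resources": [{"names": ["client", "federation"]}]}
resource_map = build_resource_map(
    listener,
    {
        "client": lambda: {"/_matrix/client": "<client JsonResource>"},
        "federation": lambda: {"/_matrix/federation": "<TransportLayerServer>"},
    },
)
assert set(resource_map) == {"/_matrix/client", "/_matrix/federation"}
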
+import contextlib +import logging +import sys + +from twisted.internet import defer, reactor +from twisted.web.resource import NoResource + +import synapse +import synapse.events +from synapse.api.constants import EventTypes +from synapse.api.errors import HttpResponseException, SynapseError +from synapse.api.urls import ( + CLIENT_API_PREFIX, + FEDERATION_PREFIX, + LEGACY_MEDIA_PREFIX, + MEDIA_PREFIX, + SERVER_KEY_V2_PREFIX, +) +from synapse.app import _base +from synapse.config._base import ConfigError +from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging +from synapse.federation import send_queue +from synapse.federation.transport.server import TransportLayerServer +from synapse.handlers.presence import PresenceHandler, get_interested_parties +from synapse.http.server import JsonResource +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseSite +from synapse.logging.context import LoggingContext, run_in_background +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ +from synapse.replication.slave.storage.account_data import SlavedAccountDataStore +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.client_ips import SlavedClientIpStore +from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore +from synapse.replication.slave.storage.directory import DirectoryStore +from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.filtering import SlavedFilteringStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore +from synapse.replication.slave.storage.keys import SlavedKeyStore +from synapse.replication.slave.storage.presence import SlavedPresenceStore +from synapse.replication.slave.storage.profile import SlavedProfileStore +from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore +from synapse.replication.slave.storage.pushers import SlavedPusherStore +from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.slave.storage.room import RoomStore +from synapse.replication.slave.storage.transactions import SlavedTransactionStore +from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.replication.tcp.streams._base import ( + DeviceListsStream, + ReceiptsStream, + ToDeviceStream, +) +from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow +from synapse.rest.admin import register_servlets_for_media_repo +from synapse.rest.client.v1 import events +from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet +from synapse.rest.client.v1.login import LoginRestServlet +from synapse.rest.client.v1.profile import ( + ProfileAvatarURLRestServlet, + ProfileDisplaynameRestServlet, + ProfileRestServlet, +) +from synapse.rest.client.v1.push_rule import PushRuleRestServlet +from synapse.rest.client.v1.room import ( + JoinedRoomMemberListRestServlet, + JoinRoomAliasServlet, + PublicRoomListRestServlet, + RoomEventContextServlet, + 
RoomInitialSyncRestServlet, + RoomMemberListRestServlet, + RoomMembershipRestServlet, + RoomMessageListRestServlet, + RoomSendEventRestServlet, + RoomStateEventRestServlet, + RoomStateRestServlet, +) +from synapse.rest.client.v1.voip import VoipRestServlet +from synapse.rest.client.v2_alpha import groups, sync, user_directory +from synapse.rest.client.v2_alpha._base import client_patterns +from synapse.rest.client.v2_alpha.account import ThreepidRestServlet +from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet +from synapse.rest.client.v2_alpha.register import RegisterRestServlet +from synapse.rest.client.versions import VersionsRestServlet +from synapse.rest.key.v2 import KeyApiV2Resource +from synapse.server import HomeServer +from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore +from synapse.storage.data_stores.main.monthly_active_users import ( + MonthlyActiveUsersWorkerStore, +) +from synapse.storage.data_stores.main.presence import UserPresenceState +from synapse.storage.data_stores.main.user_directory import UserDirectoryStore +from synapse.types import ReadReceipt +from synapse.util.async_helpers import Linearizer +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.manhole import manhole +from synapse.util.stringutils import random_string +from synapse.util.versionstring import get_version_string + +logger = logging.getLogger("synapse.app.generic_worker") + + +class PresenceStatusStubServlet(RestServlet): + """If presence is disabled this servlet can be used to stub out setting + presence status, while proxying the getters to the master instance. + """ + + PATTERNS = client_patterns("/presence/(?P[^/]*)/status") + + def __init__(self, hs): + super(PresenceStatusStubServlet, self).__init__() + self.http_client = hs.get_simple_http_client() + self.auth = hs.get_auth() + self.main_uri = hs.config.worker_main_http_uri + + async def on_GET(self, request, user_id): + # Pass through the auth headers, if any, in case the access token + # is there. + auth_headers = request.requestHeaders.getRawHeaders("Authorization", []) + headers = {"Authorization": auth_headers} + + try: + result = await self.http_client.get_json( + self.main_uri + request.uri.decode("ascii"), headers=headers + ) + except HttpResponseException as e: + raise e.to_synapse_error() + + return 200, result + + async def on_PUT(self, request, user_id): + await self.auth.get_user_by_req(request) + return 200, {} + + +class KeyUploadServlet(RestServlet): + """An implementation of the `KeyUploadServlet` that responds to read only + requests, but otherwise proxies through to the master instance. + """ + + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(KeyUploadServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.http_client = hs.get_simple_http_client() + self.main_uri = hs.config.worker_main_http_uri + + async def on_POST(self, request, device_id): + requester = await self.auth.get_user_by_req(request, allow_guest=True) + user_id = requester.user.to_string() + body = parse_json_object_from_request(request) + + if device_id is not None: + # passing the device_id here is deprecated; however, we allow it + # for now for compatibility with older clients. 
+            if requester.device_id is not None and device_id != requester.device_id:
+                logger.warning(
+                    "Client uploading keys for a different device "
+                    "(logged in as %s, uploading for %s)",
+                    requester.device_id,
+                    device_id,
+                )
+        else:
+            device_id = requester.device_id
+
+        if device_id is None:
+            raise SynapseError(
+                400, "To upload keys, you must pass device_id when authenticating"
+            )
+
+        if body:
+            # They're actually trying to upload something, proxy to main synapse.
+            # Pass through the auth headers, if any, in case the access token
+            # is there.
+            auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
+            headers = {"Authorization": auth_headers}
+            result = await self.http_client.post_json_get_json(
+                self.main_uri + request.uri.decode("ascii"), body, headers=headers
+            )
+
+            return 200, result
+        else:
+            # Just interested in counts.
+            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
+            return 200, {"one_time_key_counts": result}
+
+
+UPDATE_SYNCING_USERS_MS = 10 * 1000
+
+
+class GenericWorkerPresence(object):
+    def __init__(self, hs):
+        self.hs = hs
+        self.is_mine_id = hs.is_mine_id
+        self.http_client = hs.get_simple_http_client()
+        self.store = hs.get_datastore()
+        self.user_to_num_current_syncs = {}
+        self.clock = hs.get_clock()
+        self.notifier = hs.get_notifier()
+
+        active_presence = self.store.take_presence_startup_info()
+        self.user_to_current_state = {state.user_id: state for state in active_presence}
+
+        # user_id -> last_sync_ms. Lists the users that have stopped syncing
+        # but we haven't notified the master of that yet
+        self.users_going_offline = {}
+
+        self._send_stop_syncing_loop = self.clock.looping_call(
+            self.send_stop_syncing, UPDATE_SYNCING_USERS_MS
+        )
+
+        self.process_id = random_string(16)
+        logger.info("Presence process_id is %r", self.process_id)
+
+    def send_user_sync(self, user_id, is_syncing, last_sync_ms):
+        if self.hs.config.use_presence:
+            self.hs.get_tcp_replication().send_user_sync(
+                user_id, is_syncing, last_sync_ms
+            )
+
+    def mark_as_coming_online(self, user_id):
+        """A user has started syncing. Send a UserSync to the master, unless they
+        had recently stopped syncing.
+
+        Args:
+            user_id (str)
+        """
+        going_offline = self.users_going_offline.pop(user_id, None)
+        if not going_offline:
+            # Safe to skip because we haven't yet told the master they were offline
+            self.send_user_sync(user_id, True, self.clock.time_msec())
+
+    def mark_as_going_offline(self, user_id):
+        """A user has stopped syncing. We wait before notifying the master, as
+        it's likely they'll come back soon. This allows us to avoid sending a
+        stopped-syncing notification immediately followed by a started-syncing
+        notification to the master.
+
+        Args:
+            user_id (str)
+        """
+        self.users_going_offline[user_id] = self.clock.time_msec()
+
+    def send_stop_syncing(self):
+        """Check if there are any users who have stopped syncing a while ago
+        and haven't come back yet. If there are, poke the master about them.
+        """
+        now = self.clock.time_msec()
+        for user_id, last_sync_ms in list(self.users_going_offline.items()):
+            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
+                self.users_going_offline.pop(user_id, None)
+                self.send_user_sync(user_id, False, last_sync_ms)
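
[Editor's note] The three methods above form a debounce: coming online is reported to the master immediately, but going offline is only recorded, and is reported only once the user has stayed away for UPDATE_SYNCING_USERS_MS. This avoids a stopped-syncing/started-syncing pair for every brief gap between /sync requests. A condensed restatement of that state machine, with an injectable clock so it can be exercised directly (the class and parameter names are illustrative):

UPDATE_SYNCING_USERS_MS = 10 * 1000


class SyncStateDebouncer:
    def __init__(self, send_user_sync, clock_ms):
        self._send_user_sync = send_user_sync  # (user_id, is_syncing, last_sync_ms)
        self._clock_ms = clock_ms              # zero-arg callable returning ms
        self._going_offline = {}               # user_id -> last_sync_ms

    def mark_as_coming_online(self, user_id):
        # If they were only briefly offline, the master was never told, so
        # there is nothing to send now either.
        if self._going_offline.pop(user_id, None) is None:
            self._send_user_sync(user_id, True, self._clock_ms())

    def mark_as_going_offline(self, user_id):
        self._going_offline[user_id] = self._clock_ms()

    def send_stop_syncing(self):
        # Runs on a timer; only now do we tell the master about users who
        # stayed offline past the debounce window.
        now = self._clock_ms()
        for user_id, last_sync_ms in list(self._going_offline.items()):
            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
                del self._going_offline[user_id]
                self._send_user_sync(user_id, False, last_sync_ms)
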
+    def set_state(self, user, state, ignore_status_msg=False):
+        # TODO How's this supposed to work?
+        return defer.succeed(None)
+
+    get_states = __func__(PresenceHandler.get_states)
+    get_state = __func__(PresenceHandler.get_state)
+    current_state_for_users = __func__(PresenceHandler.current_state_for_users)
+
+    def user_syncing(self, user_id, affect_presence):
+        if affect_presence:
+            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+            self.user_to_num_current_syncs[user_id] = curr_sync + 1
+
+            # If we went from no in-flight syncs to some, notify replication
+            if self.user_to_num_current_syncs[user_id] == 1:
+                self.mark_as_coming_online(user_id)
+
+        def _end():
+            # We check that the user_id is in user_to_num_current_syncs because
+            # user_to_num_current_syncs may have been cleared if we are
+            # shutting down.
+            if affect_presence and user_id in self.user_to_num_current_syncs:
+                self.user_to_num_current_syncs[user_id] -= 1
+
+                # If we went from one in-flight sync to none, notify replication
+                if self.user_to_num_current_syncs[user_id] == 0:
+                    self.mark_as_going_offline(user_id)
+
+        @contextlib.contextmanager
+        def _user_syncing():
+            try:
+                yield
+            finally:
+                _end()
+
+        return defer.succeed(_user_syncing())
+
+    @defer.inlineCallbacks
+    def notify_from_replication(self, states, stream_id):
+        parties = yield get_interested_parties(self.store, states)
+        room_ids_to_states, users_to_states = parties
+
+        self.notifier.on_new_event(
+            "presence_key",
+            stream_id,
+            rooms=room_ids_to_states.keys(),
+            users=users_to_states.keys(),
+        )
+
+    @defer.inlineCallbacks
+    def process_replication_rows(self, token, rows):
+        states = [
+            UserPresenceState(
+                row.user_id,
+                row.state,
+                row.last_active_ts,
+                row.last_federation_update_ts,
+                row.last_user_sync_ts,
+                row.status_msg,
+                row.currently_active,
+            )
+            for row in rows
+        ]
+
+        for state in states:
+            self.user_to_current_state[state.user_id] = state
+
+        stream_id = token
+        yield self.notify_from_replication(states, stream_id)
+
+    def get_currently_syncing_users(self):
+        if self.hs.config.use_presence:
+            return [
+                user_id
+                for user_id, count in self.user_to_num_current_syncs.items()
+                if count > 0
+            ]
+        else:
+            return set()
+
+
+class GenericWorkerTyping(object):
+    def __init__(self, hs):
+        self._latest_room_serial = 0
+        self._reset()
+
+    def _reset(self):
+        """
+        Reset the typing handler's data caches.
+        """
+        # map room IDs to serial numbers
+        self._room_serials = {}
+        # map room IDs to sets of users currently typing
+        self._room_typing = {}
+
+    def stream_positions(self):
+        # We must update this typing token from the response of the previous
+        # sync. In particular, the stream id may "reset" back to zero/a low
+        # value which we *must* use for the next replication request.
+        return {"typing": self._latest_room_serial}
+
+    def process_replication_rows(self, token, rows):
+        if self._latest_room_serial > token:
+            # The master has gone backwards. To prevent inconsistent data, just
+            # clear everything.
+            self._reset()
+
+        # Set the latest serial token to whatever the server gave us.
+        self._latest_room_serial = token
+
+        for row in rows:
+            self._room_serials[row.room_id] = token
+            self._room_typing[row.room_id] = row.user_ids
+
+
+class GenericWorkerSlavedStore(
+    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+    # rather than going via the correct worker.
+ UserDirectoryStore, + SlavedDeviceInboxStore, + SlavedDeviceStore, + SlavedReceiptsStore, + SlavedPushRuleStore, + SlavedGroupServerStore, + SlavedAccountDataStore, + SlavedPusherStore, + SlavedEventStore, + SlavedKeyStore, + RoomStore, + DirectoryStore, + SlavedApplicationServiceStore, + SlavedRegistrationStore, + SlavedTransactionStore, + SlavedProfileStore, + SlavedClientIpStore, + SlavedPresenceStore, + SlavedFilteringStore, + MonthlyActiveUsersWorkerStore, + MediaRepositoryStore, + BaseSlavedStore, +): + def __init__(self, database, db_conn, hs): + super(GenericWorkerSlavedStore, self).__init__(database, db_conn, hs) + + # We pull out the current federation stream position now so that we + # always have a known value for the federation position in memory so + # that we don't have to bounce via a deferred once when we start the + # replication streams. + self.federation_out_pos_startup = self._get_federation_out_pos(db_conn) + + def _get_federation_out_pos(self, db_conn): + sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?" + sql = self.database_engine.convert_param_style(sql) + + txn = db_conn.cursor() + txn.execute(sql, ("federation",)) + rows = txn.fetchall() + txn.close() + + return rows[0][0] if rows else -1 + + +class GenericWorkerServer(HomeServer): + DATASTORE_CLASS = GenericWorkerSlavedStore + + def _listen_http(self, listener_config): + port = listener_config["port"] + bind_addresses = listener_config["bind_addresses"] + site_tag = listener_config.get("tag", port) + resources = {} + for res in listener_config["resources"]: + for name in res["names"]: + if name == "metrics": + resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) + elif name == "client": + resource = JsonResource(self, canonical_json=False) + + PublicRoomListRestServlet(self).register(resource) + RoomMemberListRestServlet(self).register(resource) + JoinedRoomMemberListRestServlet(self).register(resource) + RoomStateRestServlet(self).register(resource) + RoomEventContextServlet(self).register(resource) + RoomMessageListRestServlet(self).register(resource) + RegisterRestServlet(self).register(resource) + LoginRestServlet(self).register(resource) + ThreepidRestServlet(self).register(resource) + KeyQueryServlet(self).register(resource) + KeyChangesServlet(self).register(resource) + VoipRestServlet(self).register(resource) + PushRuleRestServlet(self).register(resource) + VersionsRestServlet(self).register(resource) + RoomSendEventRestServlet(self).register(resource) + RoomMembershipRestServlet(self).register(resource) + RoomStateEventRestServlet(self).register(resource) + JoinRoomAliasServlet(self).register(resource) + ProfileAvatarURLRestServlet(self).register(resource) + ProfileDisplaynameRestServlet(self).register(resource) + ProfileRestServlet(self).register(resource) + KeyUploadServlet(self).register(resource) + + sync.register_servlets(self, resource) + events.register_servlets(self, resource) + InitialSyncRestServlet(self).register(resource) + RoomInitialSyncRestServlet(self).register(resource) + + user_directory.register_servlets(self, resource) + + # If presence is disabled, use the stub servlet that does + # not allow sending presence + if not self.config.use_presence: + PresenceStatusStubServlet(self).register(resource) + + groups.register_servlets(self, resource) + + resources.update({CLIENT_API_PREFIX: resource}) + elif name == "federation": + resources.update({FEDERATION_PREFIX: TransportLayerServer(self)}) + elif name == "media": + media_repo = 
self.get_media_repository_resource() + + # We need to serve the admin servlets for media on the + # worker. + admin_resource = JsonResource(self, canonical_json=False) + register_servlets_for_media_repo(self, admin_resource) + + resources.update( + { + MEDIA_PREFIX: media_repo, + LEGACY_MEDIA_PREFIX: media_repo, + "/_synapse/admin": admin_resource, + } + ) + + if name == "openid" and "federation" not in res["names"]: + # Only load the openid resource separately if federation resource + # is not specified since federation resource includes openid + # resource. + resources.update( + { + FEDERATION_PREFIX: TransportLayerServer( + self, servlet_groups=["openid"] + ) + } + ) + + if name in ["keys", "federation"]: + resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) + + root_resource = create_resource_tree(resources, NoResource()) + + _base.listen_tcp( + bind_addresses, + port, + SynapseSite( + "synapse.access.http.%s" % (site_tag,), + site_tag, + listener_config, + root_resource, + self.version_string, + ), + reactor=self.get_reactor(), + ) + + logger.info("Synapse worker now listening on port %d", port) + + def start_listening(self, listeners): + for listener in listeners: + if listener["type"] == "http": + self._listen_http(listener) + elif listener["type"] == "manhole": + _base.listen_tcp( + listener["bind_addresses"], + listener["port"], + manhole( + username="matrix", password="rabbithole", globals={"hs": self} + ), + ) + elif listener["type"] == "metrics": + if not self.get_config().enable_metrics: + logger.warning( + ( + "Metrics listener configured, but " + "enable_metrics is not True!" + ) + ) + else: + _base.listen_metrics(listener["bind_addresses"], listener["port"]) + else: + logger.warning("Unrecognized listener type: %s", listener["type"]) + + self.get_tcp_replication().start_replication(self) + + def remove_pusher(self, app_id, push_key, user_id): + self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) + + def build_tcp_replication(self): + return GenericWorkerReplicationHandler(self) + + def build_presence_handler(self): + return GenericWorkerPresence(self) + + def build_typing_handler(self): + return GenericWorkerTyping(self) + + +class GenericWorkerReplicationHandler(ReplicationClientHandler): + def __init__(self, hs): + super(GenericWorkerReplicationHandler, self).__init__(hs.get_datastore()) + + self.store = hs.get_datastore() + self.typing_handler = hs.get_typing_handler() + # NB this is a SynchrotronPresence, not a normal PresenceHandler + self.presence_handler = hs.get_presence_handler() + self.notifier = hs.get_notifier() + + self.notify_pushers = hs.config.start_pushers + self.pusher_pool = hs.get_pusherpool() + + if hs.config.send_federation: + self.send_handler = FederationSenderHandler(hs, self) + else: + self.send_handler = None + + async def on_rdata(self, stream_name, token, rows): + await super(GenericWorkerReplicationHandler, self).on_rdata( + stream_name, token, rows + ) + run_in_background(self.process_and_notify, stream_name, token, rows) + + def get_streams_to_replicate(self): + args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate() + args.update(self.typing_handler.stream_positions()) + if self.send_handler: + args.update(self.send_handler.stream_positions()) + return args + + def get_currently_syncing_users(self): + return self.presence_handler.get_currently_syncing_users() + + async def process_and_notify(self, stream_name, token, rows): + try: + if self.send_handler: + 
self.send_handler.process_replication_rows(stream_name, token, rows)
+
+            if stream_name == "events":
+                # We shouldn't get multiple rows per token for events stream, so
+                # we don't need to optimise this for multiple rows.
+                for row in rows:
+                    if row.type != EventsStreamEventRow.TypeId:
+                        continue
+                    assert isinstance(row, EventsStreamRow)
+
+                    event = await self.store.get_event(
+                        row.data.event_id, allow_rejected=True
+                    )
+                    if event.rejected_reason:
+                        continue
+
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    max_token = self.store.get_room_max_stream_ordering()
+                    self.notifier.on_new_room_event(
+                        event, token, max_token, extra_users
+                    )
+
+                await self.pusher_pool.on_new_notifications(token, token)
+            elif stream_name == "push_rules":
+                self.notifier.on_new_event(
+                    "push_rules_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name in ("account_data", "tag_account_data"):
+                self.notifier.on_new_event(
+                    "account_data_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "receipt_key", token, rooms=[row.room_id for row in rows]
+                )
+                await self.pusher_pool.on_new_receipts(
+                    token, token, {row.room_id for row in rows}
+                )
+            elif stream_name == "typing":
+                self.typing_handler.process_replication_rows(token, rows)
+                self.notifier.on_new_event(
+                    "typing_key", token, rooms=[row.room_id for row in rows]
+                )
+            elif stream_name == "to_device":
+                entities = [row.entity for row in rows if row.entity.startswith("@")]
+                if entities:
+                    self.notifier.on_new_event("to_device_key", token, users=entities)
+            elif stream_name == "device_lists":
+                all_room_ids = set()
+                for row in rows:
+                    room_ids = await self.store.get_rooms_for_user(row.user_id)
+                    all_room_ids.update(room_ids)
+                self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
+            elif stream_name == "presence":
+                await self.presence_handler.process_replication_rows(token, rows)
+            elif stream_name == "groups":
+                self.notifier.on_new_event(
+                    "groups_key", token, users=[row.user_id for row in rows]
+                )
+            elif stream_name == "pushers":
+                for row in rows:
+                    if row.deleted:
+                        self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                    else:
+                        await self.start_pusher(row.user_id, row.app_id, row.pushkey)
+        except Exception:
+            logger.exception("Error processing replication")
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        if not self.notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self.pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
+    async def start_pusher(self, user_id, app_id, pushkey):
+        if not self.notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        logger.info("Starting pusher %r / %r", user_id, key)
+        return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+
+    def on_remote_server_up(self, server: str):
+        """Called when we get a new REMOTE_SERVER_UP command."""
+
+        # Let's wake up the transaction queue for the server in case we have
+        # pending stuff to send to it.
+        if self.send_handler:
+            self.send_handler.wake_destination(server)
+
+
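The `process_and_notify` method above routes every replication stream through one long if/elif chain, growing a branch per stream. Purely as an illustration (this is not how the patch structures the code), the same routing can be expressed as a dispatch table; `StreamDispatcher` and the registered handler names are hypothetical:

    class StreamDispatcher:
        """Illustrative only: route replication rows to per-stream callbacks."""

        def __init__(self):
            # Maps stream name -> async callback taking (token, rows).
            self._handlers = {}

        def register(self, stream_name, handler):
            self._handlers[stream_name] = handler

        async def dispatch(self, stream_name, token, rows):
            handler = self._handlers.get(stream_name)
            if handler is None:
                # Unknown streams are ignored, just as the chain above falls
                # through without a matching elif.
                return
            await handler(token, rows)

    # Usage sketch:
    #     dispatcher = StreamDispatcher()
    #     dispatcher.register("typing", self._on_typing_rows)
    #     await dispatcher.dispatch(stream_name, token, rows)
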
+class FederationSenderHandler(object):
+    """Processes the replication stream and forwards the appropriate entries
+    to the federation sender.
+    """
+
+    def __init__(self, hs: GenericWorkerServer, replication_client):
+        self.store = hs.get_datastore()
+        self._is_mine_id = hs.is_mine_id
+        self.federation_sender = hs.get_federation_sender()
+        self.replication_client = replication_client
+
+        self.federation_position = self.store.federation_out_pos_startup
+        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
+
+        self._last_ack = self.federation_position
+
+    def on_start(self):
+        # There may be some events that are persisted but haven't been sent,
+        # so send them now.
+        self.federation_sender.notify_new_events(
+            self.store.get_room_max_stream_ordering()
+        )
+
+    def wake_destination(self, server: str):
+        self.federation_sender.wake_destination(server)
+
+    def stream_positions(self):
+        return {"federation": self.federation_position}
+
+    def process_replication_rows(self, stream_name, token, rows):
+        # The federation stream contains things that we want to send out, e.g.
+        # presence, typing, etc.
+        if stream_name == "federation":
+            send_queue.process_rows_for_federation(self.federation_sender, rows)
+            run_in_background(self.update_token, token)
+
+        # We also need to poke the federation sender when new events happen
+        elif stream_name == "events":
+            self.federation_sender.notify_new_events(token)
+
+        # ... and when new receipts happen
+        elif stream_name == ReceiptsStream.NAME:
+            run_as_background_process(
+                "process_receipts_for_federation", self._on_new_receipts, rows
+            )
+
+        # ... as well as device updates and messages
+        elif stream_name == DeviceListsStream.NAME:
+            hosts = {row.destination for row in rows}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+        elif stream_name == ToDeviceStream.NAME:
+            # The to_device stream includes stuff to be pushed to both local
+            # clients and remote servers, so we ignore entities that start with
+            # '@' (since they'll be local users rather than destinations).
+            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+    async def _on_new_receipts(self, rows):
+        """
+        Args:
+            rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
+                new receipts to be processed
+        """
+        for receipt in rows:
+            # we only want to send on receipts for our own users
+            if not self._is_mine_id(receipt.user_id):
+                continue
+            receipt_info = ReadReceipt(
+                receipt.room_id,
+                receipt.receipt_type,
+                receipt.user_id,
+                [receipt.event_id],
+                receipt.data,
+            )
+            await self.federation_sender.send_read_receipt(receipt_info)
+
+    async def update_token(self, token):
+        try:
+            self.federation_position = token
+
+            # We linearize here to ensure we don't have races updating the token
+            with (await self._fed_position_linearizer.queue(None)):
+                if self._last_ack < self.federation_position:
+                    await self.store.update_federation_out_pos(
+                        "federation", self.federation_position
+                    )
+
+                    # We ACK this token over replication so that the master can drop
+                    # its in-memory queues
+                    self.replication_client.send_federation_ack(
+                        self.federation_position
+                    )
+                    self._last_ack = self.federation_position
+        except Exception:
+            logger.exception("Error updating federation stream position")
+
+
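`update_token` above pairs a `Linearizer` with a high-water mark (`_last_ack`) so that a slow database write can never overwrite a newer, already-acknowledged federation position. A minimal, self-contained sketch of that pattern, assuming only the same `Linearizer` utility from `synapse.util.async_helpers` that the handler itself uses; `PositionTracker` and `persist_callback` are hypothetical:

    from synapse.util.async_helpers import Linearizer

    class PositionTracker:
        """Illustrative only: serialise compare-and-persist of a stream position."""

        def __init__(self, persist_callback):
            self._persist = persist_callback  # async fn taking the new position
            self._position = 0
            self._last_persisted = 0
            self._linearizer = Linearizer(name="position_tracker")

        async def advance(self, token):
            # Record the newest position we have seen...
            self._position = max(self._position, token)
            # ...but persist under the linearizer, one caller at a time, so a
            # stale token can never clobber a newer one.
            with (await self._linearizer.queue(None)):
                if self._last_persisted < self._position:
                    await self._persist(self._position)
                    self._last_persisted = self._position
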
+def start(config_options):
+    try:
+        config = HomeServerConfig.load_config("Synapse worker", config_options)
+    except ConfigError as e:
+        sys.stderr.write("\n" + str(e) + "\n")
+        sys.exit(1)
+
+    # For backwards compatibility, allow any of the old worker app names.
+    assert config.worker_app in (
+        "synapse.app.appservice",
+        "synapse.app.client_reader",
+        "synapse.app.event_creator",
+        "synapse.app.federation_reader",
+        "synapse.app.federation_sender",
+        "synapse.app.frontend_proxy",
+        "synapse.app.generic_worker",
+        "synapse.app.media_repository",
+        "synapse.app.pusher",
+        "synapse.app.synchrotron",
+        "synapse.app.user_dir",
+    )
+
+    if config.worker_app == "synapse.app.appservice":
+        if config.notify_appservices:
+            sys.stderr.write(
+                "\nThe appservices must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``notify_appservices: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the appservices to start since they will be disabled in the main config
+        config.notify_appservices = True
+
+    if config.worker_app == "synapse.app.pusher":
+        if config.start_pushers:
+            sys.stderr.write(
+                "\nThe pushers must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``start_pushers: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the pushers to start since they will be disabled in the main config
+        config.start_pushers = True
+
+    if config.worker_app == "synapse.app.user_dir":
+        if config.update_user_directory:
+            sys.stderr.write(
+                "\nThe user directory updates must be disabled in the main synapse process"
+                "\nbefore they can be run in a separate worker."
+                "\nPlease add ``update_user_directory: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the user directory updates to start since they will be disabled in the main config
+        config.update_user_directory = True
+
+    if config.worker_app == "synapse.app.federation_sender":
+        if config.send_federation:
+            sys.stderr.write(
+                "\nThe federation sender must be disabled in the main synapse process"
+                "\nbefore it can be run in a separate worker."
+                "\nPlease add ``send_federation: false`` to the main config"
+                "\n"
+            )
+            sys.exit(1)
+
+        # Force the federation sender to start since it will be disabled in the main config
+        config.send_federation = True
+
+    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
+
+    ss = GenericWorkerServer(
+        config.server_name,
+        config=config,
+        version_string="Synapse/" + get_version_string(synapse),
+    )
+
+    setup_logging(ss, config, use_worker_options=True)
+
+    ss.setup()
+    reactor.addSystemEventTrigger(
+        "before", "startup", _base.start, ss, config.worker_listeners
+    )
+
+    _base.start_worker_reactor("synapse-generic-worker", config)
+
+
+if __name__ == "__main__":
+    with LoggingContext("main"):
+        start(sys.argv[1:])
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 5b5832214a..add43147b3 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -13,162 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
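The remaining hunks of this commit delete the specialised entrypoints (media repository, pusher, synchrotron, user directory) and replace each with a thin shim around `synapse.app.generic_worker.start`; the per-flag hand-off they used to implement individually now lives in the `start()` function above: the flag must be off in the main process, and the worker then forces it on locally. A hedged sketch of that convention as a reusable helper (`claim_responsibility` and `flag_name` are illustrative, not Synapse APIs):

    import sys

    def claim_responsibility(config, flag_name):
        """Illustrative only: take over a feature that must be off in the
        main process, mirroring the four checks in start() above."""
        if getattr(config, flag_name):
            sys.stderr.write(
                "\n%s must be disabled in the main synapse process before it"
                "\ncan be run in a separate worker. Please add ``%s: false``"
                "\nto the main config.\n" % (flag_name, flag_name)
            )
            sys.exit(1)
        # The worker now takes responsibility: enable the feature locally.
        setattr(config, flag_name, True)
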
-import logging + import sys -from twisted.internet import reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.api.urls import LEGACY_MEDIA_PREFIX, MEDIA_PREFIX -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.server import JsonResource -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.admin import register_servlets_for_media_repo -from synapse.server import HomeServer -from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.media_repository") - - -class MediaRepositorySlavedStore( - RoomStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedClientIpStore, - SlavedTransactionStore, - BaseSlavedStore, - MediaRepositoryStore, -): - pass - - -class MediaRepositoryServer(HomeServer): - DATASTORE_CLASS = MediaRepositorySlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "media": - media_repo = self.get_media_repository_resource() - - # We need to serve the admin servlets for media on the - # worker. - admin_resource = JsonResource(self, canonical_json=False) - register_servlets_for_media_repo(self, admin_resource) - - resources.update( - { - MEDIA_PREFIX: media_repo, - LEGACY_MEDIA_PREFIX: media_repo, - "/_synapse/admin": admin_resource, - } - ) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse media repository now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" 
- ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return ReplicationClientHandler(self.get_datastore()) - - -def start(config_options): - try: - config = HomeServerConfig.load_config( - "Synapse media repository", config_options - ) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.media_repository" - - if config.enable_media_repo: - _base.quit_with_error( - "enable_media_repo must be disabled in the main synapse process\n" - "before the media repo can be run in a separate worker.\n" - "Please add ``enable_media_repo: false`` to the main config\n" - ) - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = MediaRepositoryServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-media-repository", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 84e9f8d5e2..add43147b3 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -13,213 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging + import sys -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import __func__ -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.pushers import SlavedPusherStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.server import HomeServer -from synapse.storage import DataStore -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.pusher") - - -class PusherSlaveStore( - SlavedEventStore, - SlavedPusherStore, - SlavedReceiptsStore, - SlavedAccountDataStore, - RoomStore, -): - update_pusher_last_stream_ordering_and_success = __func__( - DataStore.update_pusher_last_stream_ordering_and_success - ) - - update_pusher_failing_since = __func__(DataStore.update_pusher_failing_since) - - update_pusher_last_stream_ordering = __func__( - DataStore.update_pusher_last_stream_ordering - ) - - 
get_throttle_params_by_room = __func__(DataStore.get_throttle_params_by_room) - - set_throttle_params = __func__(DataStore.set_throttle_params) - - get_time_of_last_push_action_before = __func__( - DataStore.get_time_of_last_push_action_before - ) - - get_profile_displayname = __func__(DataStore.get_profile_displayname) - - -class PusherServer(HomeServer): - DATASTORE_CLASS = PusherSlaveStore - - def remove_pusher(self, app_id, push_key, user_id): - self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse pusher now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return PusherReplicationHandler(self) - - -class PusherReplicationHandler(ReplicationClientHandler): - def __init__(self, hs): - super(PusherReplicationHandler, self).__init__(hs.get_datastore()) - - self.pusher_pool = hs.get_pusherpool() - - async def on_rdata(self, stream_name, token, rows): - await super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows) - run_in_background(self.poke_pushers, stream_name, token, rows) - - @defer.inlineCallbacks - def poke_pushers(self, stream_name, token, rows): - try: - if stream_name == "pushers": - for row in rows: - if row.deleted: - yield self.stop_pusher(row.user_id, row.app_id, row.pushkey) - else: - yield self.start_pusher(row.user_id, row.app_id, row.pushkey) - elif stream_name == "events": - yield self.pusher_pool.on_new_notifications(token, token) - elif stream_name == "receipts": - yield self.pusher_pool.on_new_receipts( - token, token, {row.room_id for row in rows} - ) - except Exception: - logger.exception("Error poking pushers") - - def stop_pusher(self, user_id, app_id, pushkey): - key = "%s:%s" % (app_id, pushkey) - pushers_for_user = self.pusher_pool.pushers.get(user_id, {}) - pusher = pushers_for_user.pop(key, None) - if pusher is None: - return - logger.info("Stopping pusher %r / %r", user_id, key) - pusher.on_stop() - - def start_pusher(self, user_id, app_id, pushkey): - key = "%s:%s" % (app_id, pushkey) - logger.info("Starting pusher %r / %r", user_id, key) - return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) - - -def start(config_options): - try: - config = 
HomeServerConfig.load_config("Synapse pusher", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.pusher" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - if config.start_pushers: - sys.stderr.write( - "\nThe pushers must be disabled in the main synapse process" - "\nbefore they can be run in a separate worker." - "\nPlease add ``start_pushers: false`` to the main config" - "\n" - ) - sys.exit(1) - - # Force the pushers to start since they will be disabled in the main config - config.start_pushers = True - - ps = PusherServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ps, config, use_worker_options=True) - - ps.setup() - - def start(): - _base.start(ps, config.worker_listeners) - ps.get_pusherpool().start() - - reactor.addSystemEventTrigger("before", "startup", start) - - _base.start_worker_reactor("synapse-pusher", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): - ps = start(sys.argv[1:]) + start(sys.argv[1:]) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 8982c0676e..add43147b3 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -13,454 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib -import logging + import sys -from six import iteritems - -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse.api.constants import EventTypes -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.handlers.presence import PresenceHandler, get_interested_parties -from synapse.http.server import JsonResource -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore -from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore -from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.replication.tcp.streams.events import EventsStreamEventRow, 
EventsStreamRow -from synapse.rest.client.v1 import events -from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet -from synapse.rest.client.v1.room import RoomInitialSyncRestServlet -from synapse.rest.client.v2_alpha import sync -from synapse.server import HomeServer -from synapse.storage.data_stores.main.monthly_active_users import ( - MonthlyActiveUsersWorkerStore, -) -from synapse.storage.data_stores.main.presence import UserPresenceState -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.stringutils import random_string -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.synchrotron") - - -class SynchrotronSlavedStore( - SlavedReceiptsStore, - SlavedAccountDataStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedFilteringStore, - SlavedPresenceStore, - SlavedGroupServerStore, - SlavedDeviceInboxStore, - SlavedDeviceStore, - SlavedPushRuleStore, - SlavedEventStore, - SlavedClientIpStore, - RoomStore, - MonthlyActiveUsersWorkerStore, - BaseSlavedStore, -): - pass - - -UPDATE_SYNCING_USERS_MS = 10 * 1000 - - -class SynchrotronPresence(object): - def __init__(self, hs): - self.hs = hs - self.is_mine_id = hs.is_mine_id - self.http_client = hs.get_simple_http_client() - self.store = hs.get_datastore() - self.user_to_num_current_syncs = {} - self.clock = hs.get_clock() - self.notifier = hs.get_notifier() - - active_presence = self.store.take_presence_startup_info() - self.user_to_current_state = {state.user_id: state for state in active_presence} - - # user_id -> last_sync_ms. Lists the users that have stopped syncing - # but we haven't notified the master of that yet - self.users_going_offline = {} - - self._send_stop_syncing_loop = self.clock.looping_call( - self.send_stop_syncing, 10 * 1000 - ) - - self.process_id = random_string(16) - logger.info("Presence process_id is %r", self.process_id) - - def send_user_sync(self, user_id, is_syncing, last_sync_ms): - if self.hs.config.use_presence: - self.hs.get_tcp_replication().send_user_sync( - user_id, is_syncing, last_sync_ms - ) - - def mark_as_coming_online(self, user_id): - """A user has started syncing. Send a UserSync to the master, unless they - had recently stopped syncing. - - Args: - user_id (str) - """ - going_offline = self.users_going_offline.pop(user_id, None) - if not going_offline: - # Safe to skip because we haven't yet told the master they were offline - self.send_user_sync(user_id, True, self.clock.time_msec()) - - def mark_as_going_offline(self, user_id): - """A user has stopped syncing. We wait before notifying the master as - its likely they'll come back soon. This allows us to avoid sending - a stopped syncing immediately followed by a started syncing notification - to the master - - Args: - user_id (str) - """ - self.users_going_offline[user_id] = self.clock.time_msec() - - def send_stop_syncing(self): - """Check if there are any users who have stopped syncing a while ago - and haven't come back yet. If there are poke the master about them. - """ - now = self.clock.time_msec() - for user_id, last_sync_ms in list(self.users_going_offline.items()): - if now - last_sync_ms > 10 * 1000: - self.users_going_offline.pop(user_id, None) - self.send_user_sync(user_id, False, last_sync_ms) - - def set_state(self, user, state, ignore_status_msg=False): - # TODO Hows this supposed to work? 
- return defer.succeed(None) - - get_states = __func__(PresenceHandler.get_states) - get_state = __func__(PresenceHandler.get_state) - current_state_for_users = __func__(PresenceHandler.current_state_for_users) - - def user_syncing(self, user_id, affect_presence): - if affect_presence: - curr_sync = self.user_to_num_current_syncs.get(user_id, 0) - self.user_to_num_current_syncs[user_id] = curr_sync + 1 - - # If we went from no in flight sync to some, notify replication - if self.user_to_num_current_syncs[user_id] == 1: - self.mark_as_coming_online(user_id) - - def _end(): - # We check that the user_id is in user_to_num_current_syncs because - # user_to_num_current_syncs may have been cleared if we are - # shutting down. - if affect_presence and user_id in self.user_to_num_current_syncs: - self.user_to_num_current_syncs[user_id] -= 1 - - # If we went from one in flight sync to non, notify replication - if self.user_to_num_current_syncs[user_id] == 0: - self.mark_as_going_offline(user_id) - - @contextlib.contextmanager - def _user_syncing(): - try: - yield - finally: - _end() - - return defer.succeed(_user_syncing()) - - @defer.inlineCallbacks - def notify_from_replication(self, states, stream_id): - parties = yield get_interested_parties(self.store, states) - room_ids_to_states, users_to_states = parties - - self.notifier.on_new_event( - "presence_key", - stream_id, - rooms=room_ids_to_states.keys(), - users=users_to_states.keys(), - ) - - @defer.inlineCallbacks - def process_replication_rows(self, token, rows): - states = [ - UserPresenceState( - row.user_id, - row.state, - row.last_active_ts, - row.last_federation_update_ts, - row.last_user_sync_ts, - row.status_msg, - row.currently_active, - ) - for row in rows - ] - - for state in states: - self.user_to_current_state[state.user_id] = state - - stream_id = token - yield self.notify_from_replication(states, stream_id) - - def get_currently_syncing_users(self): - if self.hs.config.use_presence: - return [ - user_id - for user_id, count in iteritems(self.user_to_num_current_syncs) - if count > 0 - ] - else: - return set() - - -class SynchrotronTyping(object): - def __init__(self, hs): - self._latest_room_serial = 0 - self._reset() - - def _reset(self): - """ - Reset the typing handler's data caches. - """ - # map room IDs to serial numbers - self._room_serials = {} - # map room IDs to sets of users currently typing - self._room_typing = {} - - def stream_positions(self): - # We must update this typing token from the response of the previous - # sync. In particular, the stream id may "reset" back to zero/a low - # value which we *must* use for the next replication request. - return {"typing": self._latest_room_serial} - - def process_replication_rows(self, token, rows): - if self._latest_room_serial > token: - # The master has gone backwards. To prevent inconsistent data, just - # clear everything. - self._reset() - - # Set the latest serial token to whatever the server gave us. 
- self._latest_room_serial = token - - for row in rows: - self._room_serials[row.room_id] = token - self._room_typing[row.room_id] = row.user_ids - - -class SynchrotronApplicationService(object): - def notify_interested_services(self, event): - pass - - -class SynchrotronServer(HomeServer): - DATASTORE_CLASS = SynchrotronSlavedStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "client": - resource = JsonResource(self, canonical_json=False) - sync.register_servlets(self, resource) - events.register_servlets(self, resource) - InitialSyncRestServlet(self).register(resource) - RoomInitialSyncRestServlet(self).register(resource) - resources.update( - { - "/_matrix/client/r0": resource, - "/_matrix/client/unstable": resource, - "/_matrix/client/v2_alpha": resource, - "/_matrix/client/api/v1": resource, - } - ) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse synchrotron now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return SyncReplicationHandler(self) - - def build_presence_handler(self): - return SynchrotronPresence(self) - - def build_typing_handler(self): - return SynchrotronTyping(self) - - -class SyncReplicationHandler(ReplicationClientHandler): - def __init__(self, hs): - super(SyncReplicationHandler, self).__init__(hs.get_datastore()) - - self.store = hs.get_datastore() - self.typing_handler = hs.get_typing_handler() - # NB this is a SynchrotronPresence, not a normal PresenceHandler - self.presence_handler = hs.get_presence_handler() - self.notifier = hs.get_notifier() - - async def on_rdata(self, stream_name, token, rows): - await super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows) - run_in_background(self.process_and_notify, stream_name, token, rows) - - def get_streams_to_replicate(self): - args = super(SyncReplicationHandler, self).get_streams_to_replicate() - args.update(self.typing_handler.stream_positions()) - return args - - def get_currently_syncing_users(self): - return self.presence_handler.get_currently_syncing_users() - - async def process_and_notify(self, stream_name, token, rows): - try: - if stream_name == "events": - # We shouldn't get multiple rows per token for events stream, so - # we don't need to optimise this for multiple rows. 
- for row in rows: - if row.type != EventsStreamEventRow.TypeId: - continue - assert isinstance(row, EventsStreamRow) - - event = await self.store.get_event( - row.data.event_id, allow_rejected=True - ) - if event.rejected_reason: - continue - - extra_users = () - if event.type == EventTypes.Member: - extra_users = (event.state_key,) - max_token = self.store.get_room_max_stream_ordering() - self.notifier.on_new_room_event( - event, token, max_token, extra_users - ) - elif stream_name == "push_rules": - self.notifier.on_new_event( - "push_rules_key", token, users=[row.user_id for row in rows] - ) - elif stream_name in ("account_data", "tag_account_data"): - self.notifier.on_new_event( - "account_data_key", token, users=[row.user_id for row in rows] - ) - elif stream_name == "receipts": - self.notifier.on_new_event( - "receipt_key", token, rooms=[row.room_id for row in rows] - ) - elif stream_name == "typing": - self.typing_handler.process_replication_rows(token, rows) - self.notifier.on_new_event( - "typing_key", token, rooms=[row.room_id for row in rows] - ) - elif stream_name == "to_device": - entities = [row.entity for row in rows if row.entity.startswith("@")] - if entities: - self.notifier.on_new_event("to_device_key", token, users=entities) - elif stream_name == "device_lists": - all_room_ids = set() - for row in rows: - room_ids = await self.store.get_rooms_for_user(row.user_id) - all_room_ids.update(room_ids) - self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) - elif stream_name == "presence": - await self.presence_handler.process_replication_rows(token, rows) - elif stream_name == "receipts": - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows] - ) - except Exception: - logger.exception("Error processing replication") - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse synchrotron", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.synchrotron" - - synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts - - ss = SynchrotronServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - application_service_handler=SynchrotronApplicationService(), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-synchrotron", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index ba536d6f04..503d44f687 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -14,217 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
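The `UserDirectorySlaveStore` removed below prefilled a `StreamChangeCache` from `current_state_delta_stream` so that a freshly started worker does not have to treat every room as "maybe changed"; the same prefill is re-added to `SlavedEventStore` later in this patch. A simplified, self-contained sketch of the idea (`SimpleStreamChangeCache` is a stand-in, not Synapse's implementation):

    class SimpleStreamChangeCache:
        """Illustrative stand-in for synapse's StreamChangeCache."""

        def __init__(self, min_known_token, prefilled=None):
            # We know nothing about positions before min_known_token.
            self._min_known_token = min_known_token
            # entity -> stream position at which it last changed.
            self._entity_to_token = dict(prefilled or {})

        def entity_has_changed(self, entity, token):
            if token > self._entity_to_token.get(entity, self._min_known_token):
                self._entity_to_token[entity] = token

        def has_entity_changed(self, entity, token):
            if token < self._min_known_token:
                # The cache cannot see that far back: must assume a change.
                return True
            return self._entity_to_token.get(entity, self._min_known_token) > token

    # Prefilling at startup (as the stores here do via get_cache_dict):
    #     cache = SimpleStreamChangeCache(min_id, prefilled={room_id: stream_id})
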
-import logging import sys -from twisted.internet import defer, reactor -from twisted.web.resource import NoResource - -import synapse -from synapse import events -from synapse.app import _base -from synapse.config._base import ConfigError -from synapse.config.homeserver import HomeServerConfig -from synapse.config.logger import setup_logging -from synapse.http.server import JsonResource -from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.client_ips import SlavedClientIpStore -from synapse.replication.slave.storage.events import SlavedEventStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore -from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.replication.tcp.streams.events import ( - EventsStream, - EventsStreamCurrentStateRow, -) -from synapse.rest.client.v2_alpha import user_directory -from synapse.server import HomeServer -from synapse.storage.data_stores.main.user_directory import UserDirectoryStore -from synapse.storage.database import Database -from synapse.util.caches.stream_change_cache import StreamChangeCache -from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.manhole import manhole -from synapse.util.versionstring import get_version_string - -logger = logging.getLogger("synapse.app.user_dir") - - -class UserDirectorySlaveStore( - SlavedEventStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedClientIpStore, - UserDirectoryStore, - BaseSlavedStore, -): - def __init__(self, database: Database, db_conn, hs): - super(UserDirectorySlaveStore, self).__init__(database, db_conn, hs) - - events_max = self._stream_id_gen.get_current_token() - curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict( - db_conn, - "current_state_delta_stream", - entity_column="room_id", - stream_column="stream_id", - max_value=events_max, # As we share the stream id with events token - limit=1000, - ) - self._curr_state_delta_stream_cache = StreamChangeCache( - "_curr_state_delta_stream_cache", - min_curr_state_delta_id, - prefilled_cache=curr_state_delta_prefill, - ) - - def stream_positions(self): - result = super(UserDirectorySlaveStore, self).stream_positions() - return result - - def process_replication_rows(self, stream_name, token, rows): - if stream_name == EventsStream.NAME: - self._stream_id_gen.advance(token) - for row in rows: - if row.type != EventsStreamCurrentStateRow.TypeId: - continue - self._curr_state_delta_stream_cache.entity_has_changed( - row.data.room_id, token - ) - return super(UserDirectorySlaveStore, self).process_replication_rows( - stream_name, token, rows - ) - - -class UserDirectoryServer(HomeServer): - DATASTORE_CLASS = UserDirectorySlaveStore - - def _listen_http(self, listener_config): - port = listener_config["port"] - bind_addresses = listener_config["bind_addresses"] - site_tag = listener_config.get("tag", port) - resources = {} - for res in listener_config["resources"]: - for name in res["names"]: - if name == "metrics": - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) - elif name == "client": - resource = JsonResource(self, canonical_json=False) - user_directory.register_servlets(self, 
resource) - resources.update( - { - "/_matrix/client/r0": resource, - "/_matrix/client/unstable": resource, - "/_matrix/client/v2_alpha": resource, - "/_matrix/client/api/v1": resource, - } - ) - - root_resource = create_resource_tree(resources, NoResource()) - - _base.listen_tcp( - bind_addresses, - port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), - ) - - logger.info("Synapse user_dir now listening on port %d", port) - - def start_listening(self, listeners): - for listener in listeners: - if listener["type"] == "http": - self._listen_http(listener) - elif listener["type"] == "manhole": - _base.listen_tcp( - listener["bind_addresses"], - listener["port"], - manhole( - username="matrix", password="rabbithole", globals={"hs": self} - ), - ) - elif listener["type"] == "metrics": - if not self.get_config().enable_metrics: - logger.warning( - ( - "Metrics listener configured, but " - "enable_metrics is not True!" - ) - ) - else: - _base.listen_metrics(listener["bind_addresses"], listener["port"]) - else: - logger.warning("Unrecognized listener type: %s", listener["type"]) - - self.get_tcp_replication().start_replication(self) - - def build_tcp_replication(self): - return UserDirectoryReplicationHandler(self) - - -class UserDirectoryReplicationHandler(ReplicationClientHandler): - def __init__(self, hs): - super(UserDirectoryReplicationHandler, self).__init__(hs.get_datastore()) - self.user_directory = hs.get_user_directory_handler() - - async def on_rdata(self, stream_name, token, rows): - await super(UserDirectoryReplicationHandler, self).on_rdata( - stream_name, token, rows - ) - if stream_name == EventsStream.NAME: - run_in_background(self._notify_directory) - - @defer.inlineCallbacks - def _notify_directory(self): - try: - yield self.user_directory.notify_new_event() - except Exception: - logger.exception("Error notifiying user directory of state update") - - -def start(config_options): - try: - config = HomeServerConfig.load_config("Synapse user directory", config_options) - except ConfigError as e: - sys.stderr.write("\n" + str(e) + "\n") - sys.exit(1) - - assert config.worker_app == "synapse.app.user_dir" - - events.USE_FROZEN_DICTS = config.use_frozen_dicts - - if config.update_user_directory: - sys.stderr.write( - "\nThe update_user_directory must be disabled in the main synapse process" - "\nbefore they can be run in a separate worker." 
- "\nPlease add ``update_user_directory: false`` to the main config" - "\n" - ) - sys.exit(1) - - # Force the pushers to start since they will be disabled in the main config - config.update_user_directory = True - - ss = UserDirectoryServer( - config.server_name, - config=config, - version_string="Synapse/" + get_version_string(synapse), - ) - - setup_logging(ss, config, use_worker_options=True) - - ss.setup() - reactor.addSystemEventTrigger( - "before", "startup", _base.start, ss, config.worker_listeners - ) - - _base.start_worker_reactor("synapse-user-dir", config) - +from synapse.app.generic_worker import start +from synapse.util.logcontext import LoggingContext if __name__ == "__main__": with LoggingContext("main"): diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 3aa6cb8b96..e73342c657 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -32,6 +32,7 @@ from synapse.storage.data_stores.main.state import StateGroupWorkerStore from synapse.storage.data_stores.main.stream import StreamWorkerStore from synapse.storage.data_stores.main.user_erasure_store import UserErasureWorkerStore from synapse.storage.database import Database +from synapse.util.caches.stream_change_cache import StreamChangeCache from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker @@ -68,6 +69,21 @@ class SlavedEventStore( super(SlavedEventStore, self).__init__(database, db_conn, hs) + events_max = self._stream_id_gen.get_current_token() + curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict( + db_conn, + "current_state_delta_stream", + entity_column="room_id", + stream_column="stream_id", + max_value=events_max, # As we share the stream id with events token + limit=1000, + ) + self._curr_state_delta_stream_cache = StreamChangeCache( + "_curr_state_delta_stream_cache", + min_curr_state_delta_id, + prefilled_cache=curr_state_delta_prefill, + ) + # Cached functions can't be accessed through a class instance so we need # to reach inside the __dict__ to extract them. @@ -120,6 +136,10 @@ class SlavedEventStore( backfilled=False, ) elif row.type == EventsStreamCurrentStateRow.TypeId: + self._curr_state_delta_stream_cache.entity_has_changed( + row.data.room_id, token + ) + if data.type == EventTypes.Member: self.get_rooms_for_user_with_stream_ordering.invalidate( (data.state_key,) diff --git a/synapse/storage/data_stores/main/pusher.py b/synapse/storage/data_stores/main/pusher.py index 6b03233262..547b9d69cb 100644 --- a/synapse/storage/data_stores/main/pusher.py +++ b/synapse/storage/data_stores/main/pusher.py @@ -197,6 +197,84 @@ class PusherWorkerStore(SQLBaseStore): return result + @defer.inlineCallbacks + def update_pusher_last_stream_ordering( + self, app_id, pushkey, user_id, last_stream_ordering + ): + yield self.db.simple_update_one( + "pushers", + {"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, + {"last_stream_ordering": last_stream_ordering}, + desc="update_pusher_last_stream_ordering", + ) + + @defer.inlineCallbacks + def update_pusher_last_stream_ordering_and_success( + self, app_id, pushkey, user_id, last_stream_ordering, last_success + ): + """Update the last stream ordering position we've processed up to for + the given pusher. + + Args: + app_id (str) + pushkey (str) + last_stream_ordering (int) + last_success (int) + + Returns: + Deferred[bool]: True if the pusher still exists; False if it has been deleted. 
+ """ + updated = yield self.db.simple_update( + table="pushers", + keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, + updatevalues={ + "last_stream_ordering": last_stream_ordering, + "last_success": last_success, + }, + desc="update_pusher_last_stream_ordering_and_success", + ) + + return bool(updated) + + @defer.inlineCallbacks + def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since): + yield self.db.simple_update( + table="pushers", + keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, + updatevalues={"failing_since": failing_since}, + desc="update_pusher_failing_since", + ) + + @defer.inlineCallbacks + def get_throttle_params_by_room(self, pusher_id): + res = yield self.db.simple_select_list( + "pusher_throttle", + {"pusher": pusher_id}, + ["room_id", "last_sent_ts", "throttle_ms"], + desc="get_throttle_params_by_room", + ) + + params_by_room = {} + for row in res: + params_by_room[row["room_id"]] = { + "last_sent_ts": row["last_sent_ts"], + "throttle_ms": row["throttle_ms"], + } + + return params_by_room + + @defer.inlineCallbacks + def set_throttle_params(self, pusher_id, room_id, params): + # no need to lock because `pusher_throttle` has a primary key on + # (pusher, room_id) so simple_upsert will retry + yield self.db.simple_upsert( + "pusher_throttle", + {"pusher": pusher_id, "room_id": room_id}, + params, + desc="set_throttle_params", + lock=False, + ) + class PusherStore(PusherWorkerStore): def get_pushers_stream_token(self): @@ -282,81 +360,3 @@ class PusherStore(PusherWorkerStore): with self._pushers_id_gen.get_next() as stream_id: yield self.db.runInteraction("delete_pusher", delete_pusher_txn, stream_id) - - @defer.inlineCallbacks - def update_pusher_last_stream_ordering( - self, app_id, pushkey, user_id, last_stream_ordering - ): - yield self.db.simple_update_one( - "pushers", - {"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, - {"last_stream_ordering": last_stream_ordering}, - desc="update_pusher_last_stream_ordering", - ) - - @defer.inlineCallbacks - def update_pusher_last_stream_ordering_and_success( - self, app_id, pushkey, user_id, last_stream_ordering, last_success - ): - """Update the last stream ordering position we've processed up to for - the given pusher. - - Args: - app_id (str) - pushkey (str) - last_stream_ordering (int) - last_success (int) - - Returns: - Deferred[bool]: True if the pusher still exists; False if it has been deleted. 
- """ - updated = yield self.db.simple_update( - table="pushers", - keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, - updatevalues={ - "last_stream_ordering": last_stream_ordering, - "last_success": last_success, - }, - desc="update_pusher_last_stream_ordering_and_success", - ) - - return bool(updated) - - @defer.inlineCallbacks - def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since): - yield self.db.simple_update( - table="pushers", - keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, - updatevalues={"failing_since": failing_since}, - desc="update_pusher_failing_since", - ) - - @defer.inlineCallbacks - def get_throttle_params_by_room(self, pusher_id): - res = yield self.db.simple_select_list( - "pusher_throttle", - {"pusher": pusher_id}, - ["room_id", "last_sent_ts", "throttle_ms"], - desc="get_throttle_params_by_room", - ) - - params_by_room = {} - for row in res: - params_by_room[row["room_id"]] = { - "last_sent_ts": row["last_sent_ts"], - "throttle_ms": row["throttle_ms"], - } - - return params_by_room - - @defer.inlineCallbacks - def set_throttle_params(self, pusher_id, room_id, params): - # no need to lock because `pusher_throttle` has a primary key on - # (pusher, room_id) so simple_upsert will retry - yield self.db.simple_upsert( - "pusher_throttle", - {"pusher": pusher_id, "room_id": room_id}, - params, - desc="set_throttle_params", - lock=False, - ) diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 8bdbc608a9..160e55aca9 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.app.frontend_proxy import FrontendProxyServer +from synapse.app.generic_worker import GenericWorkerServer from tests.unittest import HomeserverTestCase @@ -22,7 +22,7 @@ class FrontendProxyTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=FrontendProxyServer + http_client=None, homeserverToUse=GenericWorkerServer ) return hs @@ -46,9 +46,7 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - self.resource = ( - site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] - ) + self.resource = site.resource.children[b"_matrix"].children[b"client"] request, channel = self.make_request("PUT", "presence/a/status") self.render(request) @@ -76,9 +74,7 @@ class FrontendProxyTests(HomeserverTestCase): # Grab the resource from the site that was told to listen self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] - self.resource = ( - site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] - ) + self.resource = site.resource.children[b"_matrix"].children[b"client"] request, channel = self.make_request("PUT", "presence/a/status") self.render(request) diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 48792d1480..1fe048048b 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -16,7 +16,7 @@ from mock import Mock, patch from parameterized import parameterized -from synapse.app.federation_reader import FederationReaderServer +from synapse.app.generic_worker import GenericWorkerServer from 
synapse.app.homeserver import SynapseHomeServer
 
 from tests.unittest import HomeserverTestCase
 
@@ -25,7 +25,7 @@ from tests.unittest import HomeserverTestCase
 class FederationReaderOpenIDListenerTests(HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         hs = self.setup_test_homeserver(
-            http_client=None, homeserverToUse=FederationReaderServer
+            http_client=None, homeserverToUse=GenericWorkerServer
         )
         return hs
 

From e66f099ca952ef47944c7bba3fd942f98245d39f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 25 Feb 2020 17:46:00 +0000
Subject: [PATCH 180/382] Sanity-check database before running upgrades (#6982)

Some of the database deltas rely on `config.server_name` being set
correctly, so we should check that it is before running the deltas.

Fixes #6870.
---
 changelog.d/6982.feature                     |  1 +
 synapse/storage/data_stores/main/__init__.py | 34 +++++++++++---------
 synapse/storage/prepare_database.py          | 15 +++++++--
 3 files changed, 33 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/6982.feature

diff --git a/changelog.d/6982.feature b/changelog.d/6982.feature
new file mode 100644
index 0000000000..934cc5141a
--- /dev/null
+++ b/changelog.d/6982.feature
@@ -0,0 +1 @@
+Check that server_name is correctly set before running database updates.
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index 2700cca822..acca079f23 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -20,6 +20,7 @@ import logging
 import time
 
 from synapse.api.constants import PresenceState
+from synapse.config.homeserver import HomeServerConfig
 from synapse.storage.database import Database
 from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import (
@@ -117,16 +118,6 @@ class DataStore(
         self._clock = hs.get_clock()
         self.database_engine = database.engine
 
-        all_users_native = are_all_users_on_domain(
-            db_conn.cursor(), database.engine, hs.hostname
-        )
-        if not all_users_native:
-            raise Exception(
-                "Found users in database not native to %s!\n"
-                "You cannot changed a synapse server_name after it's been configured"
-                % (hs.hostname,)
-            )
-
         self._stream_id_gen = StreamIdGenerator(
             db_conn,
             "events",
@@ -567,13 +558,26 @@ class DataStore(
     )
 
 
-def are_all_users_on_domain(txn, database_engine, domain):
+def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig):
+    """Called before upgrading an existing database to check that it is broadly sane
+    compared with the configuration.
+    """
+    domain = config.server_name
+
     sql = database_engine.convert_param_style(
         "SELECT COUNT(*) FROM users WHERE name NOT LIKE ?"
     )
     pat = "%:" + domain
-    txn.execute(sql, (pat,))
-    num_not_matching = txn.fetchall()[0][0]
+    cur.execute(sql, (pat,))
+    num_not_matching = cur.fetchall()[0][0]
     if num_not_matching == 0:
-        return True
-    return False
+        return
+
+    raise Exception(
+        "Found users in database not native to %s!\n"
+        "You cannot change a synapse server_name after it's been configured"
+        % (domain,)
+    )
+
+
+__all__ = ["DataStore", "check_database_before_upgrade"]
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index fc69c32a0a..6cb7d4b922 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -278,13 +278,17 @@ def _upgrade_existing_database(
             the current_version wasn't generated by applying those delta files.
database_engine (DatabaseEngine) config (synapse.config.homeserver.HomeServerConfig|None): - application config, or None if we are connecting to an existing - database which we expect to be configured already + None if we are initialising a blank database, otherwise the application + config data_stores (list[str]): The names of the data stores to instantiate on the given database. is_empty (bool): Is this a blank database? I.e. do we need to run the upgrade portions of the delta scripts. """ + if is_empty: + assert not applied_delta_files + else: + assert config if current_version > SCHEMA_VERSION: raise ValueError( @@ -292,6 +296,13 @@ def _upgrade_existing_database( + "new for the server to understand" ) + # some of the deltas assume that config.server_name is set correctly, so now + # is a good time to run the sanity check. + if not is_empty and "main" in data_stores: + from synapse.storage.data_stores.main import check_database_before_upgrade + + check_database_before_upgrade(cur, database_engine, config) + start_ver = current_version if not upgraded: start_ver += 1 From 8c75b621bfe03725cc8da071516ebc66d3872760 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 26 Feb 2020 12:22:55 +0000 Subject: [PATCH 182/382] Ensure 'deactivated' parameter is a boolean on user admin API, Fix error handling of call to deactivate user (#6990) --- changelog.d/6990.bugfix | 1 + synapse/rest/admin/users.py | 11 +++--- synapse/rest/client/v1/login.py | 1 + tests/rest/admin/test_user.py | 59 +++++++++++++++++++++++++++++++++ 4 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6990.bugfix diff --git a/changelog.d/6990.bugfix b/changelog.d/6990.bugfix new file mode 100644 index 0000000000..8c1c48f4d4 --- /dev/null +++ b/changelog.d/6990.bugfix @@ -0,0 +1 @@ +Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. 
\ No newline at end of file diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 2107b5dc56..c5b461a236 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -228,13 +228,16 @@ class UserRestServletV2(RestServlet): ) if "deactivated" in body: - deactivate = bool(body["deactivated"]) + deactivate = body["deactivated"] + if not isinstance(deactivate, bool): + raise SynapseError( + 400, "'deactivated' parameter is not of type boolean" + ) + if deactivate and not user["deactivated"]: - result = await self.deactivate_account_handler.deactivate_account( + await self.deactivate_account_handler.deactivate_account( target_user.to_string(), False ) - if not result: - raise SynapseError(500, "Could not deactivate user") user = await self.admin_handler.get_user(target_user) return 200, user diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 1294e080dc..2c99536678 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -599,6 +599,7 @@ class SSOAuthHandler(object): redirect_url = self._add_login_token_to_redirect_url( client_redirect_url, login_token ) + # Load page request.redirect(redirect_url) finish_request(request) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 490ce8f55d..cbe4a6a51f 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -507,3 +507,62 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(1, channel.json_body["admin"]) self.assertEqual(0, channel.json_body["is_guest"]) self.assertEqual(1, channel.json_body["deactivated"]) + + def test_accidental_deactivation_prevention(self): + """ + Ensure an account can't accidentally be deactivated by using a str value + for the deactivated body parameter + """ + self.hs.config.registration_shared_secret = None + + # Create user + body = json.dumps({"password": "abc123"}) + + request, channel = self.make_request( + "PUT", + self.url, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob:test", channel.json_body["name"]) + self.assertEqual("bob", channel.json_body["displayname"]) + + # Get user + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob:test", channel.json_body["name"]) + self.assertEqual("bob", channel.json_body["displayname"]) + self.assertEqual(0, channel.json_body["deactivated"]) + + # Change password (and use a str for deactivate instead of a bool) + body = json.dumps({"password": "abc123", "deactivated": "false"}) # oops! 
+ + request, channel = self.make_request( + "PUT", + self.url, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + + # Check user is not deactivated + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@bob:test", channel.json_body["name"]) + self.assertEqual("bob", channel.json_body["displayname"]) + + # Ensure they're still alive + self.assertEqual(0, channel.json_body["deactivated"]) From 7728d87fd7e1af17dd6b0c619cbfecb1fadb624f Mon Sep 17 00:00:00 2001 From: Uday Bansal <43824981+udaybansal19@users.noreply.github.com> Date: Wed, 26 Feb 2020 20:47:03 +0530 Subject: [PATCH 183/382] Updated warning for incorrect database collation/ctype (#6985) Signed-off-by: Uday Bansal <43824981+udaybansal19@users.noreply.github.com> --- changelog.d/6985.misc | 1 + synapse/storage/engines/postgres.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6985.misc diff --git a/changelog.d/6985.misc b/changelog.d/6985.misc new file mode 100644 index 0000000000..ba367fa9af --- /dev/null +++ b/changelog.d/6985.misc @@ -0,0 +1 @@ +Update warning for incorrect database collation/ctype to include link to documentation. diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index a077345960..53b3f372b0 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -53,7 +53,7 @@ class PostgresEngine(object): if rows and rows[0][0] != "UTF8": raise IncorrectDatabaseSetup( "Database has incorrect encoding: '%s' instead of 'UTF8'\n" - "See docs/postgres.rst for more information." % (rows[0][0],) + "See docs/postgres.md for more information." % (rows[0][0],) ) txn.execute( @@ -62,12 +62,16 @@ class PostgresEngine(object): collation, ctype = txn.fetchone() if collation != "C": logger.warning( - "Database has incorrect collation of %r. Should be 'C'", collation + "Database has incorrect collation of %r. Should be 'C'\n" + "See docs/postgres.md for more information.", + collation, ) if ctype != "C": logger.warning( - "Database has incorrect ctype of %r. Should be 'C'", ctype + "Database has incorrect ctype of %r. Should be 'C'\n" + "See docs/postgres.md for more information.", + ctype, ) def check_new_database(self, txn): From 1f773eec912e4908ab60f7823f5c0a024261af4d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 26 Feb 2020 15:33:26 +0000 Subject: [PATCH 184/382] Port PresenceHandler to async/await (#6991) --- changelog.d/6991.misc | 1 + synapse/handlers/message.py | 5 +- synapse/handlers/presence.py | 192 +++++++++++++--------------- synapse/replication/tcp/resource.py | 6 +- synapse/server.pyi | 5 + tests/handlers/test_presence.py | 18 ++- tox.ini | 1 + 7 files changed, 113 insertions(+), 115 deletions(-) create mode 100644 changelog.d/6991.misc diff --git a/changelog.d/6991.misc b/changelog.d/6991.misc new file mode 100644 index 0000000000..5130f4e8af --- /dev/null +++ b/changelog.d/6991.misc @@ -0,0 +1 @@ +Port `synapse.handlers.presence` to async/await. 
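The port in this patch is mechanical throughout: each `@defer.inlineCallbacks` generator becomes an `async def`, and each `yield` on a Deferred becomes an `await`. As a standalone sketch of that conversion (the names below are invented for illustration and are not Synapse code):

    from twisted.internet import defer, task

    # Old style: a generator driven by inlineCallbacks; it suspends at each
    # yielded Deferred and is resumed with the Deferred's result.
    @defer.inlineCallbacks
    def old_style(reactor):
        yield task.deferLater(reactor, 0, lambda: None)
        return "done"

    # New style: a native coroutine; each `yield d` becomes `await d`.
    async def new_style(reactor):
        await task.deferLater(reactor, 0, lambda: None)
        return "done"

    def main(reactor):
        # ensureDeferred turns the coroutine into a Deferred that the reactor
        # (and any remaining inlineCallbacks callers) can drive.
        return defer.ensureDeferred(new_style(reactor))

    if __name__ == "__main__":
        task.react(main)

Callers that are still `@defer.inlineCallbacks` generators need the same `defer.ensureDeferred` wrapping before yielding a coroutine; forgetting that wrapping is exactly the bug fixed in the next patch (#6996).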
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d6be280952..a0103addd3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1016,11 +1016,10 @@ class EventCreationHandler(object): # matters as sometimes presence code can take a while. run_in_background(self._bump_active_time, requester.user) - @defer.inlineCallbacks - def _bump_active_time(self, user): + async def _bump_active_time(self, user): try: presence = self.hs.get_presence_handler() - yield presence.bump_presence_active_time(user) + await presence.bump_presence_active_time(user) except Exception: logger.exception("Error bumping presence active time") diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 0d6cf2b008..5526015ddb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -24,11 +24,12 @@ The methods that define policy are: import logging from contextlib import contextmanager -from typing import Dict, Set +from typing import Dict, List, Set from six import iteritems, itervalues from prometheus_client import Counter +from typing_extensions import ContextManager from twisted.internet import defer @@ -42,10 +43,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.presence import UserPresenceState from synapse.types import UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cached from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer +MYPY = False +if MYPY: + import synapse.server + logger = logging.getLogger(__name__) @@ -97,7 +102,6 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER class PresenceHandler(object): def __init__(self, hs: "synapse.server.HomeServer"): self.hs = hs - self.is_mine = hs.is_mine self.is_mine_id = hs.is_mine_id self.server_name = hs.hostname self.clock = hs.get_clock() @@ -150,7 +154,7 @@ class PresenceHandler(object): # Set of users who have presence in the `user_to_current_state` that # have not yet been persisted - self.unpersisted_users_changes = set() + self.unpersisted_users_changes = set() # type: Set[str] hs.get_reactor().addSystemEventTrigger( "before", @@ -160,12 +164,11 @@ class PresenceHandler(object): self._on_shutdown, ) - self.serial_to_user = {} self._next_serial = 1 # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self.user_to_num_current_syncs = {} + self.user_to_num_current_syncs = {} # type: Dict[str, int] # Keeps track of the number of *ongoing* syncs on other processes. # While any sync is ongoing on another process the user will never @@ -213,8 +216,7 @@ class PresenceHandler(object): self._event_pos = self.store.get_current_events_token() self._event_processing = False - @defer.inlineCallbacks - def _on_shutdown(self): + async def _on_shutdown(self): """Gets called when shutting down. This lets us persist any updates that we haven't yet persisted, e.g. updates that only changes some internal timers. 
This allows changes to persist across startup without having to @@ -235,7 +237,7 @@ class PresenceHandler(object): if self.unpersisted_users_changes: - yield self.store.update_presence( + await self.store.update_presence( [ self.user_to_current_state[user_id] for user_id in self.unpersisted_users_changes @@ -243,8 +245,7 @@ class PresenceHandler(object): ) logger.info("Finished _on_shutdown") - @defer.inlineCallbacks - def _persist_unpersisted_changes(self): + async def _persist_unpersisted_changes(self): """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ @@ -253,12 +254,11 @@ class PresenceHandler(object): if unpersisted: logger.info("Persisting %d unpersisted presence updates", len(unpersisted)) - yield self.store.update_presence( + await self.store.update_presence( [self.user_to_current_state[user_id] for user_id in unpersisted] ) - @defer.inlineCallbacks - def _update_states(self, new_states): + async def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes the notifier and federation if and only if the changed presence state should be sent to clients/servers. @@ -267,7 +267,7 @@ class PresenceHandler(object): with Measure(self.clock, "presence_update_states"): - # NOTE: We purposefully don't yield between now and when we've + # NOTE: We purposefully don't await between now and when we've # calculated what we want to do with the new states, to avoid races. to_notify = {} # Changes we want to notify everyone about @@ -311,7 +311,7 @@ class PresenceHandler(object): if to_notify: notified_presence_counter.inc(len(to_notify)) - yield self._persist_and_notify(list(to_notify.values())) + await self._persist_and_notify(list(to_notify.values())) self.unpersisted_users_changes |= {s.user_id for s in new_states} self.unpersisted_users_changes -= set(to_notify.keys()) @@ -326,7 +326,7 @@ class PresenceHandler(object): self._push_to_remotes(to_federation_ping.values()) - def _handle_timeouts(self): + async def _handle_timeouts(self): """Checks the presence of users that have timed out and updates as appropriate. """ @@ -368,10 +368,9 @@ class PresenceHandler(object): now=now, ) - return self._update_states(changes) + return await self._update_states(changes) - @defer.inlineCallbacks - def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user): """We've seen the user do something that indicates they're interacting with the app. """ @@ -383,16 +382,17 @@ class PresenceHandler(object): bump_active_time_counter.inc() - prev_state = yield self.current_state_for_user(user_id) + prev_state = await self.current_state_for_user(user_id) new_fields = {"last_active_ts": self.clock.time_msec()} if prev_state.state == PresenceState.UNAVAILABLE: new_fields["state"] = PresenceState.ONLINE - yield self._update_states([prev_state.copy_and_replace(**new_fields)]) + await self._update_states([prev_state.copy_and_replace(**new_fields)]) - @defer.inlineCallbacks - def user_syncing(self, user_id, affect_presence=True): + async def user_syncing( + self, user_id: str, affect_presence: bool = True + ) -> ContextManager[None]: """Returns a context manager that should surround any stream requests from the user. 
@@ -415,11 +415,11 @@ class PresenceHandler(object): curr_sync = self.user_to_num_current_syncs.get(user_id, 0) self.user_to_num_current_syncs[user_id] = curr_sync + 1 - prev_state = yield self.current_state_for_user(user_id) + prev_state = await self.current_state_for_user(user_id) if prev_state.state == PresenceState.OFFLINE: # If they're currently offline then bring them online, otherwise # just update the last sync times. - yield self._update_states( + await self._update_states( [ prev_state.copy_and_replace( state=PresenceState.ONLINE, @@ -429,7 +429,7 @@ class PresenceHandler(object): ] ) else: - yield self._update_states( + await self._update_states( [ prev_state.copy_and_replace( last_user_sync_ts=self.clock.time_msec() @@ -437,13 +437,12 @@ class PresenceHandler(object): ] ) - @defer.inlineCallbacks - def _end(): + async def _end(): try: self.user_to_num_current_syncs[user_id] -= 1 - prev_state = yield self.current_state_for_user(user_id) - yield self._update_states( + prev_state = await self.current_state_for_user(user_id) + await self._update_states( [ prev_state.copy_and_replace( last_user_sync_ts=self.clock.time_msec() @@ -480,8 +479,7 @@ class PresenceHandler(object): else: return set() - @defer.inlineCallbacks - def update_external_syncs_row( + async def update_external_syncs_row( self, process_id, user_id, is_syncing, sync_time_msec ): """Update the syncing users for an external process as a delta. @@ -494,8 +492,8 @@ class PresenceHandler(object): is_syncing (bool): Whether or not the user is now syncing sync_time_msec(int): Time in ms when the user was last syncing """ - with (yield self.external_sync_linearizer.queue(process_id)): - prev_state = yield self.current_state_for_user(user_id) + with (await self.external_sync_linearizer.queue(process_id)): + prev_state = await self.current_state_for_user(user_id) process_presence = self.external_process_to_current_syncs.setdefault( process_id, set() @@ -525,25 +523,24 @@ class PresenceHandler(object): process_presence.discard(user_id) if updates: - yield self._update_states(updates) + await self._update_states(updates) self.external_process_last_updated_ms[process_id] = self.clock.time_msec() - @defer.inlineCallbacks - def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id): """Marks all users that had been marked as syncing by a given process as offline. Used when the process has stopped/disappeared. """ - with (yield self.external_sync_linearizer.queue(process_id)): + with (await self.external_sync_linearizer.queue(process_id)): process_presence = self.external_process_to_current_syncs.pop( process_id, set() ) - prev_states = yield self.current_state_for_users(process_presence) + prev_states = await self.current_state_for_users(process_presence) time_now_ms = self.clock.time_msec() - yield self._update_states( + await self._update_states( [ prev_state.copy_and_replace(last_user_sync_ts=time_now_ms) for prev_state in itervalues(prev_states) @@ -551,15 +548,13 @@ class PresenceHandler(object): ) self.external_process_last_updated_ms.pop(process_id, None) - @defer.inlineCallbacks - def current_state_for_user(self, user_id): + async def current_state_for_user(self, user_id): """Get the current presence state for a user. 
""" - res = yield self.current_state_for_users([user_id]) + res = await self.current_state_for_users([user_id]) return res[user_id] - @defer.inlineCallbacks - def current_state_for_users(self, user_ids): + async def current_state_for_users(self, user_ids): """Get the current presence state for multiple users. Returns: @@ -574,7 +569,7 @@ class PresenceHandler(object): if missing: # There are things not in our in memory cache. Lets pull them out of # the database. - res = yield self.store.get_presence_for_users(missing) + res = await self.store.get_presence_for_users(missing) states.update(res) missing = [user_id for user_id, state in iteritems(states) if not state] @@ -587,14 +582,13 @@ class PresenceHandler(object): return states - @defer.inlineCallbacks - def _persist_and_notify(self, states): + async def _persist_and_notify(self, states): """Persist states in the database, poke the notifier and send to interested remote servers """ - stream_id, max_token = yield self.store.update_presence(states) + stream_id, max_token = await self.store.update_presence(states) - parties = yield get_interested_parties(self.store, states) + parties = await get_interested_parties(self.store, states) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( @@ -606,9 +600,8 @@ class PresenceHandler(object): self._push_to_remotes(states) - @defer.inlineCallbacks - def notify_for_states(self, state, stream_id): - parties = yield get_interested_parties(self.store, [state]) + async def notify_for_states(self, state, stream_id): + parties = await get_interested_parties(self.store, [state]) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( @@ -626,8 +619,7 @@ class PresenceHandler(object): """ self.federation.send_presence(states) - @defer.inlineCallbacks - def incoming_presence(self, origin, content): + async def incoming_presence(self, origin, content): """Called when we receive a `m.presence` EDU from a remote server. """ now = self.clock.time_msec() @@ -670,21 +662,19 @@ class PresenceHandler(object): new_fields["status_msg"] = push.get("status_msg", None) new_fields["currently_active"] = push.get("currently_active", False) - prev_state = yield self.current_state_for_user(user_id) + prev_state = await self.current_state_for_user(user_id) updates.append(prev_state.copy_and_replace(**new_fields)) if updates: federation_presence_counter.inc(len(updates)) - yield self._update_states(updates) + await self._update_states(updates) - @defer.inlineCallbacks - def get_state(self, target_user, as_event=False): - results = yield self.get_states([target_user.to_string()], as_event=as_event) + async def get_state(self, target_user, as_event=False): + results = await self.get_states([target_user.to_string()], as_event=as_event) return results[0] - @defer.inlineCallbacks - def get_states(self, target_user_ids, as_event=False): + async def get_states(self, target_user_ids, as_event=False): """Get the presence state for users. 
Args: @@ -695,7 +685,7 @@ class PresenceHandler(object): list """ - updates = yield self.current_state_for_users(target_user_ids) + updates = await self.current_state_for_users(target_user_ids) updates = list(updates.values()) for user_id in set(target_user_ids) - {u.user_id for u in updates}: @@ -713,8 +703,7 @@ class PresenceHandler(object): else: return updates - @defer.inlineCallbacks - def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state(self, target_user, state, ignore_status_msg=False): """Set the presence state of the user. """ status_msg = state.get("status_msg", None) @@ -730,7 +719,7 @@ class PresenceHandler(object): user_id = target_user.to_string() - prev_state = yield self.current_state_for_user(user_id) + prev_state = await self.current_state_for_user(user_id) new_fields = {"state": presence} @@ -741,16 +730,15 @@ class PresenceHandler(object): if presence == PresenceState.ONLINE: new_fields["last_active_ts"] = self.clock.time_msec() - yield self._update_states([prev_state.copy_and_replace(**new_fields)]) + await self._update_states([prev_state.copy_and_replace(**new_fields)]) - @defer.inlineCallbacks - def is_visible(self, observed_user, observer_user): + async def is_visible(self, observed_user, observer_user): """Returns whether a user can see another user's presence. """ - observer_room_ids = yield self.store.get_rooms_for_user( + observer_room_ids = await self.store.get_rooms_for_user( observer_user.to_string() ) - observed_room_ids = yield self.store.get_rooms_for_user( + observed_room_ids = await self.store.get_rooms_for_user( observed_user.to_string() ) @@ -759,8 +747,7 @@ class PresenceHandler(object): return False - @defer.inlineCallbacks - def get_all_presence_updates(self, last_id, current_id): + async def get_all_presence_updates(self, last_id, current_id): """ Gets a list of presence update rows from between the given stream ids. Each row has: @@ -775,7 +762,7 @@ class PresenceHandler(object): """ # TODO(markjh): replicate the unpersisted changes. # This could use the in-memory stores for recent changes. - rows = yield self.store.get_all_presence_updates(last_id, current_id) + rows = await self.store.get_all_presence_updates(last_id, current_id) return rows def notify_new_event(self): @@ -786,20 +773,18 @@ class PresenceHandler(object): if self._event_processing: return - @defer.inlineCallbacks - def _process_presence(): + async def _process_presence(): assert not self._event_processing self._event_processing = True try: - yield self._unsafe_process() + await self._unsafe_process() finally: self._event_processing = False run_as_background_process("presence.notify_new_event", _process_presence) - @defer.inlineCallbacks - def _unsafe_process(self): + async def _unsafe_process(self): # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "presence_delta"): @@ -812,10 +797,10 @@ class PresenceHandler(object): self._event_pos, room_max_stream_ordering, ) - max_pos, deltas = yield self.store.get_current_state_deltas( + max_pos, deltas = await self.store.get_current_state_deltas( self._event_pos, room_max_stream_ordering ) - yield self._handle_state_delta(deltas) + await self._handle_state_delta(deltas) self._event_pos = max_pos @@ -824,8 +809,7 @@ class PresenceHandler(object): max_pos ) - @defer.inlineCallbacks - def _handle_state_delta(self, deltas): + async def _handle_state_delta(self, deltas): """Process current state deltas to find new joins that need to be handled. 
""" @@ -846,13 +830,13 @@ class PresenceHandler(object): # joins. continue - event = yield self.store.get_event(event_id, allow_none=True) + event = await self.store.get_event(event_id, allow_none=True) if not event or event.content.get("membership") != Membership.JOIN: # We only care about joins continue if prev_event_id: - prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + prev_event = await self.store.get_event(prev_event_id, allow_none=True) if ( prev_event and prev_event.content.get("membership") == Membership.JOIN @@ -860,10 +844,9 @@ class PresenceHandler(object): # Ignore changes to join events. continue - yield self._on_user_joined_room(room_id, state_key) + await self._on_user_joined_room(room_id, state_key) - @defer.inlineCallbacks - def _on_user_joined_room(self, room_id, user_id): + async def _on_user_joined_room(self, room_id, user_id): """Called when we detect a user joining the room via the current state delta stream. @@ -882,8 +865,8 @@ class PresenceHandler(object): # TODO: We should be able to filter the hosts down to those that # haven't previously seen the user - state = yield self.current_state_for_user(user_id) - hosts = yield self.state.get_current_hosts_in_room(room_id) + state = await self.current_state_for_user(user_id) + hosts = await self.state.get_current_hosts_in_room(room_id) # Filter out ourselves. hosts = {host for host in hosts if host != self.server_name} @@ -903,10 +886,10 @@ class PresenceHandler(object): # TODO: Check that this is actually a new server joining the # room. - user_ids = yield self.state.get_current_users_in_room(room_id) + user_ids = await self.state.get_current_users_in_room(room_id) user_ids = list(filter(self.is_mine_id, user_ids)) - states = yield self.current_state_for_users(user_ids) + states = await self.current_state_for_users(user_ids) # Filter out old presence, i.e. offline presence states where # the user hasn't been active for a week. 
We can change this @@ -996,9 +979,8 @@ class PresenceEventSource(object): self.store = hs.get_datastore() self.state = hs.get_state_handler() - @defer.inlineCallbacks @log_function - def get_new_events( + async def get_new_events( self, user, from_key, @@ -1045,7 +1027,7 @@ class PresenceEventSource(object): presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache - users_interested_in = yield self._get_interested_in(user, explicit_room_id) + users_interested_in = await self._get_interested_in(user, explicit_room_id) user_ids_changed = set() changed = None @@ -1071,7 +1053,7 @@ class PresenceEventSource(object): else: user_ids_changed = users_interested_in - updates = yield presence.current_state_for_users(user_ids_changed) + updates = await presence.current_state_for_users(user_ids_changed) if include_offline: return (list(updates.values()), max_token) @@ -1084,11 +1066,11 @@ class PresenceEventSource(object): def get_current_key(self): return self.store.get_current_presence_token() - def get_pagination_rows(self, user, pagination_config, key): - return self.get_new_events(user, from_key=None, include_offline=False) + async def get_pagination_rows(self, user, pagination_config, key): + return await self.get_new_events(user, from_key=None, include_offline=False) - @cachedInlineCallbacks(num_args=2, cache_context=True) - def _get_interested_in(self, user, explicit_room_id, cache_context): + @cached(num_args=2, cache_context=True) + async def _get_interested_in(self, user, explicit_room_id, cache_context): """Returns the set of users that the given user should see presence updates for """ @@ -1096,13 +1078,13 @@ class PresenceEventSource(object): users_interested_in = set() users_interested_in.add(user_id) # So that we receive our own presence - users_who_share_room = yield self.store.get_users_who_share_room_with_user( + users_who_share_room = await self.store.get_users_who_share_room_with_user( user_id, on_invalidate=cache_context.invalidate ) users_interested_in.update(users_who_share_room) if explicit_room_id: - user_ids = yield self.store.get_users_in_room( + user_ids = await self.store.get_users_in_room( explicit_room_id, on_invalidate=cache_context.invalidate ) users_interested_in.update(user_ids) @@ -1277,8 +1259,8 @@ def get_interested_parties(store, states): 2-tuple: `(room_ids_to_states, users_to_states)`, with each item being a dict of `entity_name` -> `[UserPresenceState]` """ - room_ids_to_states = {} - users_to_states = {} + room_ids_to_states = {} # type: Dict[str, List[UserPresenceState]] + users_to_states = {} # type: Dict[str, List[UserPresenceState]] for state in states: room_ids = yield store.get_rooms_for_user(state.user_id) for room_id in room_ids: diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ce60ae2e07..ce9d1fae12 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -323,7 +323,11 @@ class ReplicationStreamer(object): # We need to tell the presence handler that the connection has been # lost so that it can handle any ongoing syncs on that connection. 
- self.presence_handler.update_external_syncs_clear(connection.conn_id) + run_as_background_process( + "update_external_syncs_clear", + self.presence_handler.update_external_syncs_clear, + connection.conn_id, + ) def _batch_updates(updates): diff --git a/synapse/server.pyi b/synapse/server.pyi index 40eabfe5d9..3844f0e12f 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -3,6 +3,7 @@ import twisted.internet import synapse.api.auth import synapse.config.homeserver import synapse.crypto.keyring +import synapse.federation.federation_server import synapse.federation.sender import synapse.federation.transport.client import synapse.handlers @@ -107,5 +108,9 @@ class HomeServer(object): self, ) -> synapse.replication.tcp.client.ReplicationClientHandler: pass + def get_federation_registry( + self, + ) -> synapse.federation.federation_server.FederationHandlerRegistry: + pass def is_mine_id(self, domain_id: str) -> bool: pass diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 64915bafcd..05ea40a7de 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -494,8 +494,10 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): self.helper.join(room_id, "@test2:server") # Mark test2 as online, test will be offline with a last_active of 0 - self.presence_handler.set_state( - UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} + self.get_success( + self.presence_handler.set_state( + UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} + ) ) self.reactor.pump([0]) # Wait for presence updates to be handled @@ -543,14 +545,18 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): room_id = self.helper.create_room_as(self.user_id) # Mark test as online - self.presence_handler.set_state( - UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE} + self.get_success( + self.presence_handler.set_state( + UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE} + ) ) # Mark test2 as online, test will be offline with a last_active of 0. # Note we don't join them to the room yet - self.presence_handler.set_state( - UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} + self.get_success( + self.presence_handler.set_state( + UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} + ) ) # Add servers to the room diff --git a/tox.ini b/tox.ini index b715ea0bff..4ccfde01b5 100644 --- a/tox.ini +++ b/tox.ini @@ -183,6 +183,7 @@ commands = mypy \ synapse/events/spamcheck.py \ synapse/federation/sender \ synapse/federation/transport \ + synapse/handlers/presence.py \ synapse/handlers/sync.py \ synapse/handlers/ui_auth \ synapse/logging/ \ From 380122866f8cf7b891c95f10a60c83537ef6c780 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 26 Feb 2020 11:32:13 -0500 Subject: [PATCH 185/382] Cast a coroutine into a Deferred in the federation base (#6996) Properly convert a coroutine into a Deferred in federation_base to fix an error when joining a room. --- changelog.d/6996.bugfix | 1 + synapse/federation/federation_base.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6996.bugfix diff --git a/changelog.d/6996.bugfix b/changelog.d/6996.bugfix new file mode 100644 index 0000000000..765d376c7c --- /dev/null +++ b/changelog.d/6996.bugfix @@ -0,0 +1 @@ +Fix bug which caused an error when joining a room, with `'coroutine' object has no attribute 'event_id'`. 
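The failure mode behind this fix: `inlineCallbacks` only suspends on Deferreds, so `yield`ing a bare coroutine (which the newly async `get_pdu` now returns) hands the coroutine object straight back as the result, and the caller then trips over it; hence the `'coroutine' object has no attribute 'event_id'` error. Wrapping the call in `defer.ensureDeferred` converts the coroutine into a Deferred first. A minimal standalone sketch of the fixed pattern (`fetch_pdu` and `caller` are invented names, not Synapse code):

    from twisted.internet import defer, task

    async def fetch_pdu(reactor):
        # Stand-in for an async-ported method such as get_pdu(); waits one
        # reactor turn, then returns a dict.
        await task.deferLater(reactor, 0, lambda: None)
        return {"event_id": "$abc"}

    @defer.inlineCallbacks
    def caller(reactor):
        # Without ensureDeferred, `res` would be the coroutine object itself
        # and res["event_id"] would fail.
        res = yield defer.ensureDeferred(fetch_pdu(reactor))
        print(res["event_id"])

    if __name__ == "__main__":
        task.react(caller)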
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index eea64c1c9f..9fff65716a 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -96,12 +96,14 @@ class FederationBase(object): if not res and pdu.origin != origin: try: - res = yield self.get_pdu( - destinations=[pdu.origin], - event_id=pdu.event_id, - room_version=room_version, - outlier=outlier, - timeout=10000, + res = yield defer.ensureDeferred( + self.get_pdu( + destinations=[pdu.origin], + event_id=pdu.event_id, + room_version=room_version, + outlier=outlier, + timeout=10000, + ) ) except SynapseError: pass From 3e99528f2bfaa686c4708fb8efcddce935b2397d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 26 Feb 2020 16:58:33 +0000 Subject: [PATCH 186/382] Store room version on invite (#6983) When we get an invite over federation, store the room version in the rooms table. The general idea here is that, when we pull the invite out again, we'll want to know what room_version it belongs to (so that we can later redact it if need be). So we need to store it somewhere... --- changelog.d/6983.misc | 1 + synapse/handlers/federation.py | 12 ++++++++ synapse/replication/http/_base.py | 2 +- synapse/replication/http/federation.py | 36 +++++++++++++++++++++++- synapse/storage/data_stores/main/room.py | 20 +++++++++++++ tests/app/test_openid_listener.py | 8 ++++++ tests/handlers/test_typing.py | 1 + 7 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6983.misc diff --git a/changelog.d/6983.misc b/changelog.d/6983.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6983.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c2e6ee266d..38ab6a8fc3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -60,6 +60,7 @@ from synapse.replication.http.devices import ReplicationUserDevicesResyncRestSer from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, + ReplicationStoreRoomOnInviteRestServlet, ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store @@ -160,8 +161,12 @@ class FederationHandler(BaseHandler): self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client( hs ) + self._maybe_store_room_on_invite = ReplicationStoreRoomOnInviteRestServlet.make_client( + hs + ) else: self._device_list_updater = hs.get_device_handler().device_list_updater + self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite # When joining a room we need to queue any events for that room up self.room_queues = {} @@ -1537,6 +1542,13 @@ class FederationHandler(BaseHandler): if event.state_key == self._server_notices_mxid: raise SynapseError(http_client.FORBIDDEN, "Cannot invite this user") + # keep a record of the room version, if we don't yet know it. + # (this may get overwritten if we later get a different room version in a + # join dance). 
+        await self._maybe_store_room_on_invite(
+            room_id=event.room_id, room_version=room_version
+        )
+
         event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 444eb7b7f4..1be1ccbdf3 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -44,7 +44,7 @@ class ReplicationEndpoint(object):
     """Helper base class for defining new replication HTTP endpoints.
 
     This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
-    (with an `/:txn_id` prefix for cached requests.), where NAME is a name,
+    (with a `/:txn_id` suffix for cached requests), where NAME is a name,
     PATH_ARGS are a tuple of parameters to be encoded in the URL.
 
     For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 49a3251372..8794720101 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -17,6 +17,7 @@ import logging
 
 from twisted.internet import defer
 
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import event_type_from_format_version
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
@@ -211,7 +212,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
 
     Request format:
 
-        POST /_synapse/replication/fed_query/:fed_cleanup_room/:txn_id
+        POST /_synapse/replication/fed_cleanup_room/:room_id/:txn_id
 
         {}
     """
@@ -238,8 +239,41 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
         return 200, {}
 
 
+class ReplicationStoreRoomOnInviteRestServlet(ReplicationEndpoint):
+    """Called when we receive an invite over federation, to store the version
+    of the room if we don't already know it.
+
+    Request format:
+
+        POST /_synapse/replication/store_room_on_invite/:room_id/:txn_id
+
+        {
+            "room_version": "1",
+        }
+    """
+
+    NAME = "store_room_on_invite"
+    PATH_ARGS = ("room_id",)
+
+    def __init__(self, hs):
+        super().__init__(hs)
+
+        self.store = hs.get_datastore()
+
+    @staticmethod
+    def _serialize_payload(room_id, room_version):
+        return {"room_version": room_version.identifier}
+
+    async def _handle_request(self, request, room_id):
+        content = parse_json_object_from_request(request)
+        room_version = KNOWN_ROOM_VERSIONS[content["room_version"]]
+        await self.store.maybe_store_room_on_invite(room_id, room_version)
+        return 200, {}
+
+
 def register_servlets(hs, http_server):
     ReplicationFederationSendEventsRestServlet(hs).register(http_server)
     ReplicationFederationSendEduRestServlet(hs).register(http_server)
     ReplicationGetQueryRestServlet(hs).register(http_server)
     ReplicationCleanRoomRestServlet(hs).register(http_server)
+    ReplicationStoreRoomOnInviteRestServlet(hs).register(http_server)
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 70137dfbe4..e6c10c6316 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -1020,6 +1020,26 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             logger.error("store_room with room_id=%s failed: %s", room_id, e)
             raise StoreError(500, "Problem creating room.")
 
+    async def maybe_store_room_on_invite(self, room_id: str, room_version: RoomVersion):
+        """
+        When we receive an invite over federation, store the version of the room if we
+        don't already know the room version.
+ """ + await self.db.simple_upsert( + desc="maybe_store_room_on_invite", + table="rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={ + "room_version": room_version.identifier, + "is_public": False, + "creator": "", + }, + # rooms has a unique constraint on room_id, so no need to lock when doing an + # emulated upsert. + lock=False, + ) + @defer.inlineCallbacks def set_room_is_public(self, room_id, is_public): def set_room_is_public_txn(txn, next_id): diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 1fe048048b..89fcc3889a 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -29,6 +29,14 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): ) return hs + def default_config(self, name="test"): + conf = super().default_config(name) + # we're using FederationReaderServer, which uses a SlavedStore, so we + # have to tell the FederationHandler not to try to access stuff that is only + # in the primary store. + conf["worker_app"] = "yes" + return conf + @parameterized.expand( [ (["federation"], "auth_fail"), diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 07b204666e..51e2b37218 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -74,6 +74,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): "set_received_txn_response", "get_destination_retry_timings", "get_devices_by_remote", + "maybe_store_room_on_invite", # Bits that user_directory needs "get_user_directory_stream_pos", "get_current_state_deltas", From 132b673dbefa42eb7669a11522426f26e225ac05 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Feb 2020 11:53:40 +0000 Subject: [PATCH 187/382] Add some type annotations in `synapse.storage` (#6987) I cracked, and added some type definitions in synapse.storage. --- changelog.d/6987.misc | 1 + synapse/storage/database.py | 143 ++++++++++++++++------------ synapse/storage/engines/__init__.py | 28 +++--- synapse/storage/engines/_base.py | 87 +++++++++++++++++ synapse/storage/engines/postgres.py | 12 ++- synapse/storage/engines/sqlite.py | 13 ++- synapse/storage/types.py | 65 +++++++++++++ tox.ini | 5 +- 8 files changed, 270 insertions(+), 84 deletions(-) create mode 100644 changelog.d/6987.misc create mode 100644 synapse/storage/types.py diff --git a/changelog.d/6987.misc b/changelog.d/6987.misc new file mode 100644 index 0000000000..7ff74cda55 --- /dev/null +++ b/changelog.d/6987.misc @@ -0,0 +1 @@ +Add some type annotations to the database storage classes. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 1953614401..609db40616 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -15,9 +15,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import sys import time -from typing import Iterable, Tuple +from time import monotonic as monotonic_time +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple from six import iteritems, iterkeys, itervalues from six.moves import intern, range @@ -32,24 +32,14 @@ from synapse.config.database import DatabaseConnectionConfig from synapse.logging.context import LoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater -from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine +from synapse.storage.types import Connection, Cursor from synapse.util.stringutils import exception_to_unicode -# import a function which will return a monotonic time, in seconds -try: - # on python 3, use time.monotonic, since time.clock can go backwards - from time import monotonic as monotonic_time -except ImportError: - # ... but python 2 doesn't have it - from time import clock as monotonic_time - logger = logging.getLogger(__name__) -try: - MAX_TXN_ID = sys.maxint - 1 -except AttributeError: - # python 3 does not have a maximum int value - MAX_TXN_ID = 2 ** 63 - 1 +# python 3 does not have a maximum int value +MAX_TXN_ID = 2 ** 63 - 1 sql_logger = logging.getLogger("synapse.storage.SQL") transaction_logger = logging.getLogger("synapse.storage.txn") @@ -77,7 +67,7 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = { def make_pool( - reactor, db_config: DatabaseConnectionConfig, engine + reactor, db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine ) -> adbapi.ConnectionPool: """Get the connection pool for the database. """ @@ -90,7 +80,9 @@ def make_pool( ) -def make_conn(db_config: DatabaseConnectionConfig, engine): +def make_conn( + db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine +) -> Connection: """Make a new connection to the database and return it. Returns: @@ -107,20 +99,27 @@ def make_conn(db_config: DatabaseConnectionConfig, engine): return db_conn -class LoggingTransaction(object): +# The type of entry which goes on our after_callbacks and exception_callbacks lists. +# +# Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so +# that mypy sees the type but the runtime python doesn't. +_CallbackListEntry = Tuple["Callable[..., None]", Iterable[Any], Dict[str, Any]] + + +class LoggingTransaction: """An object that almost-transparently proxies for the 'txn' object passed to the constructor. Adds logging and metrics to the .execute() method. Args: txn: The database transcation object to wrap. - name (str): The name of this transactions for logging. - database_engine (Sqlite3Engine|PostgresEngine) - after_callbacks(list|None): A list that callbacks will be appended to + name: The name of this transactions for logging. + database_engine + after_callbacks: A list that callbacks will be appended to that have been added by `call_after` which should be run on successful completion of the transaction. None indicates that no callbacks should be allowed to be scheduled to run. - exception_callbacks(list|None): A list that callbacks will be appended + exception_callbacks: A list that callbacks will be appended to that have been added by `call_on_exception` which should be run if transaction ends with an error. None indicates that no callbacks should be allowed to be scheduled to run. 
@@ -135,46 +134,67 @@ class LoggingTransaction(object): ] def __init__( - self, txn, name, database_engine, after_callbacks=None, exception_callbacks=None + self, + txn: Cursor, + name: str, + database_engine: BaseDatabaseEngine, + after_callbacks: Optional[List[_CallbackListEntry]] = None, + exception_callbacks: Optional[List[_CallbackListEntry]] = None, ): - object.__setattr__(self, "txn", txn) - object.__setattr__(self, "name", name) - object.__setattr__(self, "database_engine", database_engine) - object.__setattr__(self, "after_callbacks", after_callbacks) - object.__setattr__(self, "exception_callbacks", exception_callbacks) + self.txn = txn + self.name = name + self.database_engine = database_engine + self.after_callbacks = after_callbacks + self.exception_callbacks = exception_callbacks - def call_after(self, callback, *args, **kwargs): + def call_after(self, callback: "Callable[..., None]", *args, **kwargs): """Call the given callback on the main twisted thread after the transaction has finished. Used to invalidate the caches on the correct thread. """ + # if self.after_callbacks is None, that means that whatever constructed the + # LoggingTransaction isn't expecting there to be any callbacks; assert that + # is not the case. + assert self.after_callbacks is not None self.after_callbacks.append((callback, args, kwargs)) - def call_on_exception(self, callback, *args, **kwargs): + def call_on_exception(self, callback: "Callable[..., None]", *args, **kwargs): + # if self.exception_callbacks is None, that means that whatever constructed the + # LoggingTransaction isn't expecting there to be any callbacks; assert that + # is not the case. + assert self.exception_callbacks is not None self.exception_callbacks.append((callback, args, kwargs)) - def __getattr__(self, name): - return getattr(self.txn, name) + def fetchall(self) -> List[Tuple]: + return self.txn.fetchall() - def __setattr__(self, name, value): - setattr(self.txn, name, value) + def fetchone(self) -> Tuple: + return self.txn.fetchone() - def __iter__(self): + def __iter__(self) -> Iterator[Tuple]: return self.txn.__iter__() + @property + def rowcount(self) -> int: + return self.txn.rowcount + + @property + def description(self) -> Any: + return self.txn.description + def execute_batch(self, sql, args): if isinstance(self.database_engine, PostgresEngine): - from psycopg2.extras import execute_batch + from psycopg2.extras import execute_batch # type: ignore self._do_execute(lambda *x: execute_batch(self.txn, *x), sql, args) else: for val in args: self.execute(sql, val) - def execute(self, sql, *args): + def execute(self, sql: str, *args: Any): self._do_execute(self.txn.execute, sql, *args) - def executemany(self, sql, *args): + def executemany(self, sql: str, *args: Any): self._do_execute(self.txn.executemany, sql, *args) def _make_sql_one_line(self, sql): @@ -207,6 +227,9 @@ class LoggingTransaction(object): sql_logger.debug("[SQL time] {%s} %f sec", self.name, secs) sql_query_timer.labels(sql.split()[0]).observe(secs) + def close(self): + self.txn.close() + class PerformanceCounters(object): def __init__(self): @@ -251,7 +274,9 @@ class Database(object): _TXN_ID = 0 - def __init__(self, hs, database_config: DatabaseConnectionConfig, engine): + def __init__( + self, hs, database_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine + ): self.hs = hs self._clock = hs.get_clock() self._database_config = database_config @@ -259,9 +284,9 @@ class Database(object): self.updates = BackgroundUpdater(hs, self) - 
self._previous_txn_total_time = 0 - self._current_txn_total_time = 0 - self._previous_loop_ts = 0 + self._previous_txn_total_time = 0.0 + self._current_txn_total_time = 0.0 + self._previous_loop_ts = 0.0 # TODO(paul): These can eventually be removed once the metrics code # is running in mainline, and we have some nice monitoring frontends @@ -463,23 +488,23 @@ class Database(object): sql_txn_timer.labels(desc).observe(duration) @defer.inlineCallbacks - def runInteraction(self, desc, func, *args, **kwargs): + def runInteraction(self, desc: str, func: Callable, *args: Any, **kwargs: Any): """Starts a transaction on the database and runs a given function Arguments: - desc (str): description of the transaction, for logging and metrics - func (func): callback function, which will be called with a + desc: description of the transaction, for logging and metrics + func: callback function, which will be called with a database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. - args (list): positional args to pass to `func` - kwargs (dict): named args to pass to `func` + args: positional args to pass to `func` + kwargs: named args to pass to `func` Returns: Deferred: The result of func """ - after_callbacks = [] - exception_callbacks = [] + after_callbacks = [] # type: List[_CallbackListEntry] + exception_callbacks = [] # type: List[_CallbackListEntry] if LoggingContext.current_context() == LoggingContext.sentinel: logger.warning("Starting db txn '%s' from sentinel context", desc) @@ -505,15 +530,15 @@ class Database(object): return result @defer.inlineCallbacks - def runWithConnection(self, func, *args, **kwargs): + def runWithConnection(self, func: Callable, *args: Any, **kwargs: Any): """Wraps the .runWithConnection() method on the underlying db_pool. Arguments: - func (func): callback function, which will be called with a + func: callback function, which will be called with a database connection (twisted.enterprise.adbapi.Connection) as its first argument, followed by `args` and `kwargs`. - args (list): positional args to pass to `func` - kwargs (dict): named args to pass to `func` + args: positional args to pass to `func` + kwargs: named args to pass to `func` Returns: Deferred: The result of func @@ -800,7 +825,7 @@ class Database(object): return False # We didn't find any existing rows, so insert a new one - allvalues = {} + allvalues = {} # type: Dict[str, Any] allvalues.update(keyvalues) allvalues.update(values) allvalues.update(insertion_values) @@ -829,7 +854,7 @@ class Database(object): Returns: None """ - allvalues = {} + allvalues = {} # type: Dict[str, Any] allvalues.update(keyvalues) allvalues.update(insertion_values) @@ -916,7 +941,7 @@ class Database(object): Returns: None """ - allnames = [] + allnames = [] # type: List[str] allnames.extend(key_names) allnames.extend(value_names) @@ -1100,7 +1125,7 @@ class Database(object): keyvalues : dict of column names and values to select the rows with retcols : list of strings giving the names of the columns to return """ - results = [] + results = [] # type: List[Dict[str, Any]] if not iterable: return results @@ -1439,7 +1464,7 @@ class Database(object): raise ValueError("order_direction must be one of 'ASC' or 'DESC'.") where_clause = "WHERE " if filters or keyvalues else "" - arg_list = [] + arg_list = [] # type: List[Any] if filters: where_clause += " AND ".join("%s LIKE ?" 
% (k,) for k in filters) arg_list += list(filters.values()) diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index 9d2d519922..035f9ea6e9 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -12,29 +12,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import importlib import platform -from ._base import IncorrectDatabaseSetup +from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup from .postgres import PostgresEngine from .sqlite import Sqlite3Engine -SUPPORTED_MODULE = {"sqlite3": Sqlite3Engine, "psycopg2": PostgresEngine} - -def create_engine(database_config): +def create_engine(database_config) -> BaseDatabaseEngine: name = database_config["name"] - engine_class = SUPPORTED_MODULE.get(name, None) - if engine_class: + if name == "sqlite3": + import sqlite3 + + return Sqlite3Engine(sqlite3, database_config) + + if name == "psycopg2": # pypy requires psycopg2cffi rather than psycopg2 - if name == "psycopg2" and platform.python_implementation() == "PyPy": - name = "psycopg2cffi" - module = importlib.import_module(name) - return engine_class(module, database_config) + if platform.python_implementation() == "PyPy": + import psycopg2cffi as psycopg2 # type: ignore + else: + import psycopg2 # type: ignore + + return PostgresEngine(psycopg2, database_config) raise RuntimeError("Unsupported database engine '%s'" % (name,)) -__all__ = ["create_engine", "IncorrectDatabaseSetup"] +__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"] diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index ec5a4d198b..ab0bbe4bd3 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -12,7 +12,94 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import abc +from typing import Generic, TypeVar + +from synapse.storage.types import Connection class IncorrectDatabaseSetup(RuntimeError): pass + + +ConnectionType = TypeVar("ConnectionType", bound=Connection) + + +class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta): + def __init__(self, module, database_config: dict): + self.module = module + + @property + @abc.abstractmethod + def single_threaded(self) -> bool: + ... + + @property + @abc.abstractmethod + def can_native_upsert(self) -> bool: + """ + Do we support native UPSERTs? + """ + ... + + @property + @abc.abstractmethod + def supports_tuple_comparison(self) -> bool: + """ + Do we support comparing tuples, i.e. `(a, b) > (c, d)`? + """ + ... + + @property + @abc.abstractmethod + def supports_using_any_list(self) -> bool: + """ + Do we support using `a = ANY(?)` and passing a list + """ + ... + + @abc.abstractmethod + def check_database( + self, db_conn: ConnectionType, allow_outdated_version: bool = False + ) -> None: + ... + + @abc.abstractmethod + def check_new_database(self, txn) -> None: + """Gets called when setting up a brand new database. This allows us to + apply stricter checks on new databases versus existing database. + """ + ... + + @abc.abstractmethod + def convert_param_style(self, sql: str) -> str: + ... + + @abc.abstractmethod + def on_new_connection(self, db_conn: ConnectionType) -> None: + ... 
+ + @abc.abstractmethod + def is_deadlock(self, error: Exception) -> bool: + ... + + @abc.abstractmethod + def is_connection_closed(self, conn: ConnectionType) -> bool: + ... + + @abc.abstractmethod + def lock_table(self, txn, table: str) -> None: + ... + + @abc.abstractmethod + def get_next_state_group_id(self, txn) -> int: + """Returns an int that can be used as a new state_group ID + """ + ... + + @property + @abc.abstractmethod + def server_version(self) -> str: + """Gets a string giving the server version. For example: '3.22.0' + """ + ... diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 53b3f372b0..6c7d08a6f2 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,16 +15,14 @@ import logging -from ._base import IncorrectDatabaseSetup +from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup logger = logging.getLogger(__name__) -class PostgresEngine(object): - single_threaded = False - +class PostgresEngine(BaseDatabaseEngine): def __init__(self, database_module, database_config): - self.module = database_module + super().__init__(database_module, database_config) self.module.extensions.register_type(self.module.extensions.UNICODE) # Disables passing `bytes` to txn.execute, c.f. #6186. If you do @@ -36,6 +34,10 @@ class PostgresEngine(object): self.synchronous_commit = database_config.get("synchronous_commit", True) self._version = None # unknown as yet + @property + def single_threaded(self) -> bool: + return False + def check_database(self, db_conn, allow_outdated_version: bool = False): # Get the version of PostgreSQL that we're using. As per the psycopg2 # docs: The number is formed by converting the major, minor, and diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 641e490697..2bfeefd54e 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -12,16 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import sqlite3 import struct import threading +from synapse.storage.engines import BaseDatabaseEngine -class Sqlite3Engine(object): - single_threaded = True +class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]): def __init__(self, database_module, database_config): - self.module = database_module + super().__init__(database_module, database_config) database = database_config.get("args", {}).get("database") self._is_in_memory = database in (None, ":memory:",) @@ -31,6 +31,10 @@ class Sqlite3Engine(object): self._current_state_group_id = None self._current_state_group_id_lock = threading.Lock() + @property + def single_threaded(self) -> bool: + return True + @property def can_native_upsert(self): """ @@ -68,7 +72,6 @@ class Sqlite3Engine(object): return sql def on_new_connection(self, db_conn): - # We need to import here to avoid an import loop. from synapse.storage.prepare_database import prepare_database diff --git a/synapse/storage/types.py b/synapse/storage/types.py new file mode 100644 index 0000000000..daff81c5ee --- /dev/null +++ b/synapse/storage/types.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Iterable, Iterator, List, Tuple + +from typing_extensions import Protocol + + +""" +Some very basic protocol definitions for the DB-API2 classes specified in PEP-249 +""" + + +class Cursor(Protocol): + def execute(self, sql: str, parameters: Iterable[Any] = ...) -> Any: + ... + + def executemany(self, sql: str, parameters: Iterable[Iterable[Any]]) -> Any: + ... + + def fetchall(self) -> List[Tuple]: + ... + + def fetchone(self) -> Tuple: + ... + + @property + def description(self) -> Any: + return None + + @property + def rowcount(self) -> int: + return 0 + + def __iter__(self) -> Iterator[Tuple]: + ... + + def close(self) -> None: + ... + + +class Connection(Protocol): + def cursor(self) -> Cursor: + ... + + def close(self) -> None: + ... + + def commit(self) -> None: + ... + + def rollback(self, *args, **kwargs) -> None: + ... diff --git a/tox.ini b/tox.ini index 4ccfde01b5..6521535137 100644 --- a/tox.ini +++ b/tox.ini @@ -168,7 +168,6 @@ commands= coverage html [testenv:mypy] -basepython = python3.7 skip_install = True deps = {[base]deps} @@ -179,7 +178,8 @@ env = extras = all commands = mypy \ synapse/api \ - synapse/config/ \ + synapse/appservice \ + synapse/config \ synapse/events/spamcheck.py \ synapse/federation/sender \ synapse/federation/transport \ @@ -192,6 +192,7 @@ commands = mypy \ synapse/rest \ synapse/spam_checker_api \ synapse/storage/engines \ + synapse/storage/database.py \ synapse/streams # To find all folders that pass mypy you run: From b32ac60c22493cd191d63eae5104fa9d69c37495 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 27 Feb 2020 23:47:40 +1100 Subject: [PATCH 188/382] Expose common commands via snap run interface to allow easier invocation (#6315) Signed-off-by: James Hebden --- changelog.d/6315.feature | 1 + snap/snapcraft.yaml | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6315.feature diff --git a/changelog.d/6315.feature b/changelog.d/6315.feature new file mode 100644 index 0000000000..c5377dd1e9 --- /dev/null +++ b/changelog.d/6315.feature @@ -0,0 +1 @@ +Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 9e644e8567..6b62b79114 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,20 +1,31 @@ name: matrix-synapse base: core18 -version: git +version: git summary: Reference Matrix homeserver description: | Synapse is the reference Matrix homeserver. Matrix is a federated and decentralised instant messaging and VoIP system. 
-grade: stable
-confinement: strict
+grade: stable
+confinement: strict
 
 apps:
-  matrix-synapse:
+  matrix-synapse:
     command: synctl --no-daemonize start $SNAP_COMMON/homeserver.yaml
     stop-command: synctl -c $SNAP_COMMON stop
     plugs: [network-bind, network]
-    daemon: simple
+    daemon: simple
+  hash-password:
+    command: hash_password
+  generate-config:
+    command: generate_config
+  generate-signing-key:
+    command: generate_signing_key.py
+  register-new-matrix-user:
+    command: register_new_matrix_user
+    plugs: [network]
+  synctl:
+    command: synctl
 
 parts:
   matrix-synapse:
     source: .

From cab4a52535097c5836fe67c5e09e8350d7ccf03c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 27 Feb 2020 13:08:43 +0000
Subject: [PATCH 189/382] set worker_app for frontend proxy test (#7003)

to stop the FederationHandler trying to do master-process work

---
 changelog.d/7003.misc            | 1 +
 tests/app/test_frontend_proxy.py | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 changelog.d/7003.misc

diff --git a/changelog.d/7003.misc b/changelog.d/7003.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/7003.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py
index 160e55aca9..d3feafa1b7 100644
--- a/tests/app/test_frontend_proxy.py
+++ b/tests/app/test_frontend_proxy.py
@@ -27,6 +27,11 @@ class FrontendProxyTests(HomeserverTestCase):
 
         return hs
 
+    def default_config(self, name="test"):
+        c = super().default_config(name)
+        c["worker_app"] = "synapse.app.frontend_proxy"
+        return c
+
     def test_listen_http_with_presence_enabled(self):
         """
         When presence is on, the stub servlet will not register.

From 2201bc979588720bd99880b9cd8df2292b2d483f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 27 Feb 2020 16:33:21 +0000
Subject: [PATCH 190/382] Don't refuse to start worker if media listener
 configured. (#7002)

Instead let's just warn if the worker has a media listener configured but
has the media repository disabled. Previously, non-media-repository
workers would just ignore the media listener.

---
 changelog.d/7002.misc         |  1 +
 synapse/app/generic_worker.py | 30 ++++++++++++++++++------------
 2 files changed, 19 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/7002.misc

diff --git a/changelog.d/7002.misc b/changelog.d/7002.misc
new file mode 100644
index 0000000000..ec5c004bbe
--- /dev/null
+++ b/changelog.d/7002.misc
@@ -0,0 +1 @@
+Merge worker apps together.
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 30efd39092..b2c764bfe8 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -494,20 +494,26 @@ class GenericWorkerServer(HomeServer):
         elif name == "federation":
             resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
         elif name == "media":
-            media_repo = self.get_media_repository_resource()
+            if self.config.can_load_media_repo:
+                media_repo = self.get_media_repository_resource()
 
-            # We need to serve the admin servlets for media on the
-            # worker.
+ admin_resource = JsonResource(self, canonical_json=False) + register_servlets_for_media_repo(self, admin_resource) - resources.update( - { - MEDIA_PREFIX: media_repo, - LEGACY_MEDIA_PREFIX: media_repo, - "/_synapse/admin": admin_resource, - } - ) + resources.update( + { + MEDIA_PREFIX: media_repo, + LEGACY_MEDIA_PREFIX: media_repo, + "/_synapse/admin": admin_resource, + } + ) + else: + logger.warning( + "A 'media' listener is configured but the media" + " repository is disabled. Ignoring." + ) if name == "openid" and "federation" not in res["names"]: # Only load the openid resource separately if federation resource From 9b06d8f8a62dc5c423aa9a694e0759eaf1c3c77e Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 28 Feb 2020 10:58:05 +0100 Subject: [PATCH 191/382] Fixed set a user as an admin with the new API (#6928) Fix #6910 --- changelog.d/6910.bugfix | 1 + synapse/rest/admin/users.py | 6 +- .../storage/data_stores/main/registration.py | 16 +- tests/rest/admin/test_user.py | 220 +++++++++++++++--- 4 files changed, 200 insertions(+), 43 deletions(-) create mode 100644 changelog.d/6910.bugfix diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix new file mode 100644 index 0000000000..707f1ff7b5 --- /dev/null +++ b/changelog.d/6910.bugfix @@ -0,0 +1 @@ +Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/`. Contributed by @dklimpel. diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index c5b461a236..80f959248d 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -211,9 +211,7 @@ class UserRestServletV2(RestServlet): if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") - await self.admin_handler.set_user_server_admin( - target_user, set_admin_to - ) + await self.store.set_server_admin(target_user, set_admin_to) if "password" in body: if ( @@ -651,6 +649,6 @@ class UserAdminServlet(RestServlet): if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") - await self.store.set_user_server_admin(target_user, set_admin_to) + await self.store.set_server_admin(target_user, set_admin_to) return 200, {} diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index 49306642ed..3e53c8568a 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -301,12 +301,16 @@ class RegistrationWorkerStore(SQLBaseStore): admin (bool): true iff the user is to be a server admin, false otherwise. 
""" - return self.db.simple_update_one( - table="users", - keyvalues={"name": user.to_string()}, - updatevalues={"admin": 1 if admin else 0}, - desc="set_server_admin", - ) + + def set_server_admin_txn(txn): + self.db.simple_update_one_txn( + txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0} + ) + self._invalidate_cache_and_stream( + txn, self.get_user_by_id, (user.to_string(),) + ) + + return self.db.runInteraction("set_server_admin", set_server_admin_txn) def _query_for_auth(self, txn, token): sql = ( diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index cbe4a6a51f..6416fb5d2a 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -16,6 +16,7 @@ import hashlib import hmac import json +import urllib.parse from mock import Mock @@ -371,22 +372,24 @@ class UserRestTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.url = "/_synapse/admin/v2/users/@bob:test" - self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") self.other_user_token = self.login("user", "pass") + self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote( + self.other_user + ) def test_requester_is_no_admin(self): """ If the user is not a server admin, an error is returned. """ self.hs.config.registration_shared_secret = None + url = "/_synapse/admin/v2/users/@bob:test" request, channel = self.make_request( - "GET", self.url, access_token=self.other_user_token, + "GET", url, access_token=self.other_user_token, ) self.render(request) @@ -394,7 +397,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("You are not a server admin", channel.json_body["error"]) request, channel = self.make_request( - "PUT", self.url, access_token=self.other_user_token, content=b"{}", + "PUT", url, access_token=self.other_user_token, content=b"{}", ) self.render(request) @@ -417,24 +420,26 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) - def test_requester_is_admin(self): + def test_create_server_admin(self): """ - If the user is a server admin, a new user is created. + Check that a new admin user is created successfully. 
""" self.hs.config.registration_shared_secret = None + url = "/_synapse/admin/v2/users/@bob:test" + # Create user (server admin) body = json.dumps( { "password": "abc123", "admin": True, + "displayname": "Bob's name", "threepids": [{"medium": "email", "address": "bob@bob.bob"}], } ) - # Create user request, channel = self.make_request( "PUT", - self.url, + url, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) @@ -442,29 +447,85 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("bob", channel.json_body["displayname"]) + self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) + self.assertEqual(True, channel.json_body["admin"]) # Get user request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", url, access_token=self.admin_user_tok, ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("bob", channel.json_body["displayname"]) - self.assertEqual(1, channel.json_body["admin"]) - self.assertEqual(0, channel.json_body["is_guest"]) - self.assertEqual(0, channel.json_body["deactivated"]) + self.assertEqual("Bob's name", channel.json_body["displayname"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) + self.assertEqual(True, channel.json_body["admin"]) + self.assertEqual(False, channel.json_body["is_guest"]) + self.assertEqual(False, channel.json_body["deactivated"]) + + def test_create_user(self): + """ + Check that a new regular user is created successfully. 
+        """
+        self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
+
+        # Create user
+        body = json.dumps(
+            {
+                "password": "abc123",
+                "admin": False,
+                "displayname": "Bob's name",
+                "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+            }
+        )
+
+        request, channel = self.make_request(
+            "PUT",
+            url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_set_password(self):
+        """
+        Test setting a new password for another user.
+        """
+        self.hs.config.registration_shared_secret = None
 
         # Change password
         body = json.dumps({"password": "hahaha"})
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
@@ -472,41 +533,133 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
+    def test_set_displayname(self):
+        """
+        Test setting the displayname of another user.
+        """
+        self.hs.config.registration_shared_secret = None
+        # Modify user
+        body = json.dumps({"displayname": "foobar"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+    def test_set_threepid(self):
+        """
+        Test setting threepid for another user.
+ """ + self.hs.config.registration_shared_secret = None + + # Delete old and add new threepid to user body = json.dumps( - { - "displayname": "foobar", - "deactivated": True, - "threepids": [{"medium": "email", "address": "bob2@bob.bob"}], - } + {"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]} ) request, channel = self.make_request( "PUT", - self.url, + self.url_other_user, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("foobar", channel.json_body["displayname"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_other_user, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"]) + + def test_deactivate_user(self): + """ + Test deactivating another user. + """ + + # Deactivate user + body = json.dumps({"deactivated": True}) + + request, channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(True, channel.json_body["deactivated"]) # the user is deactivated, the threepid will be deleted # Get user request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", self.url_other_user, access_token=self.admin_user_tok, ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("foobar", channel.json_body["displayname"]) - self.assertEqual(1, channel.json_body["admin"]) - self.assertEqual(0, channel.json_body["is_guest"]) - self.assertEqual(1, channel.json_body["deactivated"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["deactivated"]) + + def test_set_user_as_admin(self): + """ + Test setting the admin flag on a user. 
+ """ + self.hs.config.registration_shared_secret = None + + # Set a user as an admin + body = json.dumps({"admin": True}) + + request, channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["admin"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_other_user, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["admin"]) def test_accidental_deactivation_prevention(self): """ @@ -514,13 +667,14 @@ class UserRestTestCase(unittest.HomeserverTestCase): for the deactivated body parameter """ self.hs.config.registration_shared_secret = None + url = "/_synapse/admin/v2/users/@bob:test" # Create user body = json.dumps({"password": "abc123"}) request, channel = self.make_request( "PUT", - self.url, + url, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) @@ -532,7 +686,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): # Get user request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", url, access_token=self.admin_user_tok, ) self.render(request) @@ -546,7 +700,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "PUT", - self.url, + url, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) @@ -556,7 +710,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): # Check user is not deactivated request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", url, access_token=self.admin_user_tok, ) self.render(request) From c3c6c0e6222cc1bc8ae35a66389dc428d0ddbc92 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Feb 2020 11:15:11 +0000 Subject: [PATCH 192/382] Add 'device_lists_outbound_pokes' as extra table. This makes sure we check all the relevant tables to get the current max stream ID. Currently not doing so isn't problematic as the max stream ID in `device_lists_outbound_pokes` is the same as in `device_lists_stream`, however that will change. 
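The fix itself is easy to state: on startup, the ID tracker's current position
must be the maximum stream ID over every table that writes to the stream, not
just the primary one. A minimal, self-contained sketch of that calculation (a
standalone sqlite3 toy with made-up rows, not Synapse's actual
`SlavedIdTracker`):

```python
import sqlite3


def get_current_token(conn, table, column, extra_tables=()):
    """Return the largest stream ID seen across the main table and any
    extra tables sharing the same ID space."""
    current = 1
    for tbl, col in [(table, column)] + list(extra_tables):
        row = conn.execute(
            "SELECT COALESCE(MAX(%s), 1) FROM %s" % (col, tbl)
        ).fetchone()
        current = max(current, row[0])
    return current


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE device_lists_stream (stream_id INTEGER)")
conn.execute("CREATE TABLE device_lists_outbound_pokes (stream_id INTEGER)")
conn.execute("INSERT INTO device_lists_stream VALUES (3)")
conn.execute("INSERT INTO device_lists_outbound_pokes VALUES (7)")

# Checking only the primary table would wrongly resume from 3 instead of 7.
print(get_current_token(conn, "device_lists_stream", "stream_id"))  # -> 3
print(
    get_current_token(
        conn,
        "device_lists_stream",
        "stream_id",
        extra_tables=[("device_lists_outbound_pokes", "stream_id")],
    )
)  # -> 7
```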
--- synapse/replication/slave/storage/devices.py | 8 +++++++- synapse/storage/data_stores/main/__init__.py | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 1c77687eea..bf46cc4f8a 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -29,7 +29,13 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto self.hs = hs self._device_list_id_gen = SlavedIdTracker( - db_conn, "device_lists_stream", "stream_id" + db_conn, + "device_lists_stream", + "stream_id", + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ], ) device_list_max = self._device_list_id_gen.get_current_token() self._device_list_stream_cache = StreamChangeCache( diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py index acca079f23..649e835303 100644 --- a/synapse/storage/data_stores/main/__init__.py +++ b/synapse/storage/data_stores/main/__init__.py @@ -144,7 +144,10 @@ class DataStore( db_conn, "device_lists_stream", "stream_id", - extra_tables=[("user_signature_stream", "stream_id")], + extra_tables=[ + ("user_signature_stream", "stream_id"), + ("device_lists_outbound_pokes", "stream_id"), + ], ) self._cross_signing_id_gen = StreamIdGenerator( db_conn, "e2e_cross_signing_keys", "stream_id" From f5caa1864e3d3c24c691b3a3bff723f77def129e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Feb 2020 11:21:25 +0000 Subject: [PATCH 193/382] Change device lists stream to have one row per id. This will make it possible to process the streams more incrementally, avoiding having to process large chunks at once. --- synapse/storage/data_stores/main/devices.py | 59 ++++++++++++++------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index d55733a4cd..3299607910 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -1017,29 +1017,41 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): """Persist that a user's devices have been updated, and which hosts (if any) should be poked. 
""" - with self._device_list_id_gen.get_next() as stream_id: + if not device_ids: + return + + with self._device_list_id_gen.get_next_mult(len(device_ids)) as stream_ids: yield self.db.runInteraction( - "add_device_change_to_streams", - self._add_device_change_txn, + "add_device_change_to_stream", + self._add_device_change_to_stream_txn, + user_id, + device_ids, + stream_ids, + ) + + if not hosts: + return stream_ids[-1] + + context = get_active_span_text_map() + with self._device_list_id_gen.get_next_mult( + len(hosts) * len(device_ids) + ) as stream_ids: + yield self.db.runInteraction( + "add_device_outbound_poke_to_stream", + self._add_device_outbound_poke_to_stream_txn, user_id, device_ids, hosts, - stream_id, + stream_ids, + context, ) - return stream_id - def _add_device_change_txn(self, txn, user_id, device_ids, hosts, stream_id): - now = self._clock.time_msec() + return stream_ids[-1] + def _add_device_change_to_stream_txn(self, txn, user_id, device_ids, stream_ids): txn.call_after( - self._device_list_stream_cache.entity_has_changed, user_id, stream_id + self._device_list_stream_cache.entity_has_changed, user_id, stream_ids[-1], ) - for host in hosts: - txn.call_after( - self._device_list_federation_stream_cache.entity_has_changed, - host, - stream_id, - ) # Delete older entries in the table, as we really only care about # when the latest change happened. @@ -1048,7 +1060,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): DELETE FROM device_lists_stream WHERE user_id = ? AND device_id = ? AND stream_id < ? """, - [(user_id, device_id, stream_id) for device_id in device_ids], + [(user_id, device_id, stream_ids[0]) for device_id in device_ids], ) self.db.simple_insert_many_txn( @@ -1056,11 +1068,22 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): table="device_lists_stream", values=[ {"stream_id": stream_id, "user_id": user_id, "device_id": device_id} - for device_id in device_ids + for stream_id, device_id in zip(stream_ids, device_ids) ], ) - context = get_active_span_text_map() + def _add_device_outbound_poke_to_stream_txn( + self, txn, user_id, device_ids, hosts, stream_ids, context, + ): + for host in hosts: + txn.call_after( + self._device_list_federation_stream_cache.entity_has_changed, + host, + stream_ids[-1], + ) + + now = self._clock.time_msec() + next_stream_id = iter(stream_ids) self.db.simple_insert_many_txn( txn, @@ -1068,7 +1091,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): values=[ { "destination": destination, - "stream_id": stream_id, + "stream_id": next(next_stream_id), "user_id": user_id, "device_id": device_id, "sent": False, From 9ce4e344a808e15a36a2d9ea03b77ebfc6ac7fe2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Feb 2020 11:24:05 +0000 Subject: [PATCH 194/382] Change device list replication to match new semantics. Instead of sending down batches of user ID/host tuples, send down a row per entity (user ID or host). --- synapse/app/generic_worker.py | 2 +- synapse/replication/slave/storage/devices.py | 25 ++++++++++---------- synapse/replication/tcp/streams/_base.py | 13 ++++++---- synapse/storage/data_stores/main/devices.py | 15 +++++++----- 4 files changed, 32 insertions(+), 23 deletions(-) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index b2c764bfe8..561a6f4b22 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -774,7 +774,7 @@ class FederationSenderHandler(object): # ... 
as well as device updates and messages elif stream_name == DeviceListsStream.NAME: - hosts = {row.destination for row in rows} + hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index bf46cc4f8a..01a4f85884 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -61,23 +61,24 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto def process_replication_rows(self, stream_name, token, rows): if stream_name == DeviceListsStream.NAME: self._device_list_id_gen.advance(token) - for row in rows: - self._invalidate_caches_for_devices(token, row.user_id, row.destination) + self._invalidate_caches_for_devices(token, rows) elif stream_name == UserSignatureStream.NAME: + self._device_list_id_gen.advance(token) for row in rows: self._user_signature_stream_cache.entity_has_changed(row.user_id, token) return super(SlavedDeviceStore, self).process_replication_rows( stream_name, token, rows ) - def _invalidate_caches_for_devices(self, token, user_id, destination): - self._device_list_stream_cache.entity_has_changed(user_id, token) + def _invalidate_caches_for_devices(self, token, rows): + for row in rows: + if row.entity.startswith("@"): + self._device_list_stream_cache.entity_has_changed(row.entity, token) + self.get_cached_devices_for_user.invalidate((row.entity,)) + self._get_cached_user_device.invalidate_many((row.entity,)) + self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,)) - if destination: - self._device_list_federation_stream_cache.entity_has_changed( - destination, token - ) - - self.get_cached_devices_for_user.invalidate((user_id,)) - self._get_cached_user_device.invalidate_many((user_id,)) - self.get_device_list_last_stream_id_for_remote.invalidate((user_id,)) + else: + self._device_list_federation_stream_cache.entity_has_changed( + row.entity, token + ) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 208e8a667b..7a8b6e9df1 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -94,9 +94,13 @@ PublicRoomsStreamRow = namedtuple( "network_id", # str, optional ), ) -DeviceListsStreamRow = namedtuple( - "DeviceListsStreamRow", ("user_id", "destination") # str # str -) + + +@attr.s +class DeviceListsStreamRow: + entity = attr.ib(type=str) + + ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",)) # str TagAccountDataStreamRow = namedtuple( "TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict @@ -363,7 +367,8 @@ class PublicRoomsStream(Stream): class DeviceListsStream(Stream): - """Someone added/changed/removed a device + """Either a user has updated their devices or a remote server needs to be + told about a device update. """ NAME = "device_lists" diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 3299607910..768afe7a6c 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -612,15 +612,18 @@ class DeviceWorkerStore(SQLBaseStore): combined list of changes to devices, and which destinations need to be poked. `destination` may be None if no destinations need to be poked. 
""" - # We do a group by here as there can be a large number of duplicate - # entries, since we throw away device IDs. + + # This query Does The Right Thing where it'll correctly apply the + # bounds to the inner queries. sql = """ - SELECT MAX(stream_id) AS stream_id, user_id, destination - FROM device_lists_stream - LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id) + SELECT stream_id, entity FROM ( + SELECT stream_id, user_id AS entity FROM device_lists_stream + UNION ALL + SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes + ) AS e WHERE ? < stream_id AND stream_id <= ? - GROUP BY user_id, destination """ + return self.db.execute( "get_all_device_list_changes_for_remotes", None, sql, from_key, to_key ) From 59ad93d2a415cd07ab6f6afd490d0a5ceeec93a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Feb 2020 11:27:37 +0000 Subject: [PATCH 195/382] Newsfile --- changelog.d/7010.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7010.misc diff --git a/changelog.d/7010.misc b/changelog.d/7010.misc new file mode 100644 index 0000000000..4ba1f6cdf8 --- /dev/null +++ b/changelog.d/7010.misc @@ -0,0 +1 @@ +Change device list streams to have one row per ID. From f70f44abc73689a66d0a05dc703ca38241092174 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Feb 2020 11:45:35 +0000 Subject: [PATCH 196/382] Remove handling of multiple rows per ID --- synapse/storage/data_stores/main/devices.py | 35 +--------------- tests/storage/test_devices.py | 45 --------------------- 2 files changed, 1 insertion(+), 79 deletions(-) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 768afe7a6c..06e1d9f033 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -112,23 +112,13 @@ class DeviceWorkerStore(SQLBaseStore): if not has_changed: return now_stream_id, [] - # We retrieve n+1 devices from the list of outbound pokes where n is - # our outbound device update limit. We then check if the very last - # device has the same stream_id as the second-to-last device. If so, - # then we ignore all devices with that stream_id and only send the - # devices with a lower stream_id. - # - # If when culling the list we end up with no devices afterwards, we - # consider the device update to be too large, and simply skip the - # stream_id; the rationale being that such a large device list update - # is likely an error. updates = yield self.db.runInteraction( "get_device_updates_by_remote", self._get_device_updates_by_remote_txn, destination, from_stream_id, now_stream_id, - limit + 1, + limit, ) # Return an empty list if there are no updates @@ -166,14 +156,6 @@ class DeviceWorkerStore(SQLBaseStore): "device_id": verify_key.version, } - # if we have exceeded the limit, we need to exclude any results with the - # same stream_id as the last row. 
- if len(updates) > limit: - stream_id_cutoff = updates[-1][2] - now_stream_id = stream_id_cutoff - 1 - else: - stream_id_cutoff = None - # Perform the equivalent of a GROUP BY # # Iterate through the updates list and copy non-duplicate @@ -192,10 +174,6 @@ class DeviceWorkerStore(SQLBaseStore): query_map = {} cross_signing_keys_by_user = {} for user_id, device_id, update_stream_id, update_context in updates: - if stream_id_cutoff is not None and update_stream_id >= stream_id_cutoff: - # Stop processing updates - break - if ( user_id in master_key_by_user and device_id == master_key_by_user[user_id]["device_id"] @@ -218,17 +196,6 @@ class DeviceWorkerStore(SQLBaseStore): if update_stream_id > previous_update_stream_id: query_map[key] = (update_stream_id, update_context) - # If we didn't find any updates with a stream_id lower than the cutoff, it - # means that there are more than limit updates all of which have the same - # steam_id. - - # That should only happen if a client is spamming the server with new - # devices, in which case E2E isn't going to work well anyway. We'll just - # skip that stream_id and return an empty list, and continue with the next - # stream_id next time. - if not query_map and not cross_signing_keys_by_user: - return stream_id_cutoff, [] - results = yield self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index 6f8d990959..c2539b353a 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -88,51 +88,6 @@ class DeviceStoreTestCase(tests.unittest.TestCase): # Check original device_ids are contained within these updates self._check_devices_in_updates(device_ids, device_updates) - @defer.inlineCallbacks - def test_get_device_updates_by_remote_limited(self): - # Test breaking the update limit in 1, 101, and 1 device_id segments - - # first add one device - device_ids1 = ["device_id0"] - yield self.store.add_device_change_to_streams( - "user_id", device_ids1, ["someotherhost"] - ) - - # then add 101 - device_ids2 = ["device_id" + str(i + 1) for i in range(101)] - yield self.store.add_device_change_to_streams( - "user_id", device_ids2, ["someotherhost"] - ) - - # then one more - device_ids3 = ["newdevice"] - yield self.store.add_device_change_to_streams( - "user_id", device_ids3, ["someotherhost"] - ) - - # - # now read them back. 
- # - - # first we should get a single update - now_stream_id, device_updates = yield self.store.get_device_updates_by_remote( - "someotherhost", -1, limit=100 - ) - self._check_devices_in_updates(device_ids1, device_updates) - - # Then we should get an empty list back as the 101 devices broke the limit - now_stream_id, device_updates = yield self.store.get_device_updates_by_remote( - "someotherhost", now_stream_id, limit=100 - ) - self.assertEqual(len(device_updates), 0) - - # The 101 devices should've been cleared, so we should now just get one device - # update - now_stream_id, device_updates = yield self.store.get_device_updates_by_remote( - "someotherhost", now_stream_id, limit=100 - ) - self._check_devices_in_updates(device_ids3, device_updates) - def _check_devices_in_updates(self, expected_device_ids, device_updates): """Check that an specific device ids exist in a list of device update EDUs""" self.assertEqual(len(device_updates), len(expected_device_ids)) From 12d425900048b29a95b06428f04ed6ecc9e09d15 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 28 Feb 2020 07:31:07 -0500 Subject: [PATCH 197/382] Add some type annotations to the federation base & client classes (#6995) --- changelog.d/6995.misc | 1 + synapse/federation/federation_base.py | 60 +++++++++++++++---------- synapse/federation/federation_client.py | 10 ++--- tox.ini | 2 + 4 files changed, 45 insertions(+), 28 deletions(-) create mode 100644 changelog.d/6995.misc diff --git a/changelog.d/6995.misc b/changelog.d/6995.misc new file mode 100644 index 0000000000..884b4cf4ee --- /dev/null +++ b/changelog.d/6995.misc @@ -0,0 +1 @@ +Add some type annotations to the federation base & client classes. diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 9fff65716a..190ea1fba1 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -15,11 +15,13 @@ # limitations under the License. import logging from collections import namedtuple +from typing import Iterable, List import six from twisted.internet import defer -from twisted.internet.defer import DeferredList +from twisted.internet.defer import Deferred, DeferredList +from twisted.python.failure import Failure from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError @@ -29,6 +31,7 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import check_event_content_hash +from synapse.crypto.keyring import Keyring from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict @@ -56,7 +59,12 @@ class FederationBase(object): @defer.inlineCallbacks def _check_sigs_and_hash_and_fetch( - self, origin, pdus, room_version, outlier=False, include_none=False + self, + origin: str, + pdus: List[EventBase], + room_version: str, + outlier: bool = False, + include_none: bool = False, ): """Takes a list of PDUs and checks the signatures and hashs of each one. If a PDU fails its signature check then we check if we have it in @@ -69,11 +77,11 @@ class FederationBase(object): a new list. 
Args: - origin (str) - pdu (list) - room_version (str) - outlier (bool): Whether the events are outliers or not - include_none (str): Whether to include None in the returned list + origin + pdu + room_version + outlier: Whether the events are outliers or not + include_none: Whether to include None in the returned list for events that have failed their checks Returns: @@ -82,7 +90,7 @@ class FederationBase(object): deferreds = self._check_sigs_and_hashes(room_version, pdus) @defer.inlineCallbacks - def handle_check_result(pdu, deferred): + def handle_check_result(pdu: EventBase, deferred: Deferred): try: res = yield make_deferred_yieldable(deferred) except SynapseError: @@ -96,8 +104,10 @@ class FederationBase(object): if not res and pdu.origin != origin: try: + # This should not exist in the base implementation, until + # this is fixed, ignore it for typing. See issue #6997. res = yield defer.ensureDeferred( - self.get_pdu( + self.get_pdu( # type: ignore destinations=[pdu.origin], event_id=pdu.event_id, room_version=room_version, @@ -127,21 +137,23 @@ class FederationBase(object): else: return [p for p in valid_pdus if p] - def _check_sigs_and_hash(self, room_version, pdu): + def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred: return make_deferred_yieldable( self._check_sigs_and_hashes(room_version, [pdu])[0] ) - def _check_sigs_and_hashes(self, room_version, pdus): + def _check_sigs_and_hashes( + self, room_version: str, pdus: List[EventBase] + ) -> List[Deferred]: """Checks that each of the received events is correctly signed by the sending server. Args: - room_version (str): The room version of the PDUs - pdus (list[FrozenEvent]): the events to be checked + room_version: The room version of the PDUs + pdus: the events to be checked Returns: - list[Deferred]: for each input event, a deferred which: + For each input event, a deferred which: * returns the original event if the checks pass * returns a redacted version of the event (if the signature matched but the hash did not) @@ -152,7 +164,7 @@ class FederationBase(object): ctx = LoggingContext.current_context() - def callback(_, pdu): + def callback(_, pdu: EventBase): with PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was @@ -189,7 +201,7 @@ class FederationBase(object): return pdu - def errback(failure, pdu): + def errback(failure: Failure, pdu: EventBase): failure.trap(SynapseError) with PreserveLoggingContext(ctx): logger.warning( @@ -215,16 +227,18 @@ class PduToCheckSig( pass -def _check_sigs_on_pdus(keyring, room_version, pdus): +def _check_sigs_on_pdus( + keyring: Keyring, room_version: str, pdus: Iterable[EventBase] +) -> List[Deferred]: """Check that the given events are correctly signed Args: - keyring (synapse.crypto.Keyring): keyring object to do the checks - room_version (str): the room version of the PDUs - pdus (Collection[EventBase]): the events to be checked + keyring: keyring object to do the checks + room_version: the room version of the PDUs + pdus: the events to be checked Returns: - List[Deferred]: a Deferred for each event in pdus, which will either succeed if + A Deferred for each event in pdus, which will either succeed if the signatures are valid, or fail (with a SynapseError) if not. 
""" @@ -329,7 +343,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check] -def _flatten_deferred_list(deferreds): +def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred: """Given a list of deferreds, either return the single deferred, combine into a DeferredList, or return an already resolved deferred. """ @@ -341,7 +355,7 @@ def _flatten_deferred_list(deferreds): return defer.succeed(None) -def _is_invite_via_3pid(event): +def _is_invite_via_3pid(event: EventBase) -> bool: return ( event.type == EventTypes.Member and event.membership == Membership.INVITE diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4870e39652..b5538bc07a 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -187,7 +187,7 @@ class FederationClient(FederationBase): async def backfill( self, dest: str, room_id: str, limit: int, extremities: Iterable[str] - ) -> List[EventBase]: + ) -> Optional[List[EventBase]]: """Requests some more historic PDUs for the given room from the given destination server. @@ -199,9 +199,9 @@ class FederationClient(FederationBase): """ logger.debug("backfill extrem=%s", extremities) - # If there are no extremeties then we've (probably) reached the start. + # If there are no extremities then we've (probably) reached the start. if not extremities: - return + return None transaction_data = await self.transport_layer.backfill( dest, room_id, extremities, limit @@ -284,7 +284,7 @@ class FederationClient(FederationBase): pdu_list = [ event_from_pdu_json(p, room_version, outlier=outlier) for p in transaction_data["pdus"] - ] + ] # type: List[EventBase] if pdu_list and pdu_list[0]: pdu = pdu_list[0] @@ -615,7 +615,7 @@ class FederationClient(FederationBase): ] if auth_chain_create_events != [create_event.event_id]: raise InvalidResponseError( - "Unexpected create event(s) in auth chain" + "Unexpected create event(s) in auth chain: %s" % (auth_chain_create_events,) ) diff --git a/tox.ini b/tox.ini index 6521535137..097ebb8774 100644 --- a/tox.ini +++ b/tox.ini @@ -181,6 +181,8 @@ commands = mypy \ synapse/appservice \ synapse/config \ synapse/events/spamcheck.py \ + synapse/federation/federation_base.py \ + synapse/federation/federation_client.py \ synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/presence.py \ From d96ac97d29bb55a98a9ea2b7ab8f98fd72e4a419 Mon Sep 17 00:00:00 2001 From: Sandro Date: Sun, 1 Mar 2020 00:32:26 +0100 Subject: [PATCH 198/382] Fix mounting of homeserver.yaml when it does not exist on host (#6913) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Sandro Jäckel --- contrib/docker/docker-compose.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 453b305053..17354b6610 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -15,10 +15,9 @@ services: restart: unless-stopped # See the readme for a full documentation of the environment settings environment: - - SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml + - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml volumes: # You may either store all the files in a local folder - - ./matrix-config/homeserver.yaml:/etc/homeserver.yaml - ./files:/data # .. 
or you may split this between different storage points # - ./files:/data From e4ffb14d5764d49efc28e7f3970d443eae11f087 Mon Sep 17 00:00:00 2001 From: Uday Bansal <43824981+udaybansal19@users.noreply.github.com> Date: Sun, 1 Mar 2020 05:07:23 +0530 Subject: [PATCH 199/382] Fix last date for ACMEv1 install (#7015) Support for getting TLS certificates through ACMEv1 ended on November 2019. Signed-off-by: Uday Bansal <43824981+udaybansal19@users.noreply.github.com> --- INSTALL.md | 2 +- changelog.d/7015.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7015.misc diff --git a/INSTALL.md b/INSTALL.md index aa5eb882bb..ffb82bdcc3 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -418,7 +418,7 @@ so, you will need to edit `homeserver.yaml`, as follows: for having Synapse automatically provision and renew federation certificates through ACME can be found at [ACME.md](docs/ACME.md). Note that, as pointed out in that document, this feature will not - work with installs set up after November 2020. + work with installs set up after November 2019. If you are using your own certificate, be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates diff --git a/changelog.d/7015.misc b/changelog.d/7015.misc new file mode 100644 index 0000000000..9709dc606e --- /dev/null +++ b/changelog.d/7015.misc @@ -0,0 +1 @@ +Change date in INSTALL.md#tls-certificates for last date of getting TLS certificates to November 2019. \ No newline at end of file From cc7ab0d84afd7cef3f5e0aabd72602535e9d4fbf Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 1 Mar 2020 21:21:36 +0000 Subject: [PATCH 200/382] rst->md --- contrib/grafana/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/grafana/README.md b/contrib/grafana/README.md index 6a6cc0bed4..ca780d412e 100644 --- a/contrib/grafana/README.md +++ b/contrib/grafana/README.md @@ -1,6 +1,6 @@ # Using the Synapse Grafana dashboard 0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/ -1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst +1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md 2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/ 3. 
Set up additional recording rules From e53744c737527ebb2af94b677b359743473b0434 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2020 12:52:28 +0000 Subject: [PATCH 201/382] Fix worker handling --- synapse/app/generic_worker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 561a6f4b22..d596852419 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -676,8 +676,9 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): elif stream_name == "device_lists": all_room_ids = set() for row in rows: - room_ids = await self.store.get_rooms_for_user(row.user_id) - all_room_ids.update(room_ids) + if row.entity.startswith("@"): + room_ids = await self.store.get_rooms_for_user(row.entity) + all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) elif stream_name == "presence": await self.presence_handler.process_replication_rows(token, rows) From bbeee33d63c43cb80118c0dccf8abd9d4ac1b8f3 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 28 Feb 2020 10:58:05 +0100 Subject: [PATCH 202/382] Fixed set a user as an admin with the new API (#6928) Fix #6910 --- changelog.d/6910.bugfix | 1 + synapse/rest/admin/users.py | 6 +- .../storage/data_stores/main/registration.py | 16 +- tests/rest/admin/test_user.py | 211 +++++++++++++++--- 4 files changed, 195 insertions(+), 39 deletions(-) create mode 100644 changelog.d/6910.bugfix diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix new file mode 100644 index 0000000000..707f1ff7b5 --- /dev/null +++ b/changelog.d/6910.bugfix @@ -0,0 +1 @@ +Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/`. Contributed by @dklimpel. diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 2107b5dc56..064908fbb0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -211,9 +211,7 @@ class UserRestServletV2(RestServlet): if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") - await self.admin_handler.set_user_server_admin( - target_user, set_admin_to - ) + await self.store.set_server_admin(target_user, set_admin_to) if "password" in body: if ( @@ -648,6 +646,6 @@ class UserAdminServlet(RestServlet): if target_user == auth_user and not set_admin_to: raise SynapseError(400, "You may not demote yourself.") - await self.store.set_user_server_admin(target_user, set_admin_to) + await self.store.set_server_admin(target_user, set_admin_to) return 200, {} diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index 49306642ed..3e53c8568a 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -301,12 +301,16 @@ class RegistrationWorkerStore(SQLBaseStore): admin (bool): true iff the user is to be a server admin, false otherwise. 
""" - return self.db.simple_update_one( - table="users", - keyvalues={"name": user.to_string()}, - updatevalues={"admin": 1 if admin else 0}, - desc="set_server_admin", - ) + + def set_server_admin_txn(txn): + self.db.simple_update_one_txn( + txn, "users", {"name": user.to_string()}, {"admin": 1 if admin else 0} + ) + self._invalidate_cache_and_stream( + txn, self.get_user_by_id, (user.to_string(),) + ) + + return self.db.runInteraction("set_server_admin", set_server_admin_txn) def _query_for_auth(self, txn, token): sql = ( diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 490ce8f55d..70688c2494 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -16,6 +16,7 @@ import hashlib import hmac import json +import urllib.parse from mock import Mock @@ -371,22 +372,24 @@ class UserRestTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - self.url = "/_synapse/admin/v2/users/@bob:test" - self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") self.other_user_token = self.login("user", "pass") + self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote( + self.other_user + ) def test_requester_is_no_admin(self): """ If the user is not a server admin, an error is returned. """ self.hs.config.registration_shared_secret = None + url = "/_synapse/admin/v2/users/@bob:test" request, channel = self.make_request( - "GET", self.url, access_token=self.other_user_token, + "GET", url, access_token=self.other_user_token, ) self.render(request) @@ -394,7 +397,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("You are not a server admin", channel.json_body["error"]) request, channel = self.make_request( - "PUT", self.url, access_token=self.other_user_token, content=b"{}", + "PUT", url, access_token=self.other_user_token, content=b"{}", ) self.render(request) @@ -417,24 +420,26 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) - def test_requester_is_admin(self): + def test_create_server_admin(self): """ - If the user is a server admin, a new user is created. + Check that a new admin user is created successfully. 
""" self.hs.config.registration_shared_secret = None + url = "/_synapse/admin/v2/users/@bob:test" + # Create user (server admin) body = json.dumps( { "password": "abc123", "admin": True, + "displayname": "Bob's name", "threepids": [{"medium": "email", "address": "bob@bob.bob"}], } ) - # Create user request, channel = self.make_request( "PUT", - self.url, + url, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) @@ -442,29 +447,85 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("bob", channel.json_body["displayname"]) + self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) + self.assertEqual(True, channel.json_body["admin"]) # Get user request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", url, access_token=self.admin_user_tok, ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("bob", channel.json_body["displayname"]) - self.assertEqual(1, channel.json_body["admin"]) - self.assertEqual(0, channel.json_body["is_guest"]) - self.assertEqual(0, channel.json_body["deactivated"]) + self.assertEqual("Bob's name", channel.json_body["displayname"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) + self.assertEqual(True, channel.json_body["admin"]) + self.assertEqual(False, channel.json_body["is_guest"]) + self.assertEqual(False, channel.json_body["deactivated"]) + + def test_create_user(self): + """ + Check that a new regular user is created successfully. 
+        """
+        self.hs.config.registration_shared_secret = None
+        url = "/_synapse/admin/v2/users/@bob:test"
+
+        # Create user
+        body = json.dumps(
+            {
+                "password": "abc123",
+                "admin": False,
+                "displayname": "Bob's name",
+                "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+            }
+        )
+
+        request, channel = self.make_request(
+            "PUT",
+            url,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@bob:test", channel.json_body["name"])
+        self.assertEqual("Bob's name", channel.json_body["displayname"])
+        self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+        self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
+        self.assertEqual(False, channel.json_body["admin"])
+        self.assertEqual(False, channel.json_body["is_guest"])
+        self.assertEqual(False, channel.json_body["deactivated"])
+
+    def test_set_password(self):
+        """
+        Test setting a new password for another user.
+        """
+        self.hs.config.registration_shared_secret = None
 
         # Change password
         body = json.dumps({"password": "hahaha"})
 
         request, channel = self.make_request(
             "PUT",
-            self.url,
+            self.url_other_user,
             access_token=self.admin_user_tok,
             content=body.encode(encoding="utf_8"),
         )
         self.render(request)
 
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
+    def test_set_displayname(self):
+        """
+        Test setting the displayname of another user.
+        """
+        self.hs.config.registration_shared_secret = None
+        # Modify user
+        body = json.dumps({"displayname": "foobar"})
+
+        request, channel = self.make_request(
+            "PUT",
+            self.url_other_user,
+            access_token=self.admin_user_tok,
+            content=body.encode(encoding="utf_8"),
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_other_user, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual("@user:test", channel.json_body["name"])
+        self.assertEqual("foobar", channel.json_body["displayname"])
+
+    def test_set_threepid(self):
+        """
+        Test setting threepid for another user.
+ """ + self.hs.config.registration_shared_secret = None + + # Delete old and add new threepid to user body = json.dumps( - { - "displayname": "foobar", - "deactivated": True, - "threepids": [{"medium": "email", "address": "bob2@bob.bob"}], - } + {"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]} ) request, channel = self.make_request( "PUT", - self.url, + self.url_other_user, access_token=self.admin_user_tok, content=body.encode(encoding="utf_8"), ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("foobar", channel.json_body["displayname"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_other_user, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"]) + + def test_deactivate_user(self): + """ + Test deactivating another user. + """ + + # Deactivate user + body = json.dumps({"deactivated": True}) + + request, channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(True, channel.json_body["deactivated"]) # the user is deactivated, the threepid will be deleted # Get user request, channel = self.make_request( - "GET", self.url, access_token=self.admin_user_tok, + "GET", self.url_other_user, access_token=self.admin_user_tok, ) self.render(request) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("foobar", channel.json_body["displayname"]) - self.assertEqual(1, channel.json_body["admin"]) - self.assertEqual(0, channel.json_body["is_guest"]) - self.assertEqual(1, channel.json_body["deactivated"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["deactivated"]) + + def test_set_user_as_admin(self): + """ + Test setting the admin flag on a user. 
+ """ + self.hs.config.registration_shared_secret = None + + # Set a user as an admin + body = json.dumps({"admin": True}) + + request, channel = self.make_request( + "PUT", + self.url_other_user, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["admin"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_other_user, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("@user:test", channel.json_body["name"]) + self.assertEqual(True, channel.json_body["admin"]) From 174aaa1d62e54b57499d0606bf0f24bf81c6adf2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 2 Mar 2020 14:53:56 +0000 Subject: [PATCH 203/382] remove spurious changelog --- changelog.d/6910.bugfix | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/6910.bugfix diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix deleted file mode 100644 index 707f1ff7b5..0000000000 --- a/changelog.d/6910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/`. Contributed by @dklimpel. From 3ab8e9c2932476d18af94b6c60cc3613139148ec Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2020 16:17:11 +0000 Subject: [PATCH 204/382] Fix py35-old CI by using native tox. (#7018) I'm not really sure how this was going wrong, but this seems like the right approach anyway. --- .buildkite/scripts/test_old_deps.sh | 7 +------ changelog.d/7018.bugfix | 1 + 2 files changed, 2 insertions(+), 6 deletions(-) create mode 100644 changelog.d/7018.bugfix diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh index dfd71b2511..cdb77b556c 100755 --- a/.buildkite/scripts/test_old_deps.sh +++ b/.buildkite/scripts/test_old_deps.sh @@ -6,12 +6,7 @@ set -ex apt-get update -apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev - -# workaround for https://github.com/jaraco/zipp/issues/40 -python3.5 -m pip install 'setuptools>=34.4.0' - -python3.5 -m pip install tox +apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev tox export LANG="C.UTF-8" diff --git a/changelog.d/7018.bugfix b/changelog.d/7018.bugfix new file mode 100644 index 0000000000..d1b6c1d464 --- /dev/null +++ b/changelog.d/7018.bugfix @@ -0,0 +1 @@ +Fix py35-old CI by using native tox package. 
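[Editor's note] The UserRestTestCase changes earlier in this series all exercise
the same admin endpoint, PUT /_synapse/admin/v2/users/<user_id>. Below is a
minimal sketch of the equivalent request outside the test harness; the
homeserver URL, the admin access token, and the use of the third-party
`requests` library are illustrative assumptions, not part of any patch here:

    import requests

    BASE_URL = "https://homeserver.example"      # placeholder, not from the patches
    ADMIN_TOKEN = "<server admin access token>"  # placeholder, not from the patches

    # Same endpoint and body shape as the tests above: 201 is returned when
    # the user is newly created, 200 when an existing user is updated.
    resp = requests.put(
        BASE_URL + "/_synapse/admin/v2/users/@bob:test",
        headers={"Authorization": "Bearer " + ADMIN_TOKEN},
        json={
            "password": "abc123",
            "admin": False,
            "displayname": "Bob's name",
            "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
        },
    )
    assert resp.status_code in (200, 201)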
From b2bd54a2e31d9a248f73fadb184ae9b4cbdb49f9 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 2 Mar 2020 16:36:32 +0000 Subject: [PATCH 205/382] Add a confirmation step to the SSO login flow --- docs/sample_config.yaml | 34 ++++++++ synapse/config/_base.pyi | 2 + synapse/config/homeserver.py | 2 + synapse/config/sso.py | 74 ++++++++++++++++ .../res/templates/sso_redirect_confirm.html | 14 +++ synapse/rest/client/v1/login.py | 40 +++++++-- tests/rest/client/v1/test_login.py | 85 +++++++++++++++++++ 7 files changed, 245 insertions(+), 6 deletions(-) create mode 100644 synapse/config/sso.py create mode 100644 synapse/res/templates/sso_redirect_confirm.html diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8a036071e1..bbb8a4d934 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1360,6 +1360,40 @@ saml2_config: # # name: value +# Additional settings to use with single-sign on systems such as SAML2 and CAS. +# +sso: + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. + # + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. + # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. + # + # Synapse will look for the following templates in this directory: + # + # * HTML page for confirmation of redirect during authentication: + # 'sso_redirect_confirm.html'. + # + # When rendering, this template is given three variables: + # * redirect_url: the URL the user is about to be redirected to. Needs + # manual escaping (see + # https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). + # + # * display_url: the same as `redirect_url`, but with the query + # parameters stripped. The intention is to have a + # human-readable URL to show to users, not to use it as + # the final address to redirect to. Needs manual escaping + # (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). + # + # * server_name: the homeserver's name. + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" + + # The JWT needs to contain a globally unique "sub" (subject) claim. 
# #jwt_config: diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 86bc965ee4..3053fc9d27 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -24,6 +24,7 @@ from synapse.config import ( server, server_notices_config, spam_checker, + sso, stats, third_party_event_rules, tls, @@ -57,6 +58,7 @@ class RootConfig: key: key.KeyConfig saml2: saml2_config.SAML2Config cas: cas.CasConfig + sso: sso.SSOConfig jwt: jwt_config.JWTConfig password: password.PasswordConfig email: emailconfig.EmailConfig diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 6e348671c7..b4bca08b20 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -38,6 +38,7 @@ from .saml2_config import SAML2Config from .server import ServerConfig from .server_notices_config import ServerNoticesConfig from .spam_checker import SpamCheckerConfig +from .sso import SSOConfig from .stats import StatsConfig from .third_party_event_rules import ThirdPartyRulesConfig from .tls import TlsConfig @@ -65,6 +66,7 @@ class HomeServerConfig(RootConfig): KeyConfig, SAML2Config, CasConfig, + SSOConfig, JWTConfig, PasswordConfig, EmailConfig, diff --git a/synapse/config/sso.py b/synapse/config/sso.py new file mode 100644 index 0000000000..f426b65b4f --- /dev/null +++ b/synapse/config/sso.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Dict + +import pkg_resources + +from ._base import Config, ConfigError + + +class SSOConfig(Config): + """SSO Configuration + """ + + section = "sso" + + def read_config(self, config, **kwargs): + sso_config = config.get("sso") or {} # type: Dict[str, Any] + + # Pick a template directory in order of: + # * The sso-specific template_dir + # * /path/to/synapse/install/res/templates + template_dir = sso_config.get("template_dir") + if not template_dir: + template_dir = pkg_resources.resource_filename("synapse", "res/templates",) + + self.sso_redirect_confirm_template_dir = template_dir + + def generate_config_section(self, **kwargs): + return """\ + # Additional settings to use with single-sign on systems such as SAML2 and CAS. + # + sso: + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. + # + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. + # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. + # + # Synapse will look for the following templates in this directory: + # + # * HTML page for a confirmation step before redirecting back to the client + # with the login token: 'sso_redirect_confirm.html'. + # + # When rendering, this template is given three variables: + # * redirect_url: the URL the user is about to be redirected to. Needs + # manual escaping (see + # https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). 
+ # + # * display_url: the same as `redirect_url`, but with the query + # parameters stripped. The intention is to have a + # human-readable URL to show to users, not to use it as + # the final address to redirect to. Needs manual escaping + # (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). + # + # * server_name: the homeserver's name. + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" + """ diff --git a/synapse/res/templates/sso_redirect_confirm.html b/synapse/res/templates/sso_redirect_confirm.html new file mode 100644 index 0000000000..20a15e1e74 --- /dev/null +++ b/synapse/res/templates/sso_redirect_confirm.html @@ -0,0 +1,14 @@ + + + + + SSO redirect confirmation + + +

+        <p>The application at {{ display_url | e }} is requesting full access to your {{ server_name }} Matrix account.</p>
+        <p>If you don't recognise this address, you should ignore this and close this tab.</p>
+        <p>
+            <a href="{{ redirect_url | e }}">I trust this address</a>
+        </p>
+ + \ No newline at end of file diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 1294e080dc..1acfd01d8e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -29,6 +29,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest +from synapse.push.mailer import load_jinja2_templates from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import UserID, map_username_to_mxid_localpart @@ -548,6 +549,13 @@ class SSOAuthHandler(object): self._registration_handler = hs.get_registration_handler() self._macaroon_gen = hs.get_macaroon_generator() + # Load the redirect page HTML template + self._template = load_jinja2_templates( + hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"], + )[0] + + self._server_name = hs.config.server_name + async def on_successful_auth( self, username, request, client_redirect_url, user_display_name=None ): @@ -592,21 +600,41 @@ class SSOAuthHandler(object): request: client_redirect_url: """ - + # Create a login token login_token = self._macaroon_gen.generate_short_term_login_token( registered_user_id ) - redirect_url = self._add_login_token_to_redirect_url( - client_redirect_url, login_token + + # Remove the query parameters from the redirect URL to get a shorter version of + # it. This is only to display a human-readable URL in the template, but not the + # URL we redirect users to. + redirect_url_no_params = client_redirect_url.split("?")[0] + + # Append the login token to the original redirect URL (i.e. with its query + # parameters kept intact) to build the URL to which the template needs to + # redirect the users once they have clicked on the confirmation link. 
+ redirect_url = self._add_query_param_to_url( + client_redirect_url, "loginToken", login_token ) - request.redirect(redirect_url) + + # Serve the redirect confirmation page + html = self._template.render( + display_url=redirect_url_no_params, + redirect_url=redirect_url, + server_name=self._server_name, + ) + + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html),)) + request.write(html.encode("utf8")) finish_request(request) @staticmethod - def _add_login_token_to_redirect_url(url, token): + def _add_query_param_to_url(url, param_name, param): url_parts = list(urllib.parse.urlparse(url)) query = dict(urllib.parse.parse_qsl(url_parts[4])) - query.update({"loginToken": token}) + query.update({param_name: param}) url_parts[4] = urllib.parse.urlencode(query) return urllib.parse.urlunparse(url_parts) diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index eae5411325..2b8ad5c753 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -1,4 +1,7 @@ import json +import urllib.parse + +from mock import Mock import synapse.rest.admin from synapse.rest.client.v1 import login @@ -252,3 +255,85 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEquals(channel.code, 200, channel.result) + + +class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): + + servlets = [ + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.base_url = "https://matrix.goodserver.com/" + self.redirect_path = "_synapse/client/login/sso/redirect/confirm" + + config = self.default_config() + config["enable_registration"] = True + config["cas_config"] = { + "enabled": True, + "server_url": "https://fake.test", + "service_url": "https://matrix.goodserver.com:8448", + } + config["public_baseurl"] = self.base_url + + async def get_raw(uri, args): + """Return an example response payload from a call to the `/proxyValidate` + endpoint of a CAS server, copied from + https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20 + + This needs to be returned by an async function (as opposed to set as the + mock's return value) because the corresponding Synapse code awaits on it. + """ + return """ + + + username + PGTIOU-84678-8a9d... + + https://proxy2/pgtUrl + https://proxy1/pgtUrl + + + + """ + + mocked_http_client = Mock(spec=["get_raw"]) + mocked_http_client.get_raw.side_effect = get_raw + + self.hs = self.setup_test_homeserver( + config=config, proxied_http_client=mocked_http_client, + ) + + return self.hs + + def test_cas_redirect_confirm(self): + """Tests that the SSO login flow serves a confirmation page before redirecting a + user to the redirect URL. + """ + base_url = "/login/cas/ticket?redirectUrl" + redirect_url = "https://dodgy-site.com/" + + url_parts = list(urllib.parse.urlparse(base_url)) + query = dict(urllib.parse.parse_qsl(url_parts[4])) + query.update({"redirectUrl": redirect_url}) + query.update({"ticket": "ticket"}) + url_parts[4] = urllib.parse.urlencode(query) + cas_ticket_url = urllib.parse.urlunparse(url_parts) + + # Get Synapse to call the fake CAS and serve the template. + request, channel = self.make_request("GET", cas_ticket_url) + self.render(request) + + # Test that the response is HTML. 
+ content_type_header_value = "" + for header in channel.result.get("headers", []): + if header[0] == b"Content-Type": + content_type_header_value = header[1].decode("utf8") + + self.assertTrue(content_type_header_value.startswith("text/html")) + + # Test that the body isn't empty. + self.assertTrue(len(channel.result["body"]) > 0) + + # And that it contains our redirect link + self.assertIn(redirect_url, channel.result["body"].decode("UTF-8")) From b29474e0aa866a50ec96cd921cc5025fc9718e73 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Mar 2020 16:52:15 +0000 Subject: [PATCH 206/382] Always return a deferred from `get_current_state_deltas`. (#7019) This currently causes presence notify code to log exceptions when there is no state changes to process. This doesn't actually cause any problems as we'd simply do nothing anyway. --- changelog.d/7019.misc | 1 + synapse/storage/data_stores/main/state_deltas.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7019.misc diff --git a/changelog.d/7019.misc b/changelog.d/7019.misc new file mode 100644 index 0000000000..5130f4e8af --- /dev/null +++ b/changelog.d/7019.misc @@ -0,0 +1 @@ +Port `synapse.handlers.presence` to async/await. diff --git a/synapse/storage/data_stores/main/state_deltas.py b/synapse/storage/data_stores/main/state_deltas.py index 12c982cb26..725e12507f 100644 --- a/synapse/storage/data_stores/main/state_deltas.py +++ b/synapse/storage/data_stores/main/state_deltas.py @@ -15,6 +15,8 @@ import logging +from twisted.internet import defer + from synapse.storage._base import SQLBaseStore logger = logging.getLogger(__name__) @@ -56,7 +58,7 @@ class StateDeltasStore(SQLBaseStore): # if the CSDs haven't changed between prev_stream_id and now, we # know for certain that they haven't changed between prev_stream_id and # max_stream_id. - return max_stream_id, [] + return defer.succeed((max_stream_id, [])) def get_current_state_deltas_txn(txn): # First we calculate the max stream id that will give us less than From b68041df3dcbcf3ca04c500d1712aa22a3c2580c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 2 Mar 2020 17:05:09 +0000 Subject: [PATCH 207/382] Add a whitelist for the SSO confirmation step. --- docs/sample_config.yaml | 22 +++++++++++++++++--- synapse/config/sso.py | 18 +++++++++++++++++ synapse/rest/client/v1/login.py | 26 ++++++++++++++++-------- tests/rest/client/v1/test_login.py | 32 +++++++++++++++++++++++++++--- 4 files changed, 84 insertions(+), 14 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bbb8a4d934..f719ec696f 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1363,6 +1363,22 @@ saml2_config: # Additional settings to use with single-sign on systems such as SAML2 and CAS. # sso: + # A list of client URLs which are whitelisted so that the user does not + # have to confirm giving access to their account to the URL. Any client + # whose URL starts with an entry in the following list will not be subject + # to an additional confirmation step after the SSO login is completed. + # + # WARNING: An entry such as "https://my.client" is insecure, because it + # will also match "https://my.client.evil.site", exposing your users to + # phishing attacks from evil.site. To avoid this, include a slash after the + # hostname: "https://my.client/". + # + # By default, this list is empty. 
+ # + #client_whitelist: + # - https://riot.im/develop + # - https://my.custom.client/ + # Directory in which Synapse will try to find the template files below. # If not set, default templates from within the Synapse package will be used. # @@ -1372,8 +1388,8 @@ sso: # # Synapse will look for the following templates in this directory: # - # * HTML page for confirmation of redirect during authentication: - # 'sso_redirect_confirm.html'. + # * HTML page for a confirmation step before redirecting back to the client + # with the login token: 'sso_redirect_confirm.html'. # # When rendering, this template is given three variables: # * redirect_url: the URL the user is about to be redirected to. Needs @@ -1381,7 +1397,7 @@ sso: # https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). # # * display_url: the same as `redirect_url`, but with the query - # parameters stripped. The intention is to have a + # parameters stripped. The intention is to have a # human-readable URL to show to users, not to use it as # the final address to redirect to. Needs manual escaping # (see https://jinja.palletsprojects.com/en/2.11.x/templates/#html-escaping). diff --git a/synapse/config/sso.py b/synapse/config/sso.py index f426b65b4f..56299bd4e4 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -37,11 +37,29 @@ class SSOConfig(Config): self.sso_redirect_confirm_template_dir = template_dir + self.sso_client_whitelist = sso_config.get("client_whitelist") or [] + def generate_config_section(self, **kwargs): return """\ # Additional settings to use with single-sign on systems such as SAML2 and CAS. # sso: + # A list of client URLs which are whitelisted so that the user does not + # have to confirm giving access to their account to the URL. Any client + # whose URL starts with an entry in the following list will not be subject + # to an additional confirmation step after the SSO login is completed. + # + # WARNING: An entry such as "https://my.client" is insecure, because it + # will also match "https://my.client.evil.site", exposing your users to + # phishing attacks from evil.site. To avoid this, include a slash after the + # hostname: "https://my.client/". + # + # By default, this list is empty. + # + #client_whitelist: + # - https://riot.im/develop + # - https://my.custom.client/ + # Directory in which Synapse will try to find the template files below. # If not set, default templates from within the Synapse package will be used. # diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 1acfd01d8e..b2bc7537db 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -556,6 +556,9 @@ class SSOAuthHandler(object): self._server_name = hs.config.server_name + # cast to tuple for use with str.startswith + self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) + async def on_successful_auth( self, username, request, client_redirect_url, user_display_name=None ): @@ -605,11 +608,6 @@ class SSOAuthHandler(object): registered_user_id ) - # Remove the query parameters from the redirect URL to get a shorter version of - # it. This is only to display a human-readable URL in the template, but not the - # URL we redirect users to. - redirect_url_no_params = client_redirect_url.split("?")[0] - # Append the login token to the original redirect URL (i.e. with its query # parameters kept intact) to build the URL to which the template needs to # redirect the users once they have clicked on the confirmation link. 
@@ -617,17 +615,29 @@ class SSOAuthHandler(object): client_redirect_url, "loginToken", login_token ) - # Serve the redirect confirmation page + # if the client is whitelisted, we can redirect straight to it + if client_redirect_url.startswith(self._whitelisted_sso_clients): + request.redirect(redirect_url) + finish_request(request) + return + + # Otherwise, serve the redirect confirmation page. + + # Remove the query parameters from the redirect URL to get a shorter version of + # it. This is only to display a human-readable URL in the template, but not the + # URL we redirect users to. + redirect_url_no_params = client_redirect_url.split("?")[0] + html = self._template.render( display_url=redirect_url_no_params, redirect_url=redirect_url, server_name=self._server_name, - ) + ).encode("utf-8") request.setResponseCode(200) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader(b"Content-Length", b"%d" % (len(html),)) - request.write(html.encode("utf8")) + request.write(html) finish_request(request) @staticmethod diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 2b8ad5c753..da2c9bfa1e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -268,13 +268,11 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): self.redirect_path = "_synapse/client/login/sso/redirect/confirm" config = self.default_config() - config["enable_registration"] = True config["cas_config"] = { "enabled": True, "server_url": "https://fake.test", "service_url": "https://matrix.goodserver.com:8448", } - config["public_baseurl"] = self.base_url async def get_raw(uri, args): """Return an example response payload from a call to the `/proxyValidate` @@ -310,7 +308,7 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): """Tests that the SSO login flow serves a confirmation page before redirecting a user to the redirect URL. """ - base_url = "/login/cas/ticket?redirectUrl" + base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl" redirect_url = "https://dodgy-site.com/" url_parts = list(urllib.parse.urlparse(base_url)) @@ -325,6 +323,7 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): self.render(request) # Test that the response is HTML. + self.assertEqual(channel.code, 200) content_type_header_value = "" for header in channel.result.get("headers", []): if header[0] == b"Content-Type": @@ -337,3 +336,30 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): # And that it contains our redirect link self.assertIn(redirect_url, channel.result["body"].decode("UTF-8")) + + @override_config( + { + "sso": { + "client_whitelist": [ + "https://legit-site.com/", + "https://other-site.com/", + ] + } + } + ) + def test_cas_redirect_whitelisted(self): + """Tests that the SSO login flow serves a redirect to a whitelisted url + """ + redirect_url = "https://legit-site.com/" + cas_ticket_url = ( + "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" + % (urllib.parse.quote(redirect_url)) + ) + + # Get Synapse to call the fake CAS and serve the template. 
+ request, channel = self.make_request("GET", cas_ticket_url) + self.render(request) + + self.assertEqual(channel.code, 302) + location_headers = channel.headers.getRawHeaders("Location") + self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url) From 65c73cdfec1876a9fec2fd2c3a74923cd146fe0b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 3 Mar 2020 10:54:44 +0000 Subject: [PATCH 208/382] Factor out complete_sso_login and expose it to the Module API --- synapse/config/sso.py | 2 +- synapse/handlers/auth.py | 74 +++++++++++++++++++++++++++++++++ synapse/module_api/__init__.py | 19 +++++++++ synapse/rest/client/v1/login.py | 58 +------------------------- 4 files changed, 96 insertions(+), 57 deletions(-) diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 56299bd4e4..95762689bc 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -16,7 +16,7 @@ from typing import Any, Dict import pkg_resources -from ._base import Config, ConfigError +from ._base import Config class SSOConfig(Config): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 48a88d3c2a..7ca90f91c4 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -17,6 +17,8 @@ import logging import time import unicodedata +import urllib.parse +from typing import Any import attr import bcrypt @@ -38,8 +40,11 @@ from synapse.api.errors import ( from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker +from synapse.http.server import finish_request +from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread from synapse.module_api import ModuleApi +from synapse.push.mailer import load_jinja2_templates from synapse.types import UserID from synapse.util.caches.expiringcache import ExpiringCache @@ -108,6 +113,16 @@ class AuthHandler(BaseHandler): self._clock = self.hs.get_clock() + # Load the SSO redirect confirmation page HTML template + self._sso_redirect_confirm_template = load_jinja2_templates( + hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"], + )[0] + + self._server_name = hs.config.server_name + + # cast to tuple for use with str.startswith + self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) + @defer.inlineCallbacks def validate_user_via_ui_auth(self, requester, request_body, clientip): """ @@ -927,6 +942,65 @@ class AuthHandler(BaseHandler): else: return defer.succeed(False) + def complete_sso_login( + self, + registered_user_id: str, + request: SynapseRequest, + client_redirect_url: str, + ): + """Having figured out a mxid for this user, complete the HTTP request + + Args: + registered_user_id: The registered user ID to complete SSO login for. + request: The request to complete. + client_redirect_url: The URL to which to redirect the user at the end of the + process. + """ + # Create a login token + login_token = self.macaroon_gen.generate_short_term_login_token( + registered_user_id + ) + + # Append the login token to the original redirect URL (i.e. with its query + # parameters kept intact) to build the URL to which the template needs to + # redirect the users once they have clicked on the confirmation link. 
+ redirect_url = self.add_query_param_to_url( + client_redirect_url, "loginToken", login_token + ) + + # if the client is whitelisted, we can redirect straight to it + if client_redirect_url.startswith(self._whitelisted_sso_clients): + request.redirect(redirect_url) + finish_request(request) + return + + # Otherwise, serve the redirect confirmation page. + + # Remove the query parameters from the redirect URL to get a shorter version of + # it. This is only to display a human-readable URL in the template, but not the + # URL we redirect users to. + redirect_url_no_params = client_redirect_url.split("?")[0] + + html = self._sso_redirect_confirm_template.render( + display_url=redirect_url_no_params, + redirect_url=redirect_url, + server_name=self._server_name, + ).encode("utf-8") + + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html),)) + request.write(html) + finish_request(request) + + @staticmethod + def add_query_param_to_url(url: str, param_name: str, param: Any): + url_parts = list(urllib.parse.urlparse(url)) + query = dict(urllib.parse.parse_qsl(url_parts[4])) + query.update({param_name: param}) + url_parts[4] = urllib.parse.urlencode(query) + return urllib.parse.urlunparse(url_parts) + @attr.s class MacaroonGenerator(object): diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d680ee95e1..c7fffd72f2 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -17,6 +17,7 @@ import logging from twisted.internet import defer +from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID @@ -211,3 +212,21 @@ class ModuleApi(object): Deferred[object]: result of func """ return self._store.db.runInteraction(desc, func, *args, **kwargs) + + def complete_sso_login( + self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str + ): + """Complete a SSO login by redirecting the user to a page to confirm whether they + want their access token sent to `client_redirect_url`, or redirect them to that + URL with a token directly if the URL matches with one of the whitelisted clients. + + Args: + registered_user_id: The MXID that has been registered as a previous step of + of this SSO login. + request: The request to respond to. + client_redirect_url: The URL to which to offer to redirect the user (or to + redirect them directly if whitelisted). 
+ """ + self._auth_handler.complete_sso_login( + registered_user_id, request, client_redirect_url, + ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index b2bc7537db..d0d4999795 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -28,7 +28,6 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) -from synapse.http.site import SynapseRequest from synapse.push.mailer import load_jinja2_templates from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder @@ -591,63 +590,10 @@ class SSOAuthHandler(object): localpart=localpart, default_display_name=user_display_name ) - self.complete_sso_login(registered_user_id, request, client_redirect_url) - - def complete_sso_login( - self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str - ): - """Having figured out a mxid for this user, complete the HTTP request - - Args: - registered_user_id: - request: - client_redirect_url: - """ - # Create a login token - login_token = self._macaroon_gen.generate_short_term_login_token( - registered_user_id + self._auth_handler.complete_sso_login( + registered_user_id, request, client_redirect_url ) - # Append the login token to the original redirect URL (i.e. with its query - # parameters kept intact) to build the URL to which the template needs to - # redirect the users once they have clicked on the confirmation link. - redirect_url = self._add_query_param_to_url( - client_redirect_url, "loginToken", login_token - ) - - # if the client is whitelisted, we can redirect straight to it - if client_redirect_url.startswith(self._whitelisted_sso_clients): - request.redirect(redirect_url) - finish_request(request) - return - - # Otherwise, serve the redirect confirmation page. - - # Remove the query parameters from the redirect URL to get a shorter version of - # it. This is only to display a human-readable URL in the template, but not the - # URL we redirect users to. 
- redirect_url_no_params = client_redirect_url.split("?")[0] - - html = self._template.render( - display_url=redirect_url_no_params, - redirect_url=redirect_url, - server_name=self._server_name, - ).encode("utf-8") - - request.setResponseCode(200) - request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Content-Length", b"%d" % (len(html),)) - request.write(html) - finish_request(request) - - @staticmethod - def _add_query_param_to_url(url, param_name, param): - url_parts = list(urllib.parse.urlparse(url)) - query = dict(urllib.parse.parse_qsl(url_parts[4])) - query.update({param_name: param}) - url_parts[4] = urllib.parse.urlencode(query) - return urllib.parse.urlunparse(url_parts) - def register_servlets(hs, http_server): LoginRestServlet(hs).register(http_server) From a0178df10422a76fd403b82d2b2a4ed28a9a9d1e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 3 Mar 2020 11:29:07 +0000 Subject: [PATCH 209/382] Fix wrong handler being used in SAML handler --- synapse/handlers/saml_handler.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 7f411b53b9..9406753393 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -25,7 +25,6 @@ from synapse.api.errors import SynapseError from synapse.config import ConfigError from synapse.http.servlet import parse_string from synapse.module_api import ModuleApi -from synapse.rest.client.v1.login import SSOAuthHandler from synapse.types import ( UserID, map_username_to_mxid_localpart, @@ -48,7 +47,7 @@ class Saml2SessionData: class SamlHandler: def __init__(self, hs): self._saml_client = Saml2Client(hs.config.saml2_sp_config) - self._sso_auth_handler = SSOAuthHandler(hs) + self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() self._clock = hs.get_clock() @@ -116,7 +115,7 @@ class SamlHandler: self.expire_sessions() user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) - self._sso_auth_handler.complete_sso_login(user_id, request, relay_state) + self._auth_handler.complete_sso_login(user_id, request, relay_state) async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url): try: From 7dcbc33a1be04c46b930699c03c15bc759f4b22c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 3 Mar 2020 07:12:45 -0500 Subject: [PATCH 210/382] Validate the alt_aliases property of canonical alias events (#6971) --- changelog.d/6971.feature | 1 + synapse/api/errors.py | 1 + synapse/handlers/directory.py | 14 +-- synapse/handlers/message.py | 47 ++++++++- synapse/types.py | 15 ++- tests/handlers/test_directory.py | 70 ++++++------- tests/rest/client/v1/test_rooms.py | 160 +++++++++++++++++++++++++++++ tests/test_types.py | 2 +- 8 files changed, 256 insertions(+), 54 deletions(-) create mode 100644 changelog.d/6971.feature diff --git a/changelog.d/6971.feature b/changelog.d/6971.feature new file mode 100644 index 0000000000..ccf02a61df --- /dev/null +++ b/changelog.d/6971.feature @@ -0,0 +1 @@ +Validate the alt_aliases property of canonical alias events. 
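[Editor's note] For orientation, the m.room.canonical_alias content that the
validation below inspects has roughly the following shape. This is a sketch
with invented alias values, not an event taken from the patch:

    # Invented example; the handler changes below enforce these rules.
    content = {
        # Must currently resolve to this room, otherwise the event is
        # rejected with 400 / M_BAD_ALIAS.
        "alias": "#main:example.com",
        # Must be a list (otherwise 400 / M_INVALID_PARAM); each newly
        # added entry must also resolve to this room.
        "alt_aliases": ["#alt:example.com"],
    }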
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 0c20601600..616942b057 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -66,6 +66,7 @@ class Codes(object): EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + BAD_ALIAS = "M_BAD_ALIAS" class CodeMessageException(RuntimeError): diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 0b23ca919a..61eb49059b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - -import collections import logging import string from typing import List @@ -307,15 +305,17 @@ class DirectoryHandler(BaseHandler): send_update = True content.pop("alias", "") - # Filter alt_aliases for the removed alias. - alt_aliases = content.pop("alt_aliases", None) - # If the aliases are not a list (or not found) do not attempt to modify - # the list. - if isinstance(alt_aliases, collections.Sequence): + # Filter the alt_aliases property for the removed alias. Note that the + # value is not modified if alt_aliases is of an unexpected form. + alt_aliases = content.get("alt_aliases") + if isinstance(alt_aliases, (list, tuple)) and alias_str in alt_aliases: send_update = True alt_aliases = [alias for alias in alt_aliases if alias != alias_str] + if alt_aliases: content["alt_aliases"] = alt_aliases + else: + del content["alt_aliases"] if send_update: yield self.event_creation_handler.create_and_send_nonmember_event( diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a0103addd3..0c84c6cec4 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -888,19 +888,60 @@ class EventCreationHandler(object): yield self.base_handler.maybe_kick_guest_users(event, context) if event.type == EventTypes.CanonicalAlias: - # Check the alias is acually valid (at this time at least) + # Validate a newly added alias or newly added alt_aliases. + + original_alias = None + original_alt_aliases = set() + + original_event_id = event.unsigned.get("replaces_state") + if original_event_id: + original_event = yield self.store.get_event(original_event_id) + + if original_event: + original_alias = original_event.content.get("alias", None) + original_alt_aliases = original_event.content.get("alt_aliases", []) + + # Check the alias is currently valid (if it has changed). room_alias_str = event.content.get("alias", None) - if room_alias_str: + directory_handler = self.hs.get_handlers().directory_handler + if room_alias_str and room_alias_str != original_alias: room_alias = RoomAlias.from_string(room_alias_str) - directory_handler = self.hs.get_handlers().directory_handler mapping = yield directory_handler.get_association(room_alias) if mapping["room_id"] != event.room_id: raise SynapseError( 400, "Room alias %s does not point to the room" % (room_alias_str,), + Codes.BAD_ALIAS, ) + # Check that alt_aliases is the proper form. + alt_aliases = event.content.get("alt_aliases", []) + if not isinstance(alt_aliases, (list, tuple)): + raise SynapseError( + 400, "The alt_aliases property must be a list.", Codes.INVALID_PARAM + ) + + # If the old version of alt_aliases is of an unknown form, + # completely replace it. + if not isinstance(original_alt_aliases, (list, tuple)): + original_alt_aliases = [] + + # Check that each alias is currently valid. 
+            new_alt_aliases = set(alt_aliases) - set(original_alt_aliases)
+            if new_alt_aliases:
+                for alias_str in new_alt_aliases:
+                    room_alias = RoomAlias.from_string(alias_str)
+                    mapping = yield directory_handler.get_association(room_alias)
+
+                    if mapping["room_id"] != event.room_id:
+                        raise SynapseError(
+                            400,
+                            "Room alias %s does not point to the room"
+                            % (room_alias_str,),
+                            Codes.BAD_ALIAS,
+                        )
+
         federation_handler = self.hs.get_handlers().federation_handler

         if event.type == EventTypes.Member:
diff --git a/synapse/types.py b/synapse/types.py
index f3cd465735..acf60baddc 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -23,7 +23,7 @@ import attr
 from signedjson.key import decode_verify_key_bytes
 from unpaddedbase64 import decode_base64

-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, SynapseError

 # define a version of typing.Collection that works on python 3.5
 if sys.version_info[:3] >= (3, 6, 0):
@@ -166,11 +166,13 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
         return self

     @classmethod
-    def from_string(cls, s):
+    def from_string(cls, s: str):
         """Parse the string given by 's' into a structure object."""
         if len(s) < 1 or s[0:1] != cls.SIGIL:
             raise SynapseError(
-                400, "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL)
+                400,
+                "Expected %s string to start with '%s'" % (cls.__name__, cls.SIGIL),
+                Codes.INVALID_PARAM,
             )

         parts = s[1:].split(":", 1)
@@ -179,6 +181,7 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom
             400,
             "Expected %s of the form '%slocalname:domain'"
             % (cls.__name__, cls.SIGIL),
+            Codes.INVALID_PARAM,
         )

         domain = parts[1]
@@ -235,11 +238,13 @@ class GroupID(DomainSpecificString):
     def from_string(cls, s):
         group_id = super(GroupID, cls).from_string(s)
         if not group_id.localpart:
-            raise SynapseError(400, "Group ID cannot be empty")
+            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)

         if contains_invalid_mxid_characters(group_id.localpart):
             raise SynapseError(
-                400, "Group ID can only contain characters a-z, 0-9, or '=_-./'"
+                400,
+                "Group ID can only contain characters a-z, 0-9, or '=_-./'",
+                Codes.INVALID_PARAM,
             )

         return group_id
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 27b916aed4..3397cfa485 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -88,6 +88,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         )

     def test_delete_alias_not_allowed(self):
+        """Removing an alias should be denied if a user does not have the proper permissions."""
         room_id = "!8765qwer:test"
         self.get_success(
             self.store.create_room_alias_association(self.my_room, room_id, ["test"])
@@ -101,6 +102,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         )

     def test_delete_alias(self):
+        """Removing an alias should work when a user has the proper permissions."""
         room_id = "!8765qwer:test"
         user_id = "@user:test"
         self.get_success(
             self.store.create_room_alias_association(self.my_room, room_id, ["test"])
@@ -159,30 +161,42 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
         )

         self.test_alias = "#test:test"
-        self.room_alias = RoomAlias.from_string(self.test_alias)
+        self.room_alias = self._add_alias(self.test_alias)
+
+    def _add_alias(self, alias: str) -> RoomAlias:
+        """Add an alias to the test room."""
+        room_alias = RoomAlias.from_string(alias)

         # Create a new alias to this room.
self.get_success( self.store.create_room_alias_association( - self.room_alias, self.room_id, ["test"], self.admin_user + room_alias, self.room_id, ["test"], self.admin_user + ) + ) + return room_alias + + def _set_canonical_alias(self, content): + """Configure the canonical alias state on the room.""" + self.helper.send_state( + self.room_id, "m.room.canonical_alias", content, tok=self.admin_user_tok, + ) + + def _get_canonical_alias(self): + """Get the canonical alias state of the room.""" + return self.get_success( + self.state_handler.get_current_state( + self.room_id, EventTypes.CanonicalAlias, "" ) ) def test_remove_alias(self): """Removing an alias that is the canonical alias should remove it there too.""" # Set this new alias as the canonical alias for this room - self.helper.send_state( - self.room_id, - "m.room.canonical_alias", - {"alias": self.test_alias, "alt_aliases": [self.test_alias]}, - tok=self.admin_user_tok, + self._set_canonical_alias( + {"alias": self.test_alias, "alt_aliases": [self.test_alias]} ) - data = self.get_success( - self.state_handler.get_current_state( - self.room_id, EventTypes.CanonicalAlias, "" - ) - ) + data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual(data["content"]["alt_aliases"], [self.test_alias]) @@ -193,11 +207,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) ) - data = self.get_success( - self.state_handler.get_current_state( - self.room_id, EventTypes.CanonicalAlias, "" - ) - ) + data = self._get_canonical_alias() self.assertNotIn("alias", data["content"]) self.assertNotIn("alt_aliases", data["content"]) @@ -205,29 +215,17 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): """Removing an alias listed as in alt_aliases should remove it there too.""" # Create a second alias. other_test_alias = "#test2:test" - other_room_alias = RoomAlias.from_string(other_test_alias) - self.get_success( - self.store.create_room_alias_association( - other_room_alias, self.room_id, ["test"], self.admin_user - ) - ) + other_room_alias = self._add_alias(other_test_alias) # Set the alias as the canonical alias for this room. 
- self.helper.send_state( - self.room_id, - "m.room.canonical_alias", + self._set_canonical_alias( { "alias": self.test_alias, "alt_aliases": [self.test_alias, other_test_alias], - }, - tok=self.admin_user_tok, + } ) - data = self.get_success( - self.state_handler.get_current_state( - self.room_id, EventTypes.CanonicalAlias, "" - ) - ) + data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual( data["content"]["alt_aliases"], [self.test_alias, other_test_alias] @@ -240,11 +238,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): ) ) - data = self.get_success( - self.state_handler.get_current_state( - self.room_id, EventTypes.CanonicalAlias, "" - ) - ) + data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual(data["content"]["alt_aliases"], [self.test_alias]) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 2f3df5f88f..7dd86d0c27 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1821,3 +1821,163 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(channel.code, expected_code, channel.result) + + +class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + directory.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.room_owner = self.register_user("room_owner", "test") + self.room_owner_tok = self.login("room_owner", "test") + + self.room_id = self.helper.create_room_as( + self.room_owner, tok=self.room_owner_tok + ) + + self.alias = "#alias:test" + self._set_alias_via_directory(self.alias) + + def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + url = "/_matrix/client/r0/directory/room/" + alias + data = {"room_id": self.room_id} + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.room_owner_tok + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def _get_canonical_alias(self, expected_code: int = 200) -> JsonDict: + """Calls the endpoint under test. returns the json response object.""" + request, channel = self.make_request( + "GET", + "rooms/%s/state/m.room.canonical_alias" % (self.room_id,), + access_token=self.room_owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + res = channel.json_body + self.assertIsInstance(res, dict) + return res + + def _set_canonical_alias(self, content: str, expected_code: int = 200) -> JsonDict: + """Calls the endpoint under test. returns the json response object.""" + request, channel = self.make_request( + "PUT", + "rooms/%s/state/m.room.canonical_alias" % (self.room_id,), + json.dumps(content), + access_token=self.room_owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + res = channel.json_body + self.assertIsInstance(res, dict) + return res + + def test_canonical_alias(self): + """Test a basic alias message.""" + # There is no canonical alias to start with. + self._get_canonical_alias(expected_code=404) + + # Create an alias. + self._set_canonical_alias({"alias": self.alias}) + + # Canonical alias now exists! 
+ res = self._get_canonical_alias() + self.assertEqual(res, {"alias": self.alias}) + + # Now remove the alias. + self._set_canonical_alias({}) + + # There is an alias event, but it is empty. + res = self._get_canonical_alias() + self.assertEqual(res, {}) + + def test_alt_aliases(self): + """Test a canonical alias message with alt_aliases.""" + # Create an alias. + self._set_canonical_alias({"alt_aliases": [self.alias]}) + + # Canonical alias now exists! + res = self._get_canonical_alias() + self.assertEqual(res, {"alt_aliases": [self.alias]}) + + # Now remove the alt_aliases. + self._set_canonical_alias({}) + + # There is an alias event, but it is empty. + res = self._get_canonical_alias() + self.assertEqual(res, {}) + + def test_alias_alt_aliases(self): + """Test a canonical alias message with an alias and alt_aliases.""" + # Create an alias. + self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) + + # Canonical alias now exists! + res = self._get_canonical_alias() + self.assertEqual(res, {"alias": self.alias, "alt_aliases": [self.alias]}) + + # Now remove the alias and alt_aliases. + self._set_canonical_alias({}) + + # There is an alias event, but it is empty. + res = self._get_canonical_alias() + self.assertEqual(res, {}) + + def test_partial_modify(self): + """Test removing only the alt_aliases.""" + # Create an alias. + self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) + + # Canonical alias now exists! + res = self._get_canonical_alias() + self.assertEqual(res, {"alias": self.alias, "alt_aliases": [self.alias]}) + + # Now remove the alt_aliases. + self._set_canonical_alias({"alias": self.alias}) + + # There is an alias event, but it is empty. + res = self._get_canonical_alias() + self.assertEqual(res, {"alias": self.alias}) + + def test_add_alias(self): + """Test removing only the alt_aliases.""" + # Create an additional alias. + second_alias = "#second:test" + self._set_alias_via_directory(second_alias) + + # Add the canonical alias. + self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) + + # Then add the second alias. + self._set_canonical_alias( + {"alias": self.alias, "alt_aliases": [self.alias, second_alias]} + ) + + # Canonical alias now exists! 
+ res = self._get_canonical_alias() + self.assertEqual( + res, {"alias": self.alias, "alt_aliases": [self.alias, second_alias]} + ) + + def test_bad_data(self): + """Invalid data for alt_aliases should cause errors.""" + self._set_canonical_alias({"alt_aliases": "@bad:test"}, expected_code=400) + self._set_canonical_alias({"alt_aliases": None}, expected_code=400) + self._set_canonical_alias({"alt_aliases": 0}, expected_code=400) + self._set_canonical_alias({"alt_aliases": 1}, expected_code=400) + self._set_canonical_alias({"alt_aliases": False}, expected_code=400) + self._set_canonical_alias({"alt_aliases": True}, expected_code=400) + self._set_canonical_alias({"alt_aliases": {}}, expected_code=400) + + def test_bad_alias(self): + """An alias which does not point to the room raises a SynapseError.""" + self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) + self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) diff --git a/tests/test_types.py b/tests/test_types.py index 8d97c751ea..480bea1bdc 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -75,7 +75,7 @@ class GroupIDTestCase(unittest.TestCase): self.fail("Parsing '%s' should raise exception" % id_string) except SynapseError as exc: self.assertEqual(400, exc.code) - self.assertEqual("M_UNKNOWN", exc.errcode) + self.assertEqual("M_INVALID_PARAM", exc.errcode) class MapUsernameTestCase(unittest.TestCase): From fd983fad968941987314501b67147a264e2e927a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 3 Mar 2020 14:58:37 +0000 Subject: [PATCH 211/382] v1.11.1 --- CHANGES.md | 15 +++++++++++++++ changelog.d/6910.bugfix | 1 - changelog.d/6996.bugfix | 1 - synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/6910.bugfix delete mode 100644 changelog.d/6996.bugfix diff --git a/CHANGES.md b/CHANGES.md index ff681762cd..dc9ca05ad1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.11.1 (2020-03-03) +=========================== + +This release includes a security fix impacting installations using Single Sign-On (i.e. SAML2 or CAS) for authentication. Administrators of such installations are encouraged to upgrade as soon as possible. + +The release also includes fixes for a couple of other bugs. + +Bugfixes +-------- + +- Add a confirmation step to the SSO login flow before redirecting users to the redirect URL. ([b2bd54a2](https://github.com/matrix-org/synapse/commit/b2bd54a2e31d9a248f73fadb184ae9b4cbdb49f9), [65c73cdf](https://github.com/matrix-org/synapse/commit/65c73cdfec1876a9fec2fd2c3a74923cd146fe0b), [a0178df1](https://github.com/matrix-org/synapse/commit/a0178df10422a76fd403b82d2b2a4ed28a9a9d1e)) +- Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/`. Contributed by @dklimpel. ([\#6910](https://github.com/matrix-org/synapse/issues/6910)) +- Fix bug introduced in Synapse 1.11.0 which sometimes caused errors when joining rooms over federation, with `'coroutine' object has no attribute 'event_id'`. ([\#6996](https://github.com/matrix-org/synapse/issues/6996)) + + Synapse 1.11.0 (2020-02-21) =========================== diff --git a/changelog.d/6910.bugfix b/changelog.d/6910.bugfix deleted file mode 100644 index 707f1ff7b5..0000000000 --- a/changelog.d/6910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed set a user as an admin with the admin API `PUT /_synapse/admin/v2/users/`. Contributed by @dklimpel. 
diff --git a/changelog.d/6996.bugfix b/changelog.d/6996.bugfix deleted file mode 100644 index 765d376c7c..0000000000 --- a/changelog.d/6996.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which caused an error when joining a room, with `'coroutine' object has no attribute 'event_id'`. diff --git a/synapse/__init__.py b/synapse/__init__.py index 3406ce634f..e56ba89ff4 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.11.0" +__version__ = "1.11.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 6b0ef34706e32121ff6f802d62b3eb8545785afe Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 3 Mar 2020 15:01:43 +0000 Subject: [PATCH 212/382] Update debian changelog --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index fbb44cb94b..c39ea8f47f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.11.1) stable; urgency=medium + + * New synapse release 1.11.1. + + -- Synapse Packaging team Tue, 03 Mar 2020 15:01:22 +0000 + matrix-synapse-py3 (1.11.0) stable; urgency=medium * New synapse release 1.11.0. From 8ef8fb2c1c7c4aeb80fce4deea477b37754ce539 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 4 Mar 2020 13:11:04 +0000 Subject: [PATCH 213/382] Read the room version from database when fetching events (#6874) This is a precursor to giving EventBase objects the knowledge of which room version they belong to. --- changelog.d/6874.misc | 1 + .../storage/data_stores/main/events_worker.py | 84 +++++++++++++++---- .../replication/slave/storage/test_events.py | 10 +++ 3 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 changelog.d/6874.misc diff --git a/changelog.d/6874.misc b/changelog.d/6874.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6874.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 47a3a26072..ca237c6f12 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -28,9 +28,12 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.errors import NotFoundError -from synapse.api.room_versions import EventFormatVersions -from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401 -from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersions, +) +from synapse.events import make_event_from_dict from synapse.events.utils import prune_event from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.metrics.background_process_metrics import run_as_background_process @@ -580,8 +583,49 @@ class EventsWorkerStore(SQLBaseStore): # of a event format version, so it must be a V1 event. 
format_version = EventFormatVersions.V1 - original_ev = event_type_from_format_version(format_version)( + room_version_id = row["room_version_id"] + + if not room_version_id: + # this should only happen for out-of-band membership events + if not internal_metadata.get("out_of_band_membership"): + logger.warning( + "Room %s for event %s is unknown", d["room_id"], event_id + ) + continue + + # take a wild stab at the room version based on the event format + if format_version == EventFormatVersions.V1: + room_version = RoomVersions.V1 + elif format_version == EventFormatVersions.V2: + room_version = RoomVersions.V3 + else: + room_version = RoomVersions.V5 + else: + room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not room_version: + logger.error( + "Event %s in room %s has unknown room version %s", + event_id, + d["room_id"], + room_version_id, + ) + continue + + if room_version.event_format != format_version: + logger.error( + "Event %s in room %s with version %s has wrong format: " + "expected %s, was %s", + event_id, + d["room_id"], + room_version_id, + room_version.event_format, + format_version, + ) + continue + + original_ev = make_event_from_dict( event_dict=d, + room_version=room_version, internal_metadata_dict=internal_metadata, rejected_reason=rejected_reason, ) @@ -661,6 +705,12 @@ class EventsWorkerStore(SQLBaseStore): of EventFormatVersions. 'None' means the event predates EventFormatVersions (so the event is format V1). + * room_version_id (str|None): The version of the room which contains the event. + Hopefully one of RoomVersions. + + Due to historical reasons, there may be a few events in the database which + do not have an associated room; in this case None will be returned here. + * rejected_reason (str|None): if the event was rejected, the reason why. 
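The fallback above has to guess a room version for out-of-band membership events, whose room is unknown to the server. A simplified standalone sketch of that mapping follows; the constants and function name are stand-ins for Synapse's `EventFormatVersions`/`RoomVersions`, not its actual API.

    FORMAT_V1, FORMAT_V2, FORMAT_V3 = 1, 2, 3


    def guess_room_version_id(format_version: int) -> str:
        # Take a wild stab, as above: format v1 events can only come from
        # room v1/v2, format v2 from room v3, and anything newer is
        # assumed to be room v5.
        if format_version == FORMAT_V1:
            return "1"
        if format_version == FORMAT_V2:
            return "3"
        return "5"


    assert guess_room_version_id(FORMAT_V1) == "1"
    assert guess_room_version_id(FORMAT_V3) == "5"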
@@ -676,17 +726,18 @@ class EventsWorkerStore(SQLBaseStore): """ event_dict = {} for evs in batch_iter(event_ids, 200): - sql = ( - "SELECT " - " e.event_id, " - " e.internal_metadata," - " e.json," - " e.format_version, " - " rej.reason " - " FROM event_json as e" - " LEFT JOIN rejections as rej USING (event_id)" - " WHERE " - ) + sql = """\ + SELECT + e.event_id, + e.internal_metadata, + e.json, + e.format_version, + r.room_version, + rej.reason + FROM event_json as e + LEFT JOIN rooms r USING (room_id) + LEFT JOIN rejections as rej USING (event_id) + WHERE """ clause, args = make_in_list_sql_clause( txn.database_engine, "e.event_id", evs @@ -701,7 +752,8 @@ class EventsWorkerStore(SQLBaseStore): "internal_metadata": row[1], "json": row[2], "format_version": row[3], - "rejected_reason": row[4], + "room_version_id": row[4], + "rejected_reason": row[5], "redactions": [], } diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index d31210fbe4..f0561b30e3 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -15,6 +15,7 @@ import logging from canonicaljson import encode_canonical_json +from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource @@ -58,6 +59,15 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)] return super(SlavedEventStoreTestCase, self).setUp() + def prepare(self, *args, **kwargs): + super().prepare(*args, **kwargs) + + self.get_success( + self.master_store.store_room( + ROOM_ID, USER_ID, is_public=False, room_version=RoomVersions.V1, + ) + ) + def tearDown(self): [unpatch() for unpatch in self.unpatches] From 13892776ef7e0b1af2f82c9ca53f7bbd1c60d66f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 Mar 2020 11:30:46 -0500 Subject: [PATCH 214/382] Allow deleting an alias if the user has sufficient power level (#6986) --- changelog.d/6986.feature | 1 + synapse/api/auth.py | 9 +- synapse/handlers/directory.py | 107 +++++++++++++++------- tests/handlers/test_directory.py | 148 ++++++++++++++++++++++++------- tox.ini | 1 + 5 files changed, 192 insertions(+), 74 deletions(-) create mode 100644 changelog.d/6986.feature diff --git a/changelog.d/6986.feature b/changelog.d/6986.feature new file mode 100644 index 0000000000..16dea8bd7f --- /dev/null +++ b/changelog.d/6986.feature @@ -0,0 +1 @@ +Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5ca18b4301..c1ade1333b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -539,7 +539,7 @@ class Auth(object): @defer.inlineCallbacks def check_can_change_room_list(self, room_id: str, user: UserID): - """Check if the user is allowed to edit the room's entry in the + """Determine whether the user is allowed to edit the room's entry in the published room list. 
Args: @@ -570,12 +570,7 @@ class Auth(object): ) user_level = event_auth.get_user_power_level(user_id, auth_events) - if user_level < send_level: - raise AuthError( - 403, - "This server requires you to be a moderator in the room to" - " edit its room list entry", - ) + return user_level >= send_level @staticmethod def has_access_token(request): diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 61eb49059b..1d842c369b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -15,7 +15,7 @@ import logging import string -from typing import List +from typing import Iterable, List, Optional from twisted.internet import defer @@ -28,6 +28,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) +from synapse.appservice import ApplicationService from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -55,7 +56,13 @@ class DirectoryHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() @defer.inlineCallbacks - def _create_association(self, room_alias, room_id, servers=None, creator=None): + def _create_association( + self, + room_alias: RoomAlias, + room_id: str, + servers: Optional[Iterable[str]] = None, + creator: Optional[str] = None, + ): # general association creation for both human users and app services for wchar in string.whitespace: @@ -81,17 +88,21 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def create_association( - self, requester, room_alias, room_id, servers=None, check_membership=True, + self, + requester: Requester, + room_alias: RoomAlias, + room_id: str, + servers: Optional[List[str]] = None, + check_membership: bool = True, ): """Attempt to create a new alias Args: - requester (Requester) - room_alias (RoomAlias) - room_id (str) - servers (list[str]|None): List of servers that others servers - should try and join via - check_membership (bool): Whether to check if the user is in the room + requester + room_alias + room_id + servers: Iterable of servers that others servers should try and join via + check_membership: Whether to check if the user is in the room before the alias can be set (if the server's config requires it). 
Returns: @@ -145,15 +156,15 @@ class DirectoryHandler(BaseHandler): yield self._create_association(room_alias, room_id, servers, creator=user_id) @defer.inlineCallbacks - def delete_association(self, requester, room_alias): + def delete_association(self, requester: Requester, room_alias: RoomAlias): """Remove an alias from the directory (this is only meant for human users; AS users should call delete_appservice_association) Args: - requester (Requester): - room_alias (RoomAlias): + requester + room_alias Returns: Deferred[unicode]: room id that the alias used to point to @@ -189,16 +200,16 @@ class DirectoryHandler(BaseHandler): room_id = yield self._delete_association(room_alias) try: - yield self._update_canonical_alias( - requester, requester.user.to_string(), room_id, room_alias - ) + yield self._update_canonical_alias(requester, user_id, room_id, room_alias) except AuthError as e: logger.info("Failed to update alias events: %s", e) return room_id @defer.inlineCallbacks - def delete_appservice_association(self, service, room_alias): + def delete_appservice_association( + self, service: ApplicationService, room_alias: RoomAlias + ): if not service.is_interested_in_alias(room_alias.to_string()): raise SynapseError( 400, @@ -208,7 +219,7 @@ class DirectoryHandler(BaseHandler): yield self._delete_association(room_alias) @defer.inlineCallbacks - def _delete_association(self, room_alias): + def _delete_association(self, room_alias: RoomAlias): if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") @@ -217,7 +228,7 @@ class DirectoryHandler(BaseHandler): return room_id @defer.inlineCallbacks - def get_association(self, room_alias): + def get_association(self, room_alias: RoomAlias): room_id = None if self.hs.is_mine(room_alias): result = yield self.get_association_from_room_alias(room_alias) @@ -282,7 +293,9 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def _update_canonical_alias(self, requester, user_id, room_id, room_alias): + def _update_canonical_alias( + self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias + ): """ Send an updated canonical alias event if the removed alias was set as the canonical alias or listed in the alt_aliases field. @@ -331,7 +344,7 @@ class DirectoryHandler(BaseHandler): ) @defer.inlineCallbacks - def get_association_from_room_alias(self, room_alias): + def get_association_from_room_alias(self, room_alias: RoomAlias): result = yield self.store.get_association_from_room_alias(room_alias) if not result: # Query AS to see if it exists @@ -339,7 +352,7 @@ class DirectoryHandler(BaseHandler): result = yield as_handler.query_room_alias_exists(room_alias) return result - def can_modify_alias(self, alias, user_id=None): + def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None): # Any application service "interested" in an alias they are regexing on # can modify the alias. # Users can only modify the alias if ALL the interested services have @@ -360,22 +373,42 @@ class DirectoryHandler(BaseHandler): return defer.succeed(True) @defer.inlineCallbacks - def _user_can_delete_alias(self, alias, user_id): + def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): + """Determine whether a user can delete an alias. + + One of the following must be true: + + 1. The user created the alias. + 2. The user is a server administrator. + 3. The user has a power-level sufficient to send a canonical alias event + for the current room. 
+ + """ creator = yield self.store.get_room_alias_creator(alias.to_string()) if creator is not None and creator == user_id: return True - is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) - return is_admin + # Resolve the alias to the corresponding room. + room_mapping = yield self.get_association(alias) + room_id = room_mapping["room_id"] + if not room_id: + return False + + res = yield self.auth.check_can_change_room_list( + room_id, UserID.from_string(user_id) + ) + return res @defer.inlineCallbacks - def edit_published_room_list(self, requester, room_id, visibility): + def edit_published_room_list( + self, requester: Requester, room_id: str, visibility: str + ): """Edit the entry of the room in the published room list. requester - room_id (str) - visibility (str): "public" or "private" + room_id + visibility: "public" or "private" """ user_id = requester.user.to_string() @@ -400,7 +433,15 @@ class DirectoryHandler(BaseHandler): if room is None: raise SynapseError(400, "Unknown room") - yield self.auth.check_can_change_room_list(room_id, requester.user) + can_change_room_list = yield self.auth.check_can_change_room_list( + room_id, requester.user + ) + if not can_change_room_list: + raise AuthError( + 403, + "This server requires you to be a moderator in the room to" + " edit its room list entry", + ) making_public = visibility == "public" if making_public: @@ -421,16 +462,16 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def edit_published_appservice_room_list( - self, appservice_id, network_id, room_id, visibility + self, appservice_id: str, network_id: str, room_id: str, visibility: str ): """Add or remove a room from the appservice/network specific public room list. Args: - appservice_id (str): ID of the appservice that owns the list - network_id (str): The ID of the network the list is associated with - room_id (str) - visibility (str): either "public" or "private" + appservice_id: ID of the appservice that owns the list + network_id: The ID of the network the list is associated with + room_id + visibility: either "public" or "private" """ if visibility not in ["public", "private"]: raise SynapseError(400, "Invalid visibility setting") diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 3397cfa485..5e40adba52 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -18,6 +18,7 @@ from mock import Mock from twisted.internet import defer +import synapse import synapse.api.errors from synapse.api.constants import EventTypes from synapse.config.room_directory import RoomDirectoryConfig @@ -87,40 +88,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase): ignore_backoff=True, ) - def test_delete_alias_not_allowed(self): - """Removing an alias should be denied if a user does not have the proper permissions.""" - room_id = "!8765qwer:test" - self.get_success( - self.store.create_room_alias_association(self.my_room, room_id, ["test"]) - ) - - self.get_failure( - self.handler.delete_association( - create_requester("@user:test"), self.my_room - ), - synapse.api.errors.AuthError, - ) - - def test_delete_alias(self): - """Removing an alias should work when a user does has the proper permissions.""" - room_id = "!8765qwer:test" - user_id = "@user:test" - self.get_success( - self.store.create_room_alias_association( - self.my_room, room_id, ["test"], user_id - ) - ) - - result = self.get_success( - self.handler.delete_association(create_requester(user_id), self.my_room) - ) - 
self.assertEquals(room_id, result) - - # The alias should not be found. - self.get_failure( - self.handler.get_association(self.my_room), synapse.api.errors.SynapseError - ) - def test_incoming_fed_query(self): self.get_success( self.store.create_room_alias_association( @@ -135,6 +102,119 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) +class TestDeleteAlias(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + self.handler = hs.get_handlers().directory_handler + self.state_handler = hs.get_state_handler() + + # Create user + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create a test room + self.room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.test_alias = "#test:test" + self.room_alias = RoomAlias.from_string(self.test_alias) + + # Create a test user. + self.test_user = self.register_user("user", "pass", admin=False) + self.test_user_tok = self.login("user", "pass") + self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) + + def _create_alias(self, user): + # Create a new alias to this room. + self.get_success( + self.store.create_room_alias_association( + self.room_alias, self.room_id, ["test"], user + ) + ) + + def test_delete_alias_not_allowed(self): + """A user that doesn't meet the expected guidelines cannot delete an alias.""" + self._create_alias(self.admin_user) + self.get_failure( + self.handler.delete_association( + create_requester(self.test_user), self.room_alias + ), + synapse.api.errors.AuthError, + ) + + def test_delete_alias_creator(self): + """An alias creator can delete their own alias.""" + # Create an alias from a different user. + self._create_alias(self.test_user) + + # Delete the user's alias. + result = self.get_success( + self.handler.delete_association( + create_requester(self.test_user), self.room_alias + ) + ) + self.assertEquals(self.room_id, result) + + # Confirm the alias is gone. + self.get_failure( + self.handler.get_association(self.room_alias), + synapse.api.errors.SynapseError, + ) + + def test_delete_alias_admin(self): + """A server admin can delete an alias created by another user.""" + # Create an alias from a different user. + self._create_alias(self.test_user) + + # Delete the user's alias as the admin. + result = self.get_success( + self.handler.delete_association( + create_requester(self.admin_user), self.room_alias + ) + ) + self.assertEquals(self.room_id, result) + + # Confirm the alias is gone. + self.get_failure( + self.handler.get_association(self.room_alias), + synapse.api.errors.SynapseError, + ) + + def test_delete_alias_sufficient_power(self): + """A user with a sufficient power level should be able to delete an alias.""" + self._create_alias(self.admin_user) + + # Increase the user's power level. + self.helper.send_state( + self.room_id, + "m.room.power_levels", + {"users": {self.test_user: 100}}, + tok=self.admin_user_tok, + ) + + # They can now delete the alias. + result = self.get_success( + self.handler.delete_association( + create_requester(self.test_user), self.room_alias + ) + ) + self.assertEquals(self.room_id, result) + + # Confirm the alias is gone. 
+ self.get_failure( + self.handler.get_association(self.room_alias), + synapse.api.errors.SynapseError, + ) + + class CanonicalAliasTestCase(unittest.HomeserverTestCase): """Test modifications of the canonical alias when delete aliases. """ diff --git a/tox.ini b/tox.ini index 097ebb8774..7622aa19f1 100644 --- a/tox.ini +++ b/tox.ini @@ -185,6 +185,7 @@ commands = mypy \ synapse/federation/federation_client.py \ synapse/federation/sender \ synapse/federation/transport \ + synapse/handlers/directory.py \ synapse/handlers/presence.py \ synapse/handlers/sync.py \ synapse/handlers/ui_auth \ From 31a2116331fea015fe162f298eca19d9a5a58ecb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Mar 2020 17:28:13 +0000 Subject: [PATCH 215/382] Hide extremities dummy events from clients --- synapse/visibility.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/visibility.py b/synapse/visibility.py index e60d9756b7..a48a4f3dfe 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -119,6 +119,9 @@ def filter_events_for_client( the original event if they can see it as normal. """ + if event.type == "org.matrix.dummy_event": + return None + if not event.is_state() and event.sender in ignore_list: return None From 83b6c69d3d0c6249610ed33a86f3d0526334089c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 4 Mar 2020 17:29:09 +0000 Subject: [PATCH 216/382] Changelog --- changelog.d/7035.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7035.bugfix diff --git a/changelog.d/7035.bugfix b/changelog.d/7035.bugfix new file mode 100644 index 0000000000..56292dc8ac --- /dev/null +++ b/changelog.d/7035.bugfix @@ -0,0 +1 @@ +Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. From 78a15b1f9d3ba3aca49dc4332e86203180d5c863 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 5 Mar 2020 15:46:44 +0000 Subject: [PATCH 217/382] Store room_versions in EventBase objects (#6875) This is a bit fiddly because it all has to be done on one fell swoop: * Wherever we create a new event, pass in the room version (and check it matches the format version) * When we prune an event, use the room version of the unpruned event to create the pruned version. * When we pass an event over the replication protocol, pass the room version over alongside it, and use it when deserialising the event again. --- changelog.d/6875.misc | 1 + synapse/events/__init__.py | 53 +++++++++++++++++++------- synapse/events/utils.py | 14 ++----- synapse/replication/http/federation.py | 13 +++++-- synapse/replication/http/send_event.py | 14 +++++-- 5 files changed, 63 insertions(+), 32 deletions(-) create mode 100644 changelog.d/6875.misc diff --git a/changelog.d/6875.misc b/changelog.d/6875.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6875.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 7307116556..533ba327f5 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -15,9 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import abc
 import os
 from distutils.util import strtobool
-from typing import Optional, Type
+from typing import Dict, Optional, Type
 
 import six
 
@@ -199,15 +200,25 @@ class _EventInternalMetadata(object):
         return self._dict.get("redacted", False)
 
 
-class EventBase(object):
+class EventBase(metaclass=abc.ABCMeta):
+    @property
+    @abc.abstractmethod
+    def format_version(self) -> int:
+        """The EventFormatVersion implemented by this event"""
+        ...
+
     def __init__(
         self,
-        event_dict,
-        signatures={},
-        unsigned={},
-        internal_metadata_dict={},
-        rejected_reason=None,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        signatures: Dict[str, Dict[str, str]],
+        unsigned: JsonDict,
+        internal_metadata_dict: JsonDict,
+        rejected_reason: Optional[str],
     ):
+        assert room_version.event_format == self.format_version
+
+        self.room_version = room_version
         self.signatures = signatures
         self.unsigned = unsigned
         self.rejected_reason = rejected_reason
@@ -303,7 +314,13 @@ class EventBase(object):
 class FrozenEvent(EventBase):
     format_version = EventFormatVersions.V1  # All events of this type are V1
 
-    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+    def __init__(
+        self,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        internal_metadata_dict: JsonDict = {},
+        rejected_reason: Optional[str] = None,
+    ):
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -326,8 +343,9 @@ class FrozenEvent(EventBase):
 
         self._event_id = event_dict["event_id"]
 
-        super(FrozenEvent, self).__init__(
+        super().__init__(
             frozen_dict,
+            room_version=room_version,
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
@@ -352,7 +370,13 @@ class FrozenEvent(EventBase):
 class FrozenEventV2(EventBase):
     format_version = EventFormatVersions.V2  # All events of this type are V2
 
-    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
+    def __init__(
+        self,
+        event_dict: JsonDict,
+        room_version: RoomVersion,
+        internal_metadata_dict: JsonDict = {},
+        rejected_reason: Optional[str] = None,
+    ):
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -377,8 +401,9 @@ class FrozenEventV2(EventBase):
 
         self._event_id = None
 
-        super(FrozenEventV2, self).__init__(
+        super().__init__(
             frozen_dict,
+            room_version=room_version,
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
@@ -445,7 +470,7 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
 
-def event_type_from_format_version(format_version: int) -> Type[EventBase]:
+def _event_type_from_format_version(format_version: int) -> Type[EventBase]:
     """Returns the python type to use to construct an Event object for the
     given event format version.
 
@@ -474,5 +499,5 @@ def make_event_from_dict(
     rejected_reason: Optional[str] = None,
 ) -> EventBase:
     """Construct an EventBase from the given event dict"""
-    event_type = event_type_from_format_version(room_version.event_format)
-    return event_type(event_dict, internal_metadata_dict, rejected_reason)
+    event_type = _event_type_from_format_version(room_version.event_format)
+    return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index f70f5032fb..bc6f98ae3b 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -35,26 +35,20 @@ from . import EventBase
 
 SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
 
 
-def prune_event(event):
+def prune_event(event: EventBase) -> EventBase:
     """ Returns a pruned version of the given event, which removes all keys we
     don't know about or think could potentially be dodgy.
 
     This is used when we "redact" an event. We want to remove all fields that
     the user has specified, but we do want to keep necessary information like
     type, state_key etc.
-
-    Args:
-        event (FrozenEvent)
-
-    Returns:
-        FrozenEvent
     """
     pruned_event_dict = prune_event_dict(event.get_dict())
 
-    from . import event_type_from_format_version
+    from . import make_event_from_dict
 
-    pruned_event = event_type_from_format_version(event.format_version)(
-        pruned_event_dict, event.internal_metadata.get_dict()
+    pruned_event = make_event_from_dict(
+        pruned_event_dict, event.room_version, event.internal_metadata.get_dict()
     )
 
     # Mark the event as redacted
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index 8794720101..7e23b565b9 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -18,7 +18,7 @@ import logging
 
 from twisted.internet import defer
 
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import event_type_from_format_version
+from synapse.events import make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
@@ -38,6 +38,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
         {
             "events": [{
                 "event": { .. serialized event .. },
+                "room_version": .., // "1", "2", "3", etc: the version of the room
+                                    // containing the event
+                "event_format_version": .., // 1,2,3 etc: the event format version
                 "internal_metadata": { .. serialized internal_metadata .. },
                 "rejected_reason": .., // The event.rejected_reason field
                 "context": { .. serialized event context ..
}, @@ -73,6 +76,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_payloads.append( { "event": event.get_pdu_json(), + "room_version": event.room_version.identifier, "event_format_version": event.format_version, "internal_metadata": event.internal_metadata.get_dict(), "rejected_reason": event.rejected_reason, @@ -95,12 +99,13 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts = [] for event_payload in event_payloads: event_dict = event_payload["event"] - format_ver = event_payload["event_format_version"] + room_ver = KNOWN_ROOM_VERSIONS[event_payload["room_version"]] internal_metadata = event_payload["internal_metadata"] rejected_reason = event_payload["rejected_reason"] - EventType = event_type_from_format_version(format_ver) - event = EventType(event_dict, internal_metadata, rejected_reason) + event = make_event_from_dict( + event_dict, room_ver, internal_metadata, rejected_reason + ) context = EventContext.deserialize( self.storage, event_payload["context"] diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 84b92f16ad..b74b088ff4 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -17,7 +17,8 @@ import logging from twisted.internet import defer -from synapse.events import event_type_from_format_version +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint @@ -37,6 +38,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): { "event": { .. serialized event .. }, + "room_version": .., // "1", "2", "3", etc: the version of the room + // containing the event + "event_format_version": .., // 1,2,3 etc: the event format version "internal_metadata": { .. serialized internal_metadata .. }, "rejected_reason": .., // The event.rejected_reason field "context": { .. serialized event context .. }, @@ -77,6 +81,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): payload = { "event": event.get_pdu_json(), + "room_version": event.room_version.identifier, "event_format_version": event.format_version, "internal_metadata": event.internal_metadata.get_dict(), "rejected_reason": event.rejected_reason, @@ -93,12 +98,13 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): content = parse_json_object_from_request(request) event_dict = content["event"] - format_ver = content["event_format_version"] + room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]] internal_metadata = content["internal_metadata"] rejected_reason = content["rejected_reason"] - EventType = event_type_from_format_version(format_ver) - event = EventType(event_dict, internal_metadata, rejected_reason) + event = make_event_from_dict( + event_dict, room_ver, internal_metadata, rejected_reason + ) requester = Requester.deserialize(self.store, content["requester"]) context = EventContext.deserialize(self.storage, content["context"]) From 87972f07e5da0760ca5e11e62b1bda8c49f6f606 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Mar 2020 11:29:56 -0500 Subject: [PATCH 218/382] Convert remote key resource REST layer to async/await. 
(#7020) --- changelog.d/7020.misc | 1 + synapse/rest/key/v2/remote_key_resource.py | 11 ++++------- 2 files changed, 5 insertions(+), 7 deletions(-) create mode 100644 changelog.d/7020.misc diff --git a/changelog.d/7020.misc b/changelog.d/7020.misc new file mode 100644 index 0000000000..188b4378cb --- /dev/null +++ b/changelog.d/7020.misc @@ -0,0 +1 @@ +Port `synapse.rest.keys` to async/await. diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 4b6d030a57..ab671f7334 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -18,8 +18,6 @@ from typing import Dict, Set from canonicaljson import encode_canonical_json, json from signedjson.sign import sign_json -from twisted.internet import defer - from synapse.api.errors import Codes, SynapseError from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import ( @@ -125,8 +123,7 @@ class RemoteKey(DirectServeResource): await self.query_keys(request, query, query_remote_on_cache_miss=True) - @defer.inlineCallbacks - def query_keys(self, request, query, query_remote_on_cache_miss=False): + async def query_keys(self, request, query, query_remote_on_cache_miss=False): logger.info("Handling query for keys %r", query) store_queries = [] @@ -143,7 +140,7 @@ class RemoteKey(DirectServeResource): for key_id in key_ids: store_queries.append((server_name, key_id, None)) - cached = yield self.store.get_server_keys_json(store_queries) + cached = await self.store.get_server_keys_json(store_queries) json_results = set() @@ -215,8 +212,8 @@ class RemoteKey(DirectServeResource): json_results.add(bytes(result["key_json"])) if cache_misses and query_remote_on_cache_miss: - yield self.fetcher.get_keys(cache_misses) - yield self.query_keys(request, query, query_remote_on_cache_miss=False) + await self.fetcher.get_keys(cache_misses) + await self.query_keys(request, query, query_remote_on_cache_miss=False) else: signed_keys = [] for key_json in json_results: From 80e580ae92d1170a4ac2f6afb2fa70f5f8e7b4ac Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 6 Mar 2020 11:02:52 +0000 Subject: [PATCH 219/382] Make sure that is_verified is a boolean when processing room keys --- synapse/handlers/e2e_room_keys.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index f1b4424a02..854c181fcc 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -207,6 +207,12 @@ class E2eRoomKeysHandler(object): changed = False # if anything has changed, we need to update the etag for room_id, room in iteritems(room_keys["rooms"]): for session_id, room_key in iteritems(room["sessions"]): + if not isinstance(room_key["is_verified"], bool): + msg = ( + "is_verified must be a boolean in keys for room %s" % room_id + ) + raise SynapseError(400, msg, Codes.INVALID_PARAM) + log_kv( { "message": "Trying to upload room key", From a27056d539724614e960f3da3c2e3443aa8625ad Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 6 Mar 2020 11:06:47 +0000 Subject: [PATCH 220/382] Changelog --- changelog.d/7045.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7045.misc diff --git a/changelog.d/7045.misc b/changelog.d/7045.misc new file mode 100644 index 0000000000..74c1abea56 --- /dev/null +++ b/changelog.d/7045.misc @@ -0,0 +1 @@ +Add a type check to `is_verified` when processing room keys. 
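The type check added above guards against clients uploading room keys whose `is_verified` field is not a boolean. A self-contained sketch of the idea follows; `RequestError` is a stand-in for Synapse's `SynapseError`, and the helper name is illustrative.

    class RequestError(Exception):
        def __init__(self, code: int, msg: str, errcode: str = "M_INVALID_PARAM"):
            super().__init__(msg)
            self.code = code
            self.errcode = errcode


    def check_room_key(room_id: str, session_id: str, room_key: dict) -> None:
        # bool is checked explicitly: isinstance(1, bool) is False, so
        # integer "truthy" values are rejected rather than silently coerced.
        if not isinstance(room_key.get("is_verified"), bool):
            raise RequestError(
                400,
                "is_verified must be a boolean in keys for session %s in "
                "room %s" % (session_id, room_id),
            )


    check_room_key("!r:test", "s1", {"is_verified": True})  # accepted
    try:
        check_room_key("!r:test", "s1", {"is_verified": 1})  # rejected
    except RequestError as e:
        print(e)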
From 45df9d35a9500e9a21139951845980a296a62e0b Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 6 Mar 2020 11:10:52 +0000
Subject: [PATCH 221/382] Lint

---
 synapse/handlers/e2e_room_keys.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 854c181fcc..a953a7fe04 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -209,7 +209,8 @@ class E2eRoomKeysHandler(object):
             for session_id, room_key in iteritems(room["sessions"]):
                 if not isinstance(room_key["is_verified"], bool):
                     msg = (
-                        "is_verified must be a boolean in keys for room %s" % room_id
+                        "is_verified must be a boolean in keys for room %s"
+                        % room_id
                     )
                     raise SynapseError(400, msg, Codes.INVALID_PARAM)

From 297aaf48166f153d35c38160d0c747770d925f39 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 6 Mar 2020 15:07:28 +0000
Subject: [PATCH 222/382] Mention the session ID in the error message

---
 synapse/handlers/e2e_room_keys.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index a953a7fe04..cad38d2bdd 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -209,8 +209,9 @@ class E2eRoomKeysHandler(object):
             for session_id, room_key in iteritems(room["sessions"]):
                 if not isinstance(room_key["is_verified"], bool):
                     msg = (
-                        "is_verified must be a boolean in keys for room %s"
-                        % room_id
+                        "is_verified must be a boolean in keys for session %s in "
+                        "room %s"
+                        % (session_id, room_id)
                     )
                     raise SynapseError(400, msg, Codes.INVALID_PARAM)

From 54b78a0e3b6efcc9b576e5a706991382d2984d10 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 6 Mar 2020 15:11:13 +0000
Subject: [PATCH 223/382] Lint

---
 synapse/handlers/e2e_room_keys.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index cad38d2bdd..9abaf13b8f 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -210,8 +210,7 @@ class E2eRoomKeysHandler(object):
                 if not isinstance(room_key["is_verified"], bool):
                     msg = (
                         "is_verified must be a boolean in keys for session %s in "
-                        "room %s"
-                        % (session_id, room_id)
+                        "room %s" % (session_id, room_id)
                     )
                     raise SynapseError(400, msg, Codes.INVALID_PARAM)

From 1d66dce83e58827aae12080552edeaeb357b1997 Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Fri, 6 Mar 2020 18:14:19 +0000
Subject: [PATCH 224/382] Break down monthly active users by appservice_id
 (#7030)

* Break down monthly active users by appservice_id and emit via prometheus.

Co-authored-by: Brendan Abolivier
---
 changelog.d/7030.feature | 1 +
 synapse/app/homeserver.py | 13 ++++++
 .../data_stores/main/monthly_active_users.py | 32 +++++++++++++-
 tests/storage/test_monthly_active_users.py | 42 +++++++++++++++++++
 4 files changed, 87 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7030.feature

diff --git a/changelog.d/7030.feature b/changelog.d/7030.feature
new file mode 100644
index 0000000000..fcfdb8d8a1
--- /dev/null
+++ b/changelog.d/7030.feature
@@ -0,0 +1 @@
+Break down monthly active users by `appservice_id` and emit via Prometheus.
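The changes below expose per-service MAU counts through a labelled Prometheus gauge. A small sketch of the labelled-gauge pattern with `prometheus_client` follows; the metric name and counts here are illustrative only.

    from prometheus_client import Gauge

    mau_by_service = Gauge(
        "example_mau_by_service", "Current MAU by service", ["app_service"]
    )

    # One time series is created per label value; services that never
    # report a count simply never appear in the exported metrics.
    for app_service, count in {"native": 3, "service1": 2}.items():
        mau_by_service.labels(app_service).set(float(count))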
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index c2a334a2b0..e0fdddfdc9 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -298,6 +298,11 @@ class SynapseHomeServer(HomeServer): # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") +current_mau_by_service_gauge = Gauge( + "synapse_admin_mau_current_mau_by_service", + "Current MAU by service", + ["app_service"], +) max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") registered_reserved_users_mau_gauge = Gauge( "synapse_admin_mau:registered_reserved_users", @@ -585,12 +590,20 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): current_mau_count = 0 + current_mau_count_by_service = {} reserved_users = () store = hs.get_datastore() if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: current_mau_count = yield store.get_monthly_active_count() + current_mau_count_by_service = ( + yield store.get_monthly_active_count_by_service() + ) reserved_users = yield store.get_registered_reserved_users() current_mau_gauge.set(float(current_mau_count)) + + for app_service, count in current_mau_count_by_service.items(): + current_mau_by_service_gauge.labels(app_service).set(float(count)) + registered_reserved_users_mau_gauge.set(float(len(reserved_users))) max_mau_gauge.set(float(hs.config.max_mau_value)) diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py index 1507a14e09..925bc5691b 100644 --- a/synapse/storage/data_stores/main/monthly_active_users.py +++ b/synapse/storage/data_stores/main/monthly_active_users.py @@ -43,13 +43,40 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): def _count_users(txn): sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users" - txn.execute(sql) (count,) = txn.fetchone() return count return self.db.runInteraction("count_users", _count_users) + @cached(num_args=0) + def get_monthly_active_count_by_service(self): + """Generates current count of monthly active users broken down by service. + A service is typically an appservice but also includes native matrix users. + Since the `monthly_active_users` table is populated from the `user_ips` table + `config.track_appservice_user_ips` must be set to `true` for this + method to return anything other than native matrix users. + + Returns: + Deferred[dict]: dict that includes a mapping between app_service_id + and the number of occurrences. 
+ + """ + + def _count_users_by_service(txn): + sql = """ + SELECT COALESCE(appservice_id, 'native'), COALESCE(count(*), 0) + FROM monthly_active_users + LEFT JOIN users ON monthly_active_users.user_id=users.name + GROUP BY appservice_id; + """ + + txn.execute(sql) + result = txn.fetchall() + return dict(result) + + return self.db.runInteraction("count_users_by_service", _count_users_by_service) + @defer.inlineCallbacks def get_registered_reserved_users(self): """Of the reserved threepids defined in config, which are associated @@ -291,6 +318,9 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): ) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) + self._invalidate_cache_and_stream( + txn, self.get_monthly_active_count_by_service, () + ) self._invalidate_cache_and_stream( txn, self.user_last_seen_monthly_active, (user_id,) ) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 3c78faab45..bc53bf0951 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -303,3 +303,45 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.pump() self.store.upsert_monthly_active_user.assert_not_called() + + def test_get_monthly_active_count_by_service(self): + appservice1_user1 = "@appservice1_user1:example.com" + appservice1_user2 = "@appservice1_user2:example.com" + + appservice2_user1 = "@appservice2_user1:example.com" + native_user1 = "@native_user1:example.com" + + service1 = "service1" + service2 = "service2" + native = "native" + + self.store.register_user( + user_id=appservice1_user1, password_hash=None, appservice_id=service1 + ) + self.store.register_user( + user_id=appservice1_user2, password_hash=None, appservice_id=service1 + ) + self.store.register_user( + user_id=appservice2_user1, password_hash=None, appservice_id=service2 + ) + self.store.register_user(user_id=native_user1, password_hash=None) + self.pump() + + count = self.store.get_monthly_active_count_by_service() + self.assertEqual({}, self.get_success(count)) + + self.store.upsert_monthly_active_user(native_user1) + self.store.upsert_monthly_active_user(appservice1_user1) + self.store.upsert_monthly_active_user(appservice1_user2) + self.store.upsert_monthly_active_user(appservice2_user1) + self.pump() + + count = self.store.get_monthly_active_count() + self.assertEqual(4, self.get_success(count)) + + count = self.store.get_monthly_active_count_by_service() + result = self.get_success(count) + + self.assertEqual(2, result[service1]) + self.assertEqual(1, result[service2]) + self.assertEqual(1, result[native]) From 2bff4457d9a40ffdd8ae1b5d1249a5e78fb8da01 Mon Sep 17 00:00:00 2001 From: Neil Pilgrim Date: Sat, 7 Mar 2020 09:57:26 -0800 Subject: [PATCH 225/382] Add type hints to logging/context.py (#6309) * Add type hints to logging/context.py Signed-off-by: neiljp (Neil Pilgrim) --- changelog.d/6309.misc | 1 + synapse/logging/context.py | 121 +++++++++++++++++++++++-------------- 2 files changed, 75 insertions(+), 47 deletions(-) create mode 100644 changelog.d/6309.misc diff --git a/changelog.d/6309.misc b/changelog.d/6309.misc new file mode 100644 index 0000000000..1aa7294617 --- /dev/null +++ b/changelog.d/6309.misc @@ -0,0 +1 @@ +Add type hints to `logging/context.py`. 
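A central piece of the typing work below is the `Union` alias for values that may be either a real `LoggingContext` or the sentinel root context. A reduced sketch of the pattern follows; the classes are trimmed stand-ins that only illustrate the alias, not the real implementations.

    from typing import Optional, Union


    class Sentinel:
        request: Optional[str] = None


    class LoggingContext:
        def __init__(self, request: str) -> None:
            self.request: Optional[str] = request


    LoggingContextOrSentinel = Union[LoggingContext, Sentinel]


    def describe(ctx: LoggingContextOrSentinel) -> str:
        # mypy accepts `.request` here because both members of the union
        # declare it with a compatible type.
        return ctx.request or "sentinel"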
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 1b940842f6..1eccc0e83f 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -27,10 +27,15 @@ import inspect
 import logging
 import threading
 import types
-from typing import Any, List
+from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union
+
+from typing_extensions import Literal
 
 from twisted.internet import defer, threads
 
+if TYPE_CHECKING:
+    from synapse.logging.scopecontextmanager import _LogContextScope
+
 logger = logging.getLogger(__name__)
 
 try:
@@ -91,7 +96,7 @@ class ContextResourceUsage(object):
         "evt_db_fetch_count",
     ]
 
-    def __init__(self, copy_from=None):
+    def __init__(self, copy_from: "Optional[ContextResourceUsage]" = None) -> None:
        """Create a new ContextResourceUsage
 
         Args:
@@ -101,27 +106,28 @@ class ContextResourceUsage(object):
         if copy_from is None:
             self.reset()
         else:
-            self.ru_utime = copy_from.ru_utime
-            self.ru_stime = copy_from.ru_stime
-            self.db_txn_count = copy_from.db_txn_count
+            # FIXME: mypy can't infer the types set via reset() above, so specify explicitly for now
+            self.ru_utime = copy_from.ru_utime  # type: float
+            self.ru_stime = copy_from.ru_stime  # type: float
+            self.db_txn_count = copy_from.db_txn_count  # type: int
 
-            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
-            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
-            self.evt_db_fetch_count = copy_from.evt_db_fetch_count
+            self.db_txn_duration_sec = copy_from.db_txn_duration_sec  # type: float
+            self.db_sched_duration_sec = copy_from.db_sched_duration_sec  # type: float
+            self.evt_db_fetch_count = copy_from.evt_db_fetch_count  # type: int
 
-    def copy(self):
+    def copy(self) -> "ContextResourceUsage":
         return ContextResourceUsage(copy_from=self)
 
-    def reset(self):
+    def reset(self) -> None:
         self.ru_stime = 0.0
         self.ru_utime = 0.0
         self.db_txn_count = 0
 
-        self.db_txn_duration_sec = 0
-        self.db_sched_duration_sec = 0
+        self.db_txn_duration_sec = 0.0
+        self.db_sched_duration_sec = 0.0
         self.evt_db_fetch_count = 0
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
             "db_txn_count='%r', db_txn_duration_sec='%r', "
@@ -135,7 +141,7 @@ class ContextResourceUsage(object):
             self.evt_db_fetch_count,
         )
 
-    def __iadd__(self, other):
+    def __iadd__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         """Add another ContextResourceUsage's stats to this one's.
 
         Args:
@@ -149,7 +155,7 @@ class ContextResourceUsage(object):
         self.evt_db_fetch_count += other.evt_db_fetch_count
         return self
 
-    def __isub__(self, other):
+    def __isub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         self.ru_utime -= other.ru_utime
         self.ru_stime -= other.ru_stime
         self.db_txn_count -= other.db_txn_count
@@ -158,17 +164,20 @@
         self.evt_db_fetch_count -= other.evt_db_fetch_count
         return self
 
-    def __add__(self, other):
+    def __add__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         res = ContextResourceUsage(copy_from=self)
         res += other
         return res
 
-    def __sub__(self, other):
+    def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage":
         res = ContextResourceUsage(copy_from=self)
         res -= other
         return res
 
 
+LoggingContextOrSentinel = Union["LoggingContext", "LoggingContext.Sentinel"]
+
+
 class LoggingContext(object):
     """Additional context for log formatting. Contexts are scoped within a
     "with" block.
""" record["request"] = self.request record["scope"] = self.scope - def start(self): + def start(self) -> None: if get_thread_id() != self.main_thread: logger.warning("Started logcontext %s on different thread", self) return @@ -358,7 +375,7 @@ class LoggingContext(object): if not self.usage_start: self.usage_start = get_thread_resource_usage() - def stop(self): + def stop(self) -> None: if get_thread_id() != self.main_thread: logger.warning("Stopped logcontext %s on different thread", self) return @@ -378,7 +395,7 @@ class LoggingContext(object): self.usage_start = None - def get_resource_usage(self): + def get_resource_usage(self) -> ContextResourceUsage: """Get resources used by this logcontext so far. Returns: @@ -398,11 +415,13 @@ class LoggingContext(object): return res - def _get_cputime(self): + def _get_cputime(self) -> Tuple[float, float]: """Get the cpu usage time so far Returns: Tuple[float, float]: seconds in user mode, seconds in system mode """ + assert self.usage_start is not None + current = get_thread_resource_usage() # Indicate to mypy that we know that self.usage_start is None. @@ -430,13 +449,13 @@ class LoggingContext(object): return utime_delta, stime_delta - def add_database_transaction(self, duration_sec): + def add_database_transaction(self, duration_sec: float) -> None: if duration_sec < 0: raise ValueError("DB txn time can only be non-negative") self._resource_usage.db_txn_count += 1 self._resource_usage.db_txn_duration_sec += duration_sec - def add_database_scheduled(self, sched_sec): + def add_database_scheduled(self, sched_sec: float) -> None: """Record a use of the database pool Args: @@ -447,7 +466,7 @@ class LoggingContext(object): raise ValueError("DB scheduling time can only be non-negative") self._resource_usage.db_sched_duration_sec += sched_sec - def record_event_fetch(self, event_count): + def record_event_fetch(self, event_count: int) -> None: """Record a number of events being fetched from the db Args: @@ -464,10 +483,10 @@ class LoggingContextFilter(logging.Filter): missing fields """ - def __init__(self, **defaults): + def __init__(self, **defaults) -> None: self.defaults = defaults - def filter(self, record): + def filter(self, record) -> Literal[True]: """Add each fields from the logging contexts to the record. Returns: True to include the record in the log output. 
@@ -492,12 +511,13 @@ class PreserveLoggingContext(object): __slots__ = ["current_context", "new_context", "has_parent"] - def __init__(self, new_context=None): + def __init__(self, new_context: Optional[LoggingContext] = None) -> None: if new_context is None: - new_context = LoggingContext.sentinel - self.new_context = new_context + self.new_context = LoggingContext.sentinel # type: LoggingContextOrSentinel + else: + self.new_context = new_context - def __enter__(self): + def __enter__(self) -> None: """Captures the current logging context""" self.current_context = LoggingContext.set_current_context(self.new_context) @@ -506,7 +526,7 @@ class PreserveLoggingContext(object): if not self.current_context.alive: logger.debug("Entering dead context: %s", self.current_context) - def __exit__(self, type, value, traceback): + def __exit__(self, type, value, traceback) -> None: """Restores the current logging context""" context = LoggingContext.set_current_context(self.current_context) @@ -525,7 +545,9 @@ class PreserveLoggingContext(object): logger.debug("Restoring dead context: %s", self.current_context) -def nested_logging_context(suffix, parent_context=None): +def nested_logging_context( + suffix: str, parent_context: Optional[LoggingContext] = None +) -> LoggingContext: """Creates a new logging context as a child of another. The nested logging context will have a 'request' made up of the parent context's @@ -546,10 +568,12 @@ def nested_logging_context(suffix, parent_context=None): Returns: LoggingContext: new logging context. """ - if parent_context is None: - parent_context = LoggingContext.current_context() + if parent_context is not None: + context = parent_context # type: LoggingContextOrSentinel + else: + context = LoggingContext.current_context() return LoggingContext( - parent_context=parent_context, request=parent_context.request + "-" + suffix + parent_context=context, request=str(context.request) + "-" + suffix ) @@ -654,7 +678,10 @@ def make_deferred_yieldable(deferred): return deferred -def _set_context_cb(result, context): +ResultT = TypeVar("ResultT") + + +def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT: """A callback function which just sets the logging context""" LoggingContext.set_current_context(context) return result From 1f5f3ae8b1c5db96d36ac7c104f13553bc4283da Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 8 Mar 2020 14:49:33 +0100 Subject: [PATCH 226/382] Add options to disable setting profile info for prevent changes. 
--- synapse/config/registration.py | 11 +++++++++++ synapse/handlers/profile.py | 10 ++++++++++ tests/handlers/test_profile.py | 33 ++++++++++++++++++++++++++++++++- 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 9bb3beedbc..d9f452dcea 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -129,6 +129,9 @@ class RegistrationConfig(Config): raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) + self.disable_set_displayname = config.get("disable_set_displayname", False) + self.disable_set_avatar_url = config.get("disable_set_avatar_url", False) + self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False ) @@ -330,6 +333,14 @@ class RegistrationConfig(Config): #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process + # If enabled, don't let users set their own display names/avatars + # other than for the very first time (unless they are a server admin). + # Useful when provisioning users based on the contents of a 3rd party + # directory and to avoid ambiguities. + # + # disable_set_displayname: False + # disable_set_avatar_url: False + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 50ce0c585b..fb7e84f3b8 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -157,6 +157,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if not by_admin and self.hs.config.disable_set_displayname: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.display_name: + raise SynapseError(400, "Changing displayname is disabled on this server") + if len(new_displayname) > MAX_DISPLAYNAME_LEN: raise SynapseError( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) @@ -218,6 +223,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if not by_admin and self.hs.config.disable_set_avatar_url: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.avatar_url: + raise SynapseError(400, "Changing avatar url is disabled on this server") + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: raise SynapseError( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index d60c124eec..b85520c688 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -19,7 +19,7 @@ from mock import Mock, NonCallableMock from twisted.internet import defer import synapse.types -from synapse.api.errors import AuthError +from synapse.api.errors import AuthError, SynapseError from synapse.handlers.profile import MasterProfileHandler from synapse.types import UserID @@ -70,6 +70,7 @@ class ProfileTestCase(unittest.TestCase): yield self.store.create_profile(self.frank.localpart) self.handler = hs.get_profile_handler() + self.config = hs.config @defer.inlineCallbacks def test_get_my_name(self): @@ -90,6 +91,19 @@ class ProfileTestCase(unittest.TestCase): "Frank Jr.", ) + @defer.inlineCallbacks + def 
test_set_my_name_if_disabled(self): + self.config.disable_set_displayname = True + + # Set first displayname is allowed, if displayname is null + self.store.set_profile_displayname(self.frank.localpart, "Frank") + + d = self.handler.set_displayname( + self.frank, synapse.types.create_requester(self.frank), "Frank Jr." + ) + + yield self.assertFailure(d, SynapseError) + @defer.inlineCallbacks def test_set_my_name_noauth(self): d = self.handler.set_displayname( @@ -147,3 +161,20 @@ class ProfileTestCase(unittest.TestCase): (yield self.store.get_profile_avatar_url(self.frank.localpart)), "http://my.server/pic.gif", ) + + @defer.inlineCallbacks + def test_set_my_avatar_if_disabled(self): + self.config.disable_set_avatar_url = True + + # Set first time avatar is allowed, if displayname is null + self.store.set_profile_avatar_url( + self.frank.localpart, "http://my.server/me.png" + ) + + d = self.handler.set_avatar_url( + self.frank, + synapse.types.create_requester(self.frank), + "http://my.server/pic.gif", + ) + + yield self.assertFailure(d, SynapseError) From fb078f921b4d49fe3087d89563bce7b8cee0292c Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 8 Mar 2020 15:19:07 +0100 Subject: [PATCH 227/382] changelog --- changelog.d/7053.feature | 1 + docs/sample_config.yaml | 8 ++++++++ synapse/config/registration.py | 4 ++-- synapse/handlers/profile.py | 10 ++++++++-- 4 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 changelog.d/7053.feature diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature new file mode 100644 index 0000000000..79955b9780 --- /dev/null +++ b/changelog.d/7053.feature @@ -0,0 +1 @@ +Add options to disable setting profile info for prevent changes. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 54cbe840d5..d646f0cefe 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1057,6 +1057,14 @@ account_threepid_delegates: #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process +# If enabled, don't let users set their own display names/avatars +# other than for the very first time (unless they are a server admin). +# Useful when provisioning users based on the contents of a 3rd party +# directory and to avoid ambiguities. +# +#disable_set_displayname: False +#disable_set_avatar_url: False + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d9f452dcea..bdbd6f3130 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -338,8 +338,8 @@ class RegistrationConfig(Config): # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. 
# - # disable_set_displayname: False - # disable_set_avatar_url: False + #disable_set_displayname: False + #disable_set_avatar_url: False # Users who register on this homeserver will automatically be joined # to these rooms diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index fb7e84f3b8..445981bf3d 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -160,7 +160,10 @@ class BaseProfileHandler(BaseHandler): if not by_admin and self.hs.config.disable_set_displayname: profile = yield self.store.get_profileinfo(target_user.localpart) if profile.display_name: - raise SynapseError(400, "Changing displayname is disabled on this server") + raise SynapseError( + 400, + "Changing displayname is disabled on this server" + ) if len(new_displayname) > MAX_DISPLAYNAME_LEN: raise SynapseError( @@ -226,7 +229,10 @@ class BaseProfileHandler(BaseHandler): if not by_admin and self.hs.config.disable_set_avatar_url: profile = yield self.store.get_profileinfo(target_user.localpart) if profile.avatar_url: - raise SynapseError(400, "Changing avatar url is disabled on this server") + raise SynapseError( + 400, + "Changing avatar url is disabled on this server" + ) if len(new_avatar_url) > MAX_AVATAR_URL_LEN: raise SynapseError( From ce460dc31c6de5852277310db825d23c27d4b9fd Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 8 Mar 2020 15:22:43 +0100 Subject: [PATCH 228/382] lint --- synapse/handlers/profile.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 445981bf3d..b049dd8e26 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -161,8 +161,7 @@ class BaseProfileHandler(BaseHandler): profile = yield self.store.get_profileinfo(target_user.localpart) if profile.display_name: raise SynapseError( - 400, - "Changing displayname is disabled on this server" + 400, "Changing displayname is disabled on this server" ) if len(new_displayname) > MAX_DISPLAYNAME_LEN: @@ -230,8 +229,7 @@ class BaseProfileHandler(BaseHandler): profile = yield self.store.get_profileinfo(target_user.localpart) if profile.avatar_url: raise SynapseError( - 400, - "Changing avatar url is disabled on this server" + 400, "Changing avatar url is disabled on this server" ) if len(new_avatar_url) > MAX_AVATAR_URL_LEN: From 20545a2199359f627977d14d477d0288f7fb3a07 Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 8 Mar 2020 15:28:00 +0100 Subject: [PATCH 229/382] lint2 --- docs/sample_config.yaml | 4 ++-- synapse/config/registration.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d646f0cefe..a73e4498fe 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1062,8 +1062,8 @@ account_threepid_delegates: # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. 
# -#disable_set_displayname: False -#disable_set_avatar_url: False +#disable_set_displayname: false +#disable_set_avatar_url: false # Users who register on this homeserver will automatically be joined # to these rooms diff --git a/synapse/config/registration.py b/synapse/config/registration.py index bdbd6f3130..0422c39451 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -338,8 +338,8 @@ class RegistrationConfig(Config): # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. # - #disable_set_displayname: False - #disable_set_avatar_url: False + #disable_set_displayname: false + #disable_set_avatar_url: false # Users who register on this homeserver will automatically be joined # to these rooms From 99bbe177b67f85fb70be61d47068a57fbb3b92f6 Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Sun, 8 Mar 2020 21:58:12 +0100 Subject: [PATCH 230/382] add disable_3pid_changes --- docs/sample_config.yaml | 5 +++++ synapse/config/registration.py | 6 ++++++ synapse/rest/client/v2_alpha/account.py | 10 ++++++++++ 3 files changed, 21 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a73e4498fe..d3ecffac7d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1065,6 +1065,11 @@ account_threepid_delegates: #disable_set_displayname: false #disable_set_avatar_url: false +# If true, stop users from trying to change the 3PIDs associated with +# their accounts. +# +#disable_3pid_changes: false + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 0422c39451..1abc0a79af 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -131,6 +131,7 @@ class RegistrationConfig(Config): self.disable_set_displayname = config.get("disable_set_displayname", False) self.disable_set_avatar_url = config.get("disable_set_avatar_url", False) + self.disable_3pid_changes = config.get("disable_3pid_changes", False) self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False @@ -341,6 +342,11 @@ class RegistrationConfig(Config): #disable_set_displayname: false #disable_set_avatar_url: false + # If true, stop users from trying to change the 3PIDs associated with + # their accounts. 
+ # + #disable_3pid_changes: false + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index dc837d6c75..97bddf36d9 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -599,6 +599,9 @@ class ThreepidRestServlet(RestServlet): return 200, {"threepids": threepids} async def on_POST(self, request): + if self.hs.config.disable_3pid_changes: + raise SynapseError(400, "3PID changes disabled on this server") + requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -643,6 +646,9 @@ class ThreepidAddRestServlet(RestServlet): @interactive_auth_handler async def on_POST(self, request): + if self.hs.config.disable_3pid_changes: + raise SynapseError(400, "3PID changes disabled on this server") + requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -738,10 +744,14 @@ class ThreepidDeleteRestServlet(RestServlet): def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() + self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() async def on_POST(self, request): + if self.hs.config.disable_3pid_changes: + raise SynapseError(400, "3PID changes disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, ["medium", "address"]) From 66315d862fdec0ddc1414010626b344d48c14167 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 9 Mar 2020 07:19:24 -0400 Subject: [PATCH 231/382] Update routing of fallback auth in the worker docs. (#7048) --- changelog.d/7048.doc | 1 + docs/workers.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/7048.doc diff --git a/changelog.d/7048.doc b/changelog.d/7048.doc new file mode 100644 index 0000000000..c9666f333e --- /dev/null +++ b/changelog.d/7048.doc @@ -0,0 +1 @@ +Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. diff --git a/docs/workers.md b/docs/workers.md index 0d84a58958..cf460283d5 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -273,6 +273,7 @@ Additionally, the following REST endpoints can be handled, but all requests must be routed to the same instance: ^/_matrix/client/(r0|unstable)/register$ + ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$ Pagination requests can also be handled, but all requests with the same path room must be routed to the same instance. Additionally, care must be taken to From 06eb5cae08272c401a586991fc81f788825f910b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 9 Mar 2020 08:58:25 -0400 Subject: [PATCH 232/382] Remove special auth and redaction rules for aliases events in experimental room ver. 
(#7037) --- changelog.d/7037.feature | 1 + synapse/api/room_versions.py | 9 ++- synapse/crypto/event_signing.py | 2 +- synapse/event_auth.py | 8 +- synapse/events/utils.py | 12 ++- synapse/storage/data_stores/main/events.py | 10 ++- tests/events/test_utils.py | 35 +++++++- tests/test_event_auth.py | 93 +++++++++++++++++++++- 8 files changed, 148 insertions(+), 22 deletions(-) create mode 100644 changelog.d/7037.feature diff --git a/changelog.d/7037.feature b/changelog.d/7037.feature new file mode 100644 index 0000000000..4bc1b3b19f --- /dev/null +++ b/changelog.d/7037.feature @@ -0,0 +1 @@ +Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index cf7ee60d3a..871179749a 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -57,7 +57,7 @@ class RoomVersion(object): state_res = attr.ib() # int; one of the StateResolutionVersions enforce_key_validity = attr.ib() # bool - # bool: before MSC2260, anyone was allowed to send an aliases event + # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules special_case_aliases_auth = attr.ib(type=bool, default=False) @@ -102,12 +102,13 @@ class RoomVersions(object): enforce_key_validity=True, special_case_aliases_auth=True, ) - MSC2260_DEV = RoomVersion( - "org.matrix.msc2260", + MSC2432_DEV = RoomVersion( + "org.matrix.msc2432", RoomDisposition.UNSTABLE, EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=True, + special_case_aliases_auth=False, ) @@ -119,6 +120,6 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V3, RoomVersions.V4, RoomVersions.V5, - RoomVersions.MSC2260_DEV, + RoomVersions.MSC2432_DEV, ) } # type: Dict[str, RoomVersion] diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 5f733c1cf5..0422c43fab 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -140,7 +140,7 @@ def compute_event_signature( Returns: a dictionary in the same format of an event's signatures field. """ - redact_json = prune_event_dict(event_dict) + redact_json = prune_event_dict(room_version, event_dict) redact_json.pop("age_ts", None) redact_json.pop("unsigned", None) if logger.isEnabledFor(logging.DEBUG): diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 472f165044..46beb5334f 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -137,7 +137,7 @@ def check( raise AuthError(403, "This room has been marked as unfederatable.") # 4. If type is m.room.aliases - if event.type == EventTypes.Aliases: + if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth: # 4a. If event has no state_key, reject if not event.is_state(): raise AuthError(403, "Alias event must be a state event") @@ -152,10 +152,8 @@ def check( ) # 4c. Otherwise, allow. - # This is removed by https://github.com/matrix-org/matrix-doc/pull/2260 - if room_version_obj.special_case_aliases_auth: - logger.debug("Allowing! %s", event) - return + logger.debug("Allowing! 
%s", event) + return if logger.isEnabledFor(logging.DEBUG): logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()]) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index bc6f98ae3b..b75b097e5e 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -23,6 +23,7 @@ from frozendict import frozendict from twisted.internet import defer from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.room_versions import RoomVersion from synapse.util.async_helpers import yieldable_gather_results from . import EventBase @@ -43,7 +44,7 @@ def prune_event(event: EventBase) -> EventBase: the user has specified, but we do want to keep necessary information like type, state_key etc. """ - pruned_event_dict = prune_event_dict(event.get_dict()) + pruned_event_dict = prune_event_dict(event.room_version, event.get_dict()) from . import make_event_from_dict @@ -57,15 +58,12 @@ def prune_event(event: EventBase) -> EventBase: return pruned_event -def prune_event_dict(event_dict): +def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict: """Redacts the event_dict in the same way as `prune_event`, except it operates on dicts rather than event objects - Args: - event_dict (dict) - Returns: - dict: A copy of the pruned event dict + A copy of the pruned event dict """ allowed_keys = [ @@ -112,7 +110,7 @@ def prune_event_dict(event_dict): "kick", "redact", ) - elif event_type == EventTypes.Aliases: + elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth: add_fields("aliases") elif event_type == EventTypes.RoomHistoryVisibility: add_fields("history_visibility") diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 8ae23df00a..d593ef47b8 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1168,7 +1168,11 @@ class EventsStore( and original_event.internal_metadata.is_redacted() ): # Redaction was allowed - pruned_json = encode_json(prune_event_dict(original_event.get_dict())) + pruned_json = encode_json( + prune_event_dict( + original_event.room_version, original_event.get_dict() + ) + ) else: # Redaction wasn't allowed pruned_json = None @@ -1929,7 +1933,9 @@ class EventsStore( return # Prune the event's dict then convert it to JSON. - pruned_json = encode_json(prune_event_dict(event.get_dict())) + pruned_json = encode_json( + prune_event_dict(event.room_version, event.get_dict()) + ) # Update the event_json table to replace the event's JSON with the pruned # JSON. diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 45d55b9e94..ab5f5ac549 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.utils import ( copy_power_levels_contents, @@ -36,9 +37,9 @@ class PruneEventTestCase(unittest.TestCase): """ Asserts that a new event constructed with `evdict` will look like `matchdict` when it is redacted. 
""" - def run_test(self, evdict, matchdict): + def run_test(self, evdict, matchdict, **kwargs): self.assertEquals( - prune_event(make_event_from_dict(evdict)).get_dict(), matchdict + prune_event(make_event_from_dict(evdict, **kwargs)).get_dict(), matchdict ) def test_minimal(self): @@ -128,6 +129,36 @@ class PruneEventTestCase(unittest.TestCase): }, ) + def test_alias_event(self): + """Alias events have special behavior up through room version 6.""" + self.run_test( + { + "type": "m.room.aliases", + "event_id": "$test:domain", + "content": {"aliases": ["test"]}, + }, + { + "type": "m.room.aliases", + "event_id": "$test:domain", + "content": {"aliases": ["test"]}, + "signatures": {}, + "unsigned": {}, + }, + ) + + def test_msc2432_alias_event(self): + """After MSC2432, alias events have no special behavior.""" + self.run_test( + {"type": "m.room.aliases", "content": {"aliases": ["test"]}}, + { + "type": "m.room.aliases", + "content": {}, + "signatures": {}, + "unsigned": {}, + }, + room_version=RoomVersions.MSC2432_DEV, + ) + class SerializeEventTestCase(unittest.TestCase): def serialize(self, ev, fields): diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index bfa5d6f510..6c2351cf55 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -19,6 +19,7 @@ from synapse import event_auth from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict +from synapse.types import get_domain_from_id class EventAuthTestCase(unittest.TestCase): @@ -51,7 +52,7 @@ class EventAuthTestCase(unittest.TestCase): _random_state_event(joiner), auth_events, do_sig_check=False, - ), + ) def test_state_default_level(self): """ @@ -87,6 +88,83 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V1, _random_state_event(king), auth_events, do_sig_check=False, ) + def test_alias_event(self): + """Alias events have special behavior up through room version 6.""" + creator = "@creator:example.com" + other = "@other:example.com" + auth_events = { + ("m.room.create", ""): _create_event(creator), + ("m.room.member", creator): _join_event(creator), + } + + # creator should be able to send aliases + event_auth.check( + RoomVersions.V1, _alias_event(creator), auth_events, do_sig_check=False, + ) + + # Reject an event with no state key. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V1, + _alias_event(creator, state_key=""), + auth_events, + do_sig_check=False, + ) + + # If the domain of the sender does not match the state key, reject. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.V1, + _alias_event(creator, state_key="test.com"), + auth_events, + do_sig_check=False, + ) + + # Note that the member does *not* need to be in the room. + event_auth.check( + RoomVersions.V1, _alias_event(other), auth_events, do_sig_check=False, + ) + + def test_msc2432_alias_event(self): + """After MSC2432, alias events have no special behavior.""" + creator = "@creator:example.com" + other = "@other:example.com" + auth_events = { + ("m.room.create", ""): _create_event(creator), + ("m.room.member", creator): _join_event(creator), + } + + # creator should be able to send aliases + event_auth.check( + RoomVersions.MSC2432_DEV, + _alias_event(creator), + auth_events, + do_sig_check=False, + ) + + # No particular checks are done on the state key. 
+ event_auth.check( + RoomVersions.MSC2432_DEV, + _alias_event(creator, state_key=""), + auth_events, + do_sig_check=False, + ) + event_auth.check( + RoomVersions.MSC2432_DEV, + _alias_event(creator, state_key="test.com"), + auth_events, + do_sig_check=False, + ) + + # Per standard auth rules, the member must be in the room. + with self.assertRaises(AuthError): + event_auth.check( + RoomVersions.MSC2432_DEV, + _alias_event(other), + auth_events, + do_sig_check=False, + ) + # helpers for making events @@ -131,6 +209,19 @@ def _power_levels_event(sender, content): ) +def _alias_event(sender, **kwargs): + data = { + "room_id": TEST_ROOM_ID, + "event_id": _get_event_id(), + "type": "m.room.aliases", + "sender": sender, + "state_key": get_domain_from_id(sender), + "content": {"aliases": []}, + } + data.update(**kwargs) + return make_event_from_dict(data) + + def _random_state_event(sender): return make_event_from_dict( { From 87c65576e08c1cdceb821bca15880110c4edd203 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 9 Mar 2020 13:58:38 +0000 Subject: [PATCH 233/382] Move `get_time_of_last_push_action_before` to the `EventPushActionsWorkerStore` Fixes #7054 I also had a look at the rest of the functions in `EventPushActionsStore` and in the push notifications send code and it looks to me like there shouldn't be any other method with this issue in this part of the codebase. --- .../data_stores/main/event_push_actions.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py index 9988a6d3fc..8eed590929 100644 --- a/synapse/storage/data_stores/main/event_push_actions.py +++ b/synapse/storage/data_stores/main/event_push_actions.py @@ -608,6 +608,23 @@ class EventPushActionsWorkerStore(SQLBaseStore): return range_end + @defer.inlineCallbacks + def get_time_of_last_push_action_before(self, stream_ordering): + def f(txn): + sql = ( + "SELECT e.received_ts" + " FROM event_push_actions AS ep" + " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id" + " WHERE ep.stream_ordering > ?" + " ORDER BY ep.stream_ordering ASC" + " LIMIT 1" + ) + txn.execute(sql, (stream_ordering,)) + return txn.fetchone() + + result = yield self.db.runInteraction("get_time_of_last_push_action_before", f) + return result[0] if result else None + class EventPushActionsStore(EventPushActionsWorkerStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" @@ -735,23 +752,6 @@ class EventPushActionsStore(EventPushActionsWorkerStore): pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) return push_actions - @defer.inlineCallbacks - def get_time_of_last_push_action_before(self, stream_ordering): - def f(txn): - sql = ( - "SELECT e.received_ts" - " FROM event_push_actions AS ep" - " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id" - " WHERE ep.stream_ordering > ?" 
- " ORDER BY ep.stream_ordering ASC" - " LIMIT 1" - ) - txn.execute(sql, (stream_ordering,)) - return txn.fetchone() - - result = yield self.db.runInteraction("get_time_of_last_push_action_before", f) - return result[0] if result else None - @defer.inlineCallbacks def get_latest_push_action_stream_ordering(self): def f(txn): From aee2bae9523a639c31c18ef7fab7a8a08ed3db03 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 9 Mar 2020 14:10:19 +0000 Subject: [PATCH 234/382] Fix undefined `room_id` in `make_summary_text` This would break notifications about un-named rooms when processing notifications in a batch. --- synapse/push/mailer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 4ccaf178ce..73580c1c6c 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -555,10 +555,12 @@ class Mailer(object): else: # If the reason room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" + room_id = reason["room_id"] + sender_ids = list( { notif_events[n["event_id"]].sender - for n in notifs_by_room[reason["room_id"]] + for n in notifs_by_room[room_id] } ) From f9e3a3f4d0ceef55d7254ba412982edf0192ccc1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 9 Mar 2020 14:21:01 +0000 Subject: [PATCH 235/382] Changelog It's the same as in #6964 since it's the most likely cause of the bug and that change hasn't been released yet. --- changelog.d/7055.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7055.misc diff --git a/changelog.d/7055.misc b/changelog.d/7055.misc new file mode 100644 index 0000000000..ec5c004bbe --- /dev/null +++ b/changelog.d/7055.misc @@ -0,0 +1 @@ +Merge worker apps together. 
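A note on the store split that the fix for #7054 above (moving get_time_of_last_push_action_before onto EventPushActionsWorkerStore) relies on: Synapse worker processes only instantiate the *WorkerStore classes, while the main process uses subclasses that add the write paths, so any method the push-sending code calls from a worker has to be defined on the worker base class. A minimal sketch of that layering, with stand-in class bodies rather than Synapse's real ones:

    # Sketch of the worker/main store layering assumed above; SQLBaseStore
    # here is a stub standing in for Synapse's real base class.
    class SQLBaseStore:
        pass

    class EventPushActionsWorkerStore(SQLBaseStore):
        """Read-side helpers; workers only ever instantiate this layer."""

        def get_time_of_last_push_action_before(self, stream_ordering):
            # Would run the SELECT shown in the diff above.
            raise NotImplementedError

    class EventPushActionsStore(EventPushActionsWorkerStore):
        """Main-process store; write and maintenance paths are added here."""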
From 04f4b5f6f87fbba0b2f1a4f011c496de3021c81a Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 Mar 2020 19:51:31 +0100 Subject: [PATCH 236/382] add tests --- tests/handlers/test_profile.py | 6 +- tests/rest/client/v2_alpha/test_account.py | 308 +++++++++++++++++++++ 2 files changed, 311 insertions(+), 3 deletions(-) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index b85520c688..98b508c3d4 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -70,7 +70,7 @@ class ProfileTestCase(unittest.TestCase): yield self.store.create_profile(self.frank.localpart) self.handler = hs.get_profile_handler() - self.config = hs.config + self.hs = hs @defer.inlineCallbacks def test_get_my_name(self): @@ -93,7 +93,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_name_if_disabled(self): - self.config.disable_set_displayname = True + self.hs.config.disable_set_displayname = True # Set first displayname is allowed, if displayname is null self.store.set_profile_displayname(self.frank.localpart, "Frank") @@ -164,7 +164,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_avatar_if_disabled(self): - self.config.disable_set_avatar_url = True + self.hs.config.disable_set_avatar_url = True # Set first time avatar is allowed, if displayname is null self.store.set_profile_avatar_url( diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index c3facc00eb..ac9f200de3 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -325,3 +325,311 @@ class DeactivateTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(request.code, 200) + + +class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + account.register_servlets, + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Email config. 
+ self.email_attempts = [] + + def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): + self.email_attempts.append(msg) + return + + config["email"] = { + "enable_notifs": False, + "template_dir": os.path.abspath( + pkg_resources.resource_filename("synapse", "res/templates") + ), + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + } + config["public_baseurl"] = "https://example.com" + + self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + return self.hs + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.user_id = self.register_user("kermit", "test") + self.user_id_tok = self.login("kermit", "test") + self.email = "test@example.com" + self.url_3pid = b"account/3pid" + + def test_add_email(self): + """Test add mail to profile + """ + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) + + def test_add_email_if_disabled(self): + """Test add mail to profile if disabled + """ + self.hs.config.disable_3pid_changes = True + + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("3PID changes disabled on this server", channel.json_body["error"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_delete_email(self): + """Test delete mail from profile + """ + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=self.user_id, + medium="email", + address=self.email, + validated_at=0, + added_at=0, + ) + ) + + request, channel = self.make_request( + "POST", + b"account/3pid/delete", + { + "medium": "email", + "address": self.email + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Get user + request, channel = 
self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_delete_email_if_disabled(self): + """Test delete mail from profile if disabled + """ + self.hs.config.disable_3pid_changes = True + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=self.user_id, + medium="email", + address=self.email, + validated_at=0, + added_at=0, + ) + ) + + request, channel = self.make_request( + "POST", + b"account/3pid/delete", + { + "medium": "email", + "address": self.email + }, + access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("3PID changes disabled on this server", channel.json_body["error"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) + + def test_cant_add_email_without_clicking_link(self): + """Test that we do actually need to click the link in the email + """ + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + + # Attempt to add email without clicking the link + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("No validated 3pid session found", channel.json_body["error"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_no_valid_token(self): + """Test that we do actually need to request a token and can't just + make a session up. 
+ """ + client_secret = "foobar" + session_id = "weasle" + + # Attempt to add email without even requesting an email + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("No validated 3pid session found", channel.json_body["error"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def _request_token(self, email, client_secret): + request, channel = self.make_request( + "POST", + b"account/3pid/email/requestToken", + {"client_secret": client_secret, "email": email, "send_attempt": 1}, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + return channel.json_body["sid"] + + def _validate_token(self, link): + # Remove the host + path = link.replace("https://example.com", "") + + request, channel = self.make_request("GET", path, shorthand=False) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + def _get_link_from_email(self): + assert self.email_attempts, "No emails have been sent" + + raw_msg = self.email_attempts[-1].decode("UTF-8") + mail = Parser().parsestr(raw_msg) + + text = None + for part in mail.walk(): + if part.get_content_type() == "text/plain": + text = part.get_payload(decode=True).decode("UTF-8") + break + + if not text: + self.fail("Could not find text portion of email to parse") + + match = re.search(r"https://example.com\S+", text) + assert match, "Could not find link in email" + + return match.group(0) From 50ea178c201588b5e6b3f93e1af56aef0b4e8368 Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 Mar 2020 19:57:04 +0100 Subject: [PATCH 237/382] lint --- tests/rest/client/v2_alpha/test_account.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index ac9f200de3..e178a53335 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -438,7 +438,9 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("3PID changes disabled on this server", channel.json_body["error"]) + self.assertEqual( + "3PID changes disabled on this server", channel.json_body["error"] + ) # Get user request, channel = self.make_request( @@ -466,10 +468,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", b"account/3pid/delete", - { - "medium": "email", - "address": self.email - }, + {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) self.render(request) @@ -503,16 +502,15 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "POST", b"account/3pid/delete", - { - "medium": "email", - "address": self.email - }, + {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) 
self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("3PID changes disabled on this server", channel.json_body["error"]) + self.assertEqual( + "3PID changes disabled on this server", channel.json_body["error"] + ) # Get user request, channel = self.make_request( From 7e5f40e7716813f0d32e2efcb32df3c263fbfc63 Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 Mar 2020 21:00:36 +0100 Subject: [PATCH 238/382] fix tests --- tests/handlers/test_profile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 98b508c3d4..f8c0da5ced 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -96,7 +96,7 @@ class ProfileTestCase(unittest.TestCase): self.hs.config.disable_set_displayname = True # Set first displayname is allowed, if displayname is null - self.store.set_profile_displayname(self.frank.localpart, "Frank") + yield self.store.set_profile_displayname(self.frank.localpart, "Frank") d = self.handler.set_displayname( self.frank, synapse.types.create_requester(self.frank), "Frank Jr." @@ -167,7 +167,7 @@ class ProfileTestCase(unittest.TestCase): self.hs.config.disable_set_avatar_url = True # Set first time avatar is allowed, if displayname is null - self.store.set_profile_avatar_url( + yield self.store.set_profile_avatar_url( self.frank.localpart, "http://my.server/me.png" ) From 885134529ffd95dd118d3228e69f0e3553f5a6a7 Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 Mar 2020 22:09:29 +0100 Subject: [PATCH 239/382] updates after review --- changelog.d/7053.feature | 2 +- docs/sample_config.yaml | 10 +++++----- synapse/config/registration.py | 16 ++++++++-------- synapse/handlers/profile.py | 8 ++++---- synapse/rest/client/v2_alpha/account.py | 18 ++++++++++++------ tests/handlers/test_profile.py | 6 +++--- tests/rest/client/v2_alpha/test_account.py | 17 +++++++---------- 7 files changed, 40 insertions(+), 37 deletions(-) diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature index 79955b9780..00f47b2a14 100644 --- a/changelog.d/7053.feature +++ b/changelog.d/7053.feature @@ -1 +1 @@ -Add options to disable setting profile info for prevent changes. \ No newline at end of file +Add options to prevent users from changing their profile or associated 3PIDs. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d3ecffac7d..8333800a10 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1057,18 +1057,18 @@ account_threepid_delegates: #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process -# If enabled, don't let users set their own display names/avatars +# If disabled, don't let users set their own display names/avatars # other than for the very first time (unless they are a server admin). # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. # -#disable_set_displayname: false -#disable_set_avatar_url: false +#enable_set_displayname: true +#enable_set_avatar_url: true -# If true, stop users from trying to change the 3PIDs associated with +# If false, stop users from trying to change the 3PIDs associated with # their accounts. 
# -#disable_3pid_changes: false +#enable_3pid_changes: true # Users who register on this homeserver will automatically be joined # to these rooms diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1abc0a79af..d4897ec9b6 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -129,9 +129,9 @@ class RegistrationConfig(Config): raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) - self.disable_set_displayname = config.get("disable_set_displayname", False) - self.disable_set_avatar_url = config.get("disable_set_avatar_url", False) - self.disable_3pid_changes = config.get("disable_3pid_changes", False) + self.enable_set_displayname = config.get("enable_set_displayname", True) + self.enable_set_avatar_url = config.get("enable_set_avatar_url", True) + self.enable_3pid_changes = config.get("enable_3pid_changes", True) self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False @@ -334,18 +334,18 @@ class RegistrationConfig(Config): #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process - # If enabled, don't let users set their own display names/avatars + # If disabled, don't let users set their own display names/avatars # other than for the very first time (unless they are a server admin). # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. # - #disable_set_displayname: false - #disable_set_avatar_url: false + #enable_set_displayname: true + #enable_set_avatar_url: true - # If true, stop users from trying to change the 3PIDs associated with + # If false, stop users from trying to change the 3PIDs associated with # their accounts. 
# - #disable_3pid_changes: false + #enable_3pid_changes: true # Users who register on this homeserver will automatically be joined # to these rooms diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index b049dd8e26..eb85dba015 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -157,11 +157,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") - if not by_admin and self.hs.config.disable_set_displayname: + if not by_admin and not self.hs.config.enable_set_displayname: profile = yield self.store.get_profileinfo(target_user.localpart) if profile.display_name: raise SynapseError( - 400, "Changing displayname is disabled on this server" + 400, "Changing display name is disabled on this server", Codes.FORBIDDEN ) if len(new_displayname) > MAX_DISPLAYNAME_LEN: @@ -225,11 +225,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") - if not by_admin and self.hs.config.disable_set_avatar_url: + if not by_admin and not self.hs.config.enable_set_avatar_url: profile = yield self.store.get_profileinfo(target_user.localpart) if profile.avatar_url: raise SynapseError( - 400, "Changing avatar url is disabled on this server" + 400, "Changing avatar is disabled on this server", Codes.FORBIDDEN ) if len(new_avatar_url) > MAX_AVATAR_URL_LEN: diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 97bddf36d9..e40136f2f3 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -599,8 +599,10 @@ class ThreepidRestServlet(RestServlet): return 200, {"threepids": threepids} async def on_POST(self, request): - if self.hs.config.disable_3pid_changes: - raise SynapseError(400, "3PID changes disabled on this server") + if not self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -646,8 +648,10 @@ class ThreepidAddRestServlet(RestServlet): @interactive_auth_handler async def on_POST(self, request): - if self.hs.config.disable_3pid_changes: - raise SynapseError(400, "3PID changes disabled on this server") + if not self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -749,8 +753,10 @@ class ThreepidDeleteRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() async def on_POST(self, request): - if self.hs.config.disable_3pid_changes: - raise SynapseError(400, "3PID changes disabled on this server") + if not self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) body = parse_json_object_from_request(request) assert_params_in_dict(body, ["medium", "address"]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index f8c0da5ced..e600b9777b 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -93,7 +93,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_name_if_disabled(self): - self.hs.config.disable_set_displayname = True + self.hs.config.enable_set_displayname = False # Set first 
displayname is allowed, if displayname is null yield self.store.set_profile_displayname(self.frank.localpart, "Frank") @@ -164,9 +164,9 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_avatar_if_disabled(self): - self.hs.config.disable_set_avatar_url = True + self.hs.config.enable_set_avatar_url = False - # Set first time avatar is allowed, if displayname is null + # Set first time avatar is allowed, if avatar is null yield self.store.set_profile_avatar_url( self.frank.localpart, "http://my.server/me.png" ) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index e178a53335..34e40a36d0 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -24,6 +24,7 @@ import pkg_resources import synapse.rest.admin from synapse.api.constants import LoginType, Membership +from synapse.api.errors import Codes from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register @@ -412,7 +413,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def test_add_email_if_disabled(self): """Test add mail to profile if disabled """ - self.hs.config.disable_3pid_changes = True + self.hs.config.enable_3pid_changes = True client_secret = "foobar" session_id = self._request_token(self.email, client_secret) @@ -438,9 +439,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual( - "3PID changes disabled on this server", channel.json_body["error"] - ) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user request, channel = self.make_request( @@ -486,7 +485,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def test_delete_email_if_disabled(self): """Test delete mail from profile if disabled """ - self.hs.config.disable_3pid_changes = True + self.hs.config.enable_3pid_changes = True # Add a threepid self.get_success( @@ -508,9 +507,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual( - "3PID changes disabled on this server", channel.json_body["error"] - ) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user request, channel = self.make_request( @@ -547,7 +544,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("No validated 3pid session found", channel.json_body["error"]) + self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user request, channel = self.make_request( @@ -582,7 +579,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("No validated 3pid session found", channel.json_body["error"]) + self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user request, channel = self.make_request( From 39f6595b4ab108cb451072ae251a91117002191c Mon Sep 17 00:00:00 2001 From: dklimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 9 Mar 2020 22:13:20 +0100 Subject: [PATCH 240/382] lint, fix tests --- synapse/handlers/profile.py | 4 +++- tests/rest/client/v2_alpha/test_account.py | 4 ++-- 2 
files changed, 5 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index eb85dba015..6aa1c0f5e0 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -161,7 +161,9 @@ class BaseProfileHandler(BaseHandler): profile = yield self.store.get_profileinfo(target_user.localpart) if profile.display_name: raise SynapseError( - 400, "Changing display name is disabled on this server", Codes.FORBIDDEN + 400, + "Changing display name is disabled on this server", + Codes.FORBIDDEN, ) if len(new_displayname) > MAX_DISPLAYNAME_LEN: diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 34e40a36d0..99cc9163f3 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -413,7 +413,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def test_add_email_if_disabled(self): """Test add mail to profile if disabled """ - self.hs.config.enable_3pid_changes = True + self.hs.config.enable_3pid_changes = False client_secret = "foobar" session_id = self._request_token(self.email, client_secret) @@ -485,7 +485,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): def test_delete_email_if_disabled(self): """Test delete mail from profile if disabled """ - self.hs.config.enable_3pid_changes = True + self.hs.config.enable_3pid_changes = False # Add a threepid self.get_success( From 6b0efe73e21a5d346111df4dd367bc39a03108bb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 13:49:11 +0000 Subject: [PATCH 241/382] SAML2: render a comprehensible error page if something goes wrong If an error happened while processing a SAML AuthN response, or a client ends up doing a `GET` request to `/authn_response`, then render a customisable error page rather than a confusing error. --- synapse/config/saml2_config.py | 26 ++++++++++++++++++++++++++ synapse/handlers/saml_handler.py | 20 +++++++++++++++++++- synapse/rest/saml2/response_resource.py | 18 +++++++++++++++++- 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 423c158b11..db035bdb5d 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -27,6 +27,18 @@ DEFAULT_USER_MAPPING_PROVIDER = ( "synapse.handlers.saml_handler.DefaultSamlMappingProvider" ) +SAML2_ERROR_DEFAULT_HTML = """ +<html> + <body> + <p>Oops! Something went wrong</p> + <p> + Try logging in again from the application and if the problem persists + please contact the administrator. + </p> + </body> +</html> +"""
+ def _dict_merge(merge_dict, into_dict): """Do a deep merge of two dicts @@ -160,6 +172,13 @@ class SAML2Config(Config): saml2_config.get("saml_session_lifetime", "5m") ) + if "error_html_path" in config: + self.saml2_error_html_content = self.read_file( + config["error_html_path"], "saml2_config.error_html_path", + ) + else: + self.saml2_error_html_content = SAML2_ERROR_DEFAULT_HTML + def _default_saml_config_dict( self, required_attributes: set, optional_attributes: set ): @@ -325,6 +344,13 @@ class SAML2Config(Config): # The default is 'uid'. # #grandfathered_mxid_source_attribute: upn + + # Path to a file containing HTML content to serve in case an error happens + # when the user gets redirected from the SAML IdP back to Synapse. + # If no file is provided, this defaults to some minimalistic HTML telling the + # user that something went wrong and they should try authenticating again. + # + #error_html_path: /path/to/static/content/saml_error.html """ % { "config_dir_path": config_dir_path } diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 9406753393..72c109981b 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -23,6 +23,7 @@ from saml2.client import Saml2Client from synapse.api.errors import SynapseError from synapse.config import ConfigError +from synapse.http.server import finish_request from synapse.http.servlet import parse_string from synapse.module_api import ModuleApi from synapse.types import ( @@ -73,6 +74,8 @@ class SamlHandler: # a lock on the mappings self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock) + self._error_html_content = hs.config.saml2_error_html_content + def handle_redirect_request(self, client_redirect_url): """Handle an incoming request to /login/sso/redirect @@ -114,7 +117,22 @@ class SamlHandler: # the dict. self.expire_sessions() - user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) + try: + user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) + except Exception as e: + # If decoding the response or mapping it to a user failed, then log the + # error and tell the user that something went wrong. + logger.error(e) + + request.setResponseCode(400) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader( + b"Content-Length", b"%d" % (len(self._error_html_content),) + ) + request.write(self._error_html_content.encode("utf8")) + finish_request(request) + return + self._auth_handler.complete_sso_login(user_id, request, relay_state) async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url): diff --git a/synapse/rest/saml2/response_resource.py b/synapse/rest/saml2/response_resource.py index 69ecc5e4b4..a545c13db7 100644 --- a/synapse/rest/saml2/response_resource.py +++ b/synapse/rest/saml2/response_resource.py @@ -14,7 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License.
-from synapse.http.server import DirectServeResource, wrap_html_request_handler +from synapse.http.server import ( + DirectServeResource, + finish_request, + wrap_html_request_handler, +) class SAML2ResponseResource(DirectServeResource): @@ -24,8 +28,20 @@ class SAML2ResponseResource(DirectServeResource): def __init__(self, hs): super().__init__() + self._error_html_content = hs.config.saml2_error_html_content self._saml_handler = hs.get_saml_handler() + async def _async_render_GET(self, request): + # We're not expecting any GET request on that resource if everything goes right, + # but some IdPs sometimes end up responding with a 302 redirect on this endpoint. + # In this case, just tell the user that something went wrong and they should + # try to authenticate again. + request.setResponseCode(400) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(self._error_html_content),)) + request.write(self._error_html_content.encode("utf8")) + finish_request(request) + @wrap_html_request_handler async def _async_render_POST(self, request): return await self._saml_handler.handle_saml_response(request) From 51c094c4ace1ee70f2ca3cb1766121885dbb92da Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 14:00:29 +0000 Subject: [PATCH 242/382] Update sample config --- docs/sample_config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6f6f6fd54b..01957a90dd 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1347,6 +1347,13 @@ saml2_config: # #grandfathered_mxid_source_attribute: upn + # Path to a file containing HTML content to serve in case an error happens + # when the user gets redirected from the SAML IdP back to Synapse. + # If no file is provided, this defaults to some minimalistic HTML telling the + # user that something went wrong and they should try authenticating again. + # + #error_html_path: /path/to/static/content/saml_error.html + # Enable CAS for registration and login. From 156f2718673f88188627c76952102ef08ea34256 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 14:01:24 +0000 Subject: [PATCH 243/382] Changelog --- changelog.d/7058.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7058.feature diff --git a/changelog.d/7058.feature b/changelog.d/7058.feature new file mode 100644 index 0000000000..53ea485e03 --- /dev/null +++ b/changelog.d/7058.feature @@ -0,0 +1 @@ +Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. From 5ec2077bf905ef2edb5e4d6d6028fdc6aaa99c90 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 14:04:20 +0000 Subject: [PATCH 244/382] Lint --- synapse/config/saml2_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index db035bdb5d..d3e281604f 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -344,7 +344,7 @@ class SAML2Config(Config): # The default is 'uid'. # #grandfathered_mxid_source_attribute: upn - + # Path to a file containing HTML content to serve in case an error happens # when the user gets redirected from the SAML IdP back to Synapse. 
# If no file is provided, this defaults to some minimalistic HTML telling the From fe593ef99097f16e7c325c574364536d4b221c92 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 14:19:06 +0000 Subject: [PATCH 245/382] Attempt at appeasing the gods of mypy --- synapse/logging/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 1eccc0e83f..56805120be 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -511,7 +511,7 @@ class PreserveLoggingContext(object): __slots__ = ["current_context", "new_context", "has_parent"] - def __init__(self, new_context: Optional[LoggingContext] = None) -> None: + def __init__(self, new_context: Optional[LoggingContextOrSentinel] = None) -> None: if new_context is None: self.new_context = LoggingContext.sentinel # type: LoggingContextOrSentinel else: From dc6fb56c5ffb41d907b7fd645a701c2d9684afc3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 14:40:28 +0000 Subject: [PATCH 246/382] Hopefully mypy is happy now --- synapse/logging/context.py | 3 ++- synapse/storage/database.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 56805120be..860b99a4c6 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -210,7 +210,7 @@ class LoggingContext(object): class Sentinel(object): """Sentinel to represent the root context""" - __slots__ = ["previous_context", "alive", "request", "scope"] + __slots__ = ["previous_context", "alive", "request", "scope", "tag"] def __init__(self) -> None: # Minimal set for compatibility with LoggingContext @@ -218,6 +218,7 @@ class LoggingContext(object): self.alive = None self.request = None self.scope = None + self.tag = None def __str__(self): return "sentinel" diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 609db40616..e61595336c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -29,7 +29,11 @@ from twisted.internet import defer from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig -from synapse.logging.context import LoggingContext, make_deferred_yieldable +from synapse.logging.context import ( + LoggingContext, + LoggingContextOrSentinel, + make_deferred_yieldable, +) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine @@ -543,7 +547,9 @@ class Database(object): Returns: Deferred: The result of func """ - parent_context = LoggingContext.current_context() + parent_context = ( + LoggingContext.current_context() + ) # type: Optional[LoggingContextOrSentinel] if parent_context == LoggingContext.sentinel: logger.warning( "Starting db connection from sentinel context: metrics will be lost" From 8f826f98ac5d0a08f6726d3157c94265bbb2501c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 10 Mar 2020 17:22:45 +0000 Subject: [PATCH 247/382] Rephrase default message --- synapse/config/saml2_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index d3e281604f..07895c4315 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -32,8 +32,8 @@ SAML2_ERROR_DEFAULT_HTML = """

     <p>Oops! Something went wrong</p>
     <p>
-        Try logging in again from the application and if the problem persists
-        please contact the administrator.
+        Try logging in again from your Matrix client and if the problem persists
+        please contact the server's administrator.
     </p>
From 42ac4ca47709a4a9fb8b71a60c4b92cc615b0908 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 10 Mar 2020 21:26:55 +0100 Subject: [PATCH 248/382] Update synapse/config/registration.py Co-Authored-By: Brendan Abolivier --- synapse/config/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d4897ec9b6..ee737eb40d 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -335,7 +335,7 @@ class RegistrationConfig(Config): #msisdn: http://localhost:8090 # Delegate SMS sending to this local process # If disabled, don't let users set their own display names/avatars - # other than for the very first time (unless they are a server admin). + # (unless they are a server admin) other than for the very first time. # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. # From 751d51dd128be154c01f23f5f614317689336812 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 10 Mar 2020 21:41:25 +0100 Subject: [PATCH 249/382] Update sample_config.yaml --- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8333800a10..5940a6506b 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1058,7 +1058,7 @@ account_threepid_delegates: #msisdn: http://localhost:8090 # Delegate SMS sending to this local process # If disabled, don't let users set their own display names/avatars -# other than for the very first time (unless they are a server admin). +# (unless they are a server admin) other than for the very first time. # Useful when provisioning users based on the contents of a 3rd party # directory and to avoid ambiguities. # From 69ce55c51082d03e549863f2149b4cf10cb1de19 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 15:21:25 +0000 Subject: [PATCH 250/382] Don't filter out dummy events when we're checking the visibility of state --- synapse/handlers/message.py | 2 +- synapse/visibility.py | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0c84c6cec4..b743fc2dcc 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -160,7 +160,7 @@ class MessageHandler(object): raise NotFoundError("Can't find event for token %s" % (at_token,)) visible_events = yield filter_events_for_client( - self.storage, user_id, last_events, apply_retention_policies=False + self.storage, user_id, last_events, filter_send_to_client=False ) event = last_events[0] diff --git a/synapse/visibility.py b/synapse/visibility.py index a48a4f3dfe..1d538b206d 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -49,7 +49,7 @@ def filter_events_for_client( events, is_peeking=False, always_include_ids=frozenset(), - apply_retention_policies=True, + filter_send_to_client=True, ): """ Check which events a user is allowed to see. If the user can see the event but its @@ -65,10 +65,9 @@ def filter_events_for_client( events always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) - apply_retention_policies (bool): Whether to filter out events that's older than - allowed by the room's retention policy. Useful when this function is called - to e.g. 
check whether a user should be allowed to see the state at a given - event rather than to know if it should send an event to a user's client(s). + filter_send_to_client (bool): Whether we're checking an event that's going to be + sent to a client. This might not always be the case since this function can + also be called to check whether a user can see the state at a given point. Returns: Deferred[list[synapse.events.EventBase]] @@ -96,7 +95,7 @@ def filter_events_for_client( erased_senders = yield storage.main.are_users_erased((e.sender for e in events)) - if apply_retention_policies: + if not filter_send_to_client: room_ids = {e.room_id for e in events} retention_policies = {} @@ -119,7 +118,7 @@ def filter_events_for_client( the original event if they can see it as normal. """ - if event.type == "org.matrix.dummy_event": + if event.type == "org.matrix.dummy_event" and filter_send_to_client: return None if not event.is_state() and event.sender in ignore_list: @@ -134,7 +133,7 @@ def filter_events_for_client( # Don't try to apply the room's retention policy if the event is a state event, as # MSC1763 states that retention is only considered for non-state events. - if apply_retention_policies and not event.is_state(): + if filter_send_to_client and not event.is_state(): retention_policy = retention_policies[event.room_id] max_lifetime = retention_policy.get("max_lifetime") From 9c0775e86ab39b193670723927a1caf67f6bfc11 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 15:32:07 +0000 Subject: [PATCH 251/382] Fix condition --- synapse/visibility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index 1d538b206d..d0b2241e48 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -95,7 +95,7 @@ def filter_events_for_client( erased_senders = yield storage.main.are_users_erased((e.sender for e in events)) - if not filter_send_to_client: + if filter_send_to_client: room_ids = {e.room_id for e in events} retention_policies = {} From 2dce68c65110d4fe41efcc7150c9c6300ac71d2c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 17:04:18 +0000 Subject: [PATCH 252/382] Also don't filter out events sent by ignored users when checking state visibility --- synapse/visibility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index d0b2241e48..82a2132427 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -121,7 +121,7 @@ def filter_events_for_client( if event.type == "org.matrix.dummy_event" and filter_send_to_client: return None - if not event.is_state() and event.sender in ignore_list: + if not event.is_state() and event.sender in ignore_list and filter_send_to_client: return None # Until MSC2261 has landed we can't redact malicious alias events, so for From 1cde4cf3f15413b941c699ac5048c464a49137a4 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 18:03:56 +0000 Subject: [PATCH 253/382] Changelog --- changelog.d/7066.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7066.bugfix diff --git a/changelog.d/7066.bugfix b/changelog.d/7066.bugfix new file mode 100644 index 0000000000..94bb096287 --- /dev/null +++ b/changelog.d/7066.bugfix @@ -0,0 +1 @@ +Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. 
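The visibility fixes above all hinge on the same renamed flag: `apply_retention_policies` becomes `filter_send_to_client`, which now gates every check that only makes sense when events are about to be delivered to a client (dummy events, ignored senders, retention), not just retention. A minimal sketch of the resulting call pattern — the wrapper functions here are illustrative; only the `filter_events_for_client` signature is taken from the diffs above:

    from synapse.visibility import filter_events_for_client

    async def state_visible_to_user(storage, user_id, state_events):
        # Visibility check only: we want to know whether the user may see the
        # room state at this point in the DAG, so the delivery-time filters
        # must not run.
        return await filter_events_for_client(
            storage, user_id, state_events, filter_send_to_client=False
        )

    async def events_for_client(storage, user_id, timeline_events):
        # Delivery path: the default (True) applies the full filter set.
        return await filter_events_for_client(storage, user_id, timeline_events)
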
From e38c44b418328e79e8da3e8ed259ee51d2f1c215 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 18:06:07 +0000 Subject: [PATCH 254/382] Lint --- synapse/visibility.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index 82a2132427..fce5855413 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -121,7 +121,11 @@ def filter_events_for_client( if event.type == "org.matrix.dummy_event" and filter_send_to_client: return None - if not event.is_state() and event.sender in ignore_list and filter_send_to_client: + if ( + not event.is_state() + and event.sender in ignore_list + and filter_send_to_client + ): return None # Until MSC2261 has landed we can't redact malicious alias events, so for From 37a9873f6360a8e6f243c3d3d081ff7abc0f9da1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 18:43:41 +0000 Subject: [PATCH 255/382] Also don't fail on aliases events in this case --- synapse/visibility.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index fce5855413..56603eb276 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -132,7 +132,7 @@ def filter_events_for_client( # now we temporarily filter out m.room.aliases entirely to mitigate # abuse, while we spec a better solution to advertising aliases # on rooms. - if event.type == EventTypes.Aliases: + if event.type == EventTypes.Aliases and filter_send_to_client: return None # Don't try to apply the room's retention policy if the event is a state event, as From 8120a238a465de576ad4d171e3072b28e5df32ac Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 18:49:41 +0000 Subject: [PATCH 256/382] Refactor a bit --- synapse/visibility.py | 48 ++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index 56603eb276..bab41182b9 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -118,34 +118,36 @@ def filter_events_for_client( the original event if they can see it as normal. """ - if event.type == "org.matrix.dummy_event" and filter_send_to_client: - return None + # Only run some checks if these events aren't about to be sent to clients. This is + # because, if this is not the case, we're probably only checking if the users can + # see events in the room at that point in the DAG, and that shouldn't be decided + # on those checks. + if filter_send_to_client: + if event.type == "org.matrix.dummy_event": + return None - if ( - not event.is_state() - and event.sender in ignore_list - and filter_send_to_client - ): - return None + if not event.is_state() and event.sender in ignore_list: + return None - # Until MSC2261 has landed we can't redact malicious alias events, so for - # now we temporarily filter out m.room.aliases entirely to mitigate - # abuse, while we spec a better solution to advertising aliases - # on rooms. - if event.type == EventTypes.Aliases and filter_send_to_client: - return None + # Until MSC2261 has landed we can't redact malicious alias events, so for + # now we temporarily filter out m.room.aliases entirely to mitigate + # abuse, while we spec a better solution to advertising aliases + # on rooms. + if event.type == EventTypes.Aliases: + return None - # Don't try to apply the room's retention policy if the event is a state event, as - # MSC1763 states that retention is only considered for non-state events. 
- if filter_send_to_client and not event.is_state(): - retention_policy = retention_policies[event.room_id] - max_lifetime = retention_policy.get("max_lifetime") + # Don't try to apply the room's retention policy if the event is a state + # event, as MSC1763 states that retention is only considered for non-state + # events. + if not event.is_state(): + retention_policy = retention_policies[event.room_id] + max_lifetime = retention_policy.get("max_lifetime") - if max_lifetime is not None: - oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime + if max_lifetime is not None: + oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime - if event.origin_server_ts < oldest_allowed_ts: - return None + if event.origin_server_ts < oldest_allowed_ts: + return None if event.event_id in always_include_ids: return event From b8cfe79ffcc1184547673264563884e0188e47a7 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 19:33:16 +0000 Subject: [PATCH 257/382] Move the default SAML2 error HTML to a dedicated file Also add some JS to it to process any error we might have in the URI (see #6893). --- synapse/config/saml2_config.py | 29 +++++++----------- synapse/res/templates/saml_error.html | 44 +++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 18 deletions(-) create mode 100644 synapse/res/templates/saml_error.html diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 07895c4315..882aa3bb5b 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -15,6 +15,9 @@ # limitations under the License. import logging +import os + +import pkg_resources from synapse.python_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module, load_python_module @@ -27,18 +30,6 @@ DEFAULT_USER_MAPPING_PROVIDER = ( "synapse.handlers.saml_handler.DefaultSamlMappingProvider" ) -SAML2_ERROR_DEFAULT_HTML = """ - - -

-    <p>Oops! Something went wrong</p>
-    <p>
-        Try logging in again from your Matrix client and if the problem persists
-        please contact the server's administrator.
-    </p>
- - -""" - def _dict_merge(merge_dict, into_dict): """Do a deep merge of two dicts @@ -172,12 +163,14 @@ class SAML2Config(Config): saml2_config.get("saml_session_lifetime", "5m") ) - if "error_html_path" in config: - self.saml2_error_html_content = self.read_file( - config["error_html_path"], "saml2_config.error_html_path", - ) - else: - self.saml2_error_html_content = SAML2_ERROR_DEFAULT_HTML + error_html_path = config.get("error_html_path") + if not error_html_path: + template_dir = pkg_resources.resource_filename("synapse", "res/templates") + error_html_path = os.path.join(template_dir, "saml_error.html") + + self.saml2_error_html_content = self.read_file( + error_html_path, "saml2_config.error_html_path", + ) def _default_saml_config_dict( self, required_attributes: set, optional_attributes: set diff --git a/synapse/res/templates/saml_error.html b/synapse/res/templates/saml_error.html new file mode 100644 index 0000000000..c112ac833f --- /dev/null +++ b/synapse/res/templates/saml_error.html @@ -0,0 +1,44 @@ + + + + + SSO error + + +

+    <p>Oops! Something went wrong during authentication<span id="errormsg"></span>.</p>
+    <p>
+        If you are seeing this page after clicking a link sent to you via email, make
+        sure you only click the confirmation link once, and that you open the
+        validation link in the same client you're logging in from.
+    </p>
+    <p>
+        Try logging in again from your Matrix client and if the problem persists
+        please contact the server's administrator.
+    </p>
+
+    <script type="text/javascript">
+        // Error handling to support Auth0 errors that we might get through a GET request
+        // to the validation endpoint. If an error is provided, it's either going to be
+        // located in the query string or in a query string-like URI fragment.
+        // We try to locate the error from any of these two locations, but if we can't find one,
+        // we just don't print anything specific.
+        let searchStr = "";
+        if (window.location.search) {
+            // For some reason window.location.searchParams isn't always defined when
+            // window.location.search is, so we can't just use it right away.
+            searchStr = window.location.search;
+        } else if (window.location.hash) {
+            // Replace the # with a ? so that URLSearchParams does the right thing and
+            // doesn't parse the first parameter incorrectly.
+            searchStr = window.location.hash.replace("#", "?");
+        }
+
+        // We might end up with no error in the URL, so we need to check if we have one
+        // to print one.
+        let errorDesc = new URLSearchParams(searchStr).get("error_description")
+        if (errorDesc) {
+            document.getElementById("errormsg").innerHTML = ` ("${errorDesc}")`;
+        }
+ + + + \ No newline at end of file From e55a240681a2d3adf34eb48198475e9255b53358 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 19:37:04 +0000 Subject: [PATCH 258/382] Changelog --- changelog.d/7067.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7067.feature diff --git a/changelog.d/7067.feature b/changelog.d/7067.feature new file mode 100644 index 0000000000..53ea485e03 --- /dev/null +++ b/changelog.d/7067.feature @@ -0,0 +1 @@ +Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. From 900bca970790f01fd1416b217e678f6ea6325f95 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 19:40:30 +0000 Subject: [PATCH 259/382] Update wording and config --- docs/sample_config.yaml | 3 +++ synapse/config/saml2_config.py | 3 +++ synapse/res/templates/saml_error.html | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 301e6ae6b7..36be995726 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1365,6 +1365,9 @@ saml2_config: # If no file is provided, this defaults to some minimalistic HTML telling the # user that something went wrong and they should try authenticating again. # + # See https://github.com/matrix-org/synapse/blob/master/synapse/res/templates/saml_error.html + # for an example. + # #error_html_path: /path/to/static/content/saml_error.html diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 882aa3bb5b..1526f72748 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -343,6 +343,9 @@ class SAML2Config(Config): # If no file is provided, this defaults to some minimalistic HTML telling the # user that something went wrong and they should try authenticating again. # + # See https://github.com/matrix-org/synapse/blob/master/synapse/res/templates/saml_error.html + # for an example. + # #error_html_path: /path/to/static/content/saml_error.html """ % { "config_dir_path": config_dir_path diff --git a/synapse/res/templates/saml_error.html b/synapse/res/templates/saml_error.html index c112ac833f..223d3a74bc 100644 --- a/synapse/res/templates/saml_error.html +++ b/synapse/res/templates/saml_error.html @@ -24,8 +24,8 @@ // we just don't print anything specific. let searchStr = ""; if (window.location.search) { - // For some reason window.location.searchParams isn't always defined when - // window.location.search is, so we can't just use it right away. + // window.location.searchParams isn't always defined when + // window.location.search is, so it's more reliable to parse the latter. searchStr = window.location.search; } else if (window.location.hash) { // Replace the # with a ? 
so that URLSearchParams does the right thing and From f9e98176bf211593c7cb8661ea5ac97de9a61e31 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 20:31:31 +0000 Subject: [PATCH 260/382] Put the file in the templates directory --- docs/sample_config.yaml | 23 ++++++++++++++++------- synapse/config/saml2_config.py | 33 +++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 36be995726..91eff4c8ad 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1360,15 +1360,24 @@ saml2_config: # #grandfathered_mxid_source_attribute: upn - # Path to a file containing HTML content to serve in case an error happens - # when the user gets redirected from the SAML IdP back to Synapse. - # If no file is provided, this defaults to some minimalistic HTML telling the - # user that something went wrong and they should try authenticating again. + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. # - # See https://github.com/matrix-org/synapse/blob/master/synapse/res/templates/saml_error.html - # for an example. + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. + # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. # - #error_html_path: /path/to/static/content/saml_error.html + # Synapse will look for the following templates in this directory: + # + # * HTML page to display to users if something goes wrong during the + # authentication process: 'saml_error.html'. + # + # This template doesn't currently need any variable to render. + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 1526f72748..3113f11ebb 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -163,13 +163,13 @@ class SAML2Config(Config): saml2_config.get("saml_session_lifetime", "5m") ) - error_html_path = config.get("error_html_path") - if not error_html_path: - template_dir = pkg_resources.resource_filename("synapse", "res/templates") - error_html_path = os.path.join(template_dir, "saml_error.html") + template_dir = saml2_config.get("template_dir") + if not template_dir: + template_dir = pkg_resources.resource_filename("synapse", "res/templates",) self.saml2_error_html_content = self.read_file( - error_html_path, "saml2_config.error_html_path", + os.path.join(template_dir, "saml_error.html"), + "saml2_config.saml_error", ) def _default_saml_config_dict( @@ -338,15 +338,24 @@ class SAML2Config(Config): # #grandfathered_mxid_source_attribute: upn - # Path to a file containing HTML content to serve in case an error happens - # when the user gets redirected from the SAML IdP back to Synapse. - # If no file is provided, this defaults to some minimalistic HTML telling the - # user that something went wrong and they should try authenticating again. + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. # - # See https://github.com/matrix-org/synapse/blob/master/synapse/res/templates/saml_error.html - # for an example. + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. 
+ # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. # - #error_html_path: /path/to/static/content/saml_error.html + # Synapse will look for the following templates in this directory: + # + # * HTML page to display to users if something goes wrong during the + # authentication process: 'saml_error.html'. + # + # This template doesn't currently need any variable to render. + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" """ % { "config_dir_path": config_dir_path } From 0de9f9486a242c8dbee4b9bc65cad166b863094f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 11 Mar 2020 20:39:18 +0000 Subject: [PATCH 261/382] Lint --- synapse/config/saml2_config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 3113f11ebb..8fe64d90f8 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -168,8 +168,7 @@ class SAML2Config(Config): template_dir = pkg_resources.resource_filename("synapse", "res/templates",) self.saml2_error_html_content = self.read_file( - os.path.join(template_dir, "saml_error.html"), - "saml2_config.saml_error", + os.path.join(template_dir, "saml_error.html"), "saml2_config.saml_error", ) def _default_saml_config_dict( From 77d0a4507b1c8ce3a1195851e87e723287332786 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 12 Mar 2020 11:36:27 -0400 Subject: [PATCH 262/382] Add type annotations and comments to auth handler (#7063) --- changelog.d/7063.misc | 1 + synapse/handlers/auth.py | 193 +++++++++++++++++++++------------------ tox.ini | 1 + 3 files changed, 106 insertions(+), 89 deletions(-) create mode 100644 changelog.d/7063.misc diff --git a/changelog.d/7063.misc b/changelog.d/7063.misc new file mode 100644 index 0000000000..e7b1cd3cd8 --- /dev/null +++ b/changelog.d/7063.misc @@ -0,0 +1 @@ +Add type annotations and comments to the auth handler. 
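The annotation pass below follows two conventions worth noting: function signatures gain inline PEP 484 annotations (and their docstrings drop the now-redundant types), while attributes initialised as bare containers use the comment form (`# type: ...`), which mypy accepts where it cannot infer a useful type. A hedged illustration of both, with example names only:

    from typing import Any, Dict, Optional

    class ExampleHandler:
        def __init__(self) -> None:
            # Comment-style annotation for an attribute built up at runtime;
            # a bare {} gives mypy nothing to infer from.
            self.sessions = {}  # type: Dict[str, Dict[str, Any]]

        def get_session_data(
            self, session_id: str, key: str, default: Optional[Any] = None
        ) -> Any:
            # The signature now carries the types, so a docstring need not.
            return self.sessions.get(session_id, {}).get(key, default)
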
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7ca90f91c4..7860f9625e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -18,10 +18,10 @@ import logging import time import unicodedata import urllib.parse -from typing import Any +from typing import Any, Dict, Iterable, List, Optional import attr -import bcrypt +import bcrypt # type: ignore[import] import pymacaroons from twisted.internet import defer @@ -45,7 +45,7 @@ from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread from synapse.module_api import ModuleApi from synapse.push.mailer import load_jinja2_templates -from synapse.types import UserID +from synapse.types import Requester, UserID from synapse.util.caches.expiringcache import ExpiringCache from ._base import BaseHandler @@ -63,11 +63,11 @@ class AuthHandler(BaseHandler): """ super(AuthHandler, self).__init__(hs) - self.checkers = {} # type: dict[str, UserInteractiveAuthChecker] + self.checkers = {} # type: Dict[str, UserInteractiveAuthChecker] for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) if inst.is_enabled(): - self.checkers[inst.AUTH_TYPE] = inst + self.checkers[inst.AUTH_TYPE] = inst # type: ignore self.bcrypt_rounds = hs.config.bcrypt_rounds @@ -124,7 +124,9 @@ class AuthHandler(BaseHandler): self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) @defer.inlineCallbacks - def validate_user_via_ui_auth(self, requester, request_body, clientip): + def validate_user_via_ui_auth( + self, requester: Requester, request_body: Dict[str, Any], clientip: str + ): """ Checks that the user is who they claim to be, via a UI auth. @@ -133,11 +135,11 @@ class AuthHandler(BaseHandler): that it isn't stolen by re-authenticating them. Args: - requester (Requester): The user, as given by the access token + requester: The user, as given by the access token - request_body (dict): The body of the request sent by the client + request_body: The body of the request sent by the client - clientip (str): The IP address of the client. + clientip: The IP address of the client. Returns: defer.Deferred[dict]: the parameters for this request (which may @@ -208,7 +210,9 @@ class AuthHandler(BaseHandler): return self.checkers.keys() @defer.inlineCallbacks - def check_auth(self, flows, clientdict, clientip): + def check_auth( + self, flows: List[List[str]], clientdict: Dict[str, Any], clientip: str + ): """ Takes a dictionary sent by the client in the login / registration protocol and handles the User-Interactive Auth flow. @@ -223,14 +227,14 @@ class AuthHandler(BaseHandler): decorator. Args: - flows (list): A list of login flows. Each flow is an ordered list of - strings representing auth-types. At least one full - flow must be completed in order for auth to be successful. + flows: A list of login flows. Each flow is an ordered list of + strings representing auth-types. At least one full + flow must be completed in order for auth to be successful. clientdict: The dictionary from the client root level, not the 'auth' key: this method prompts for auth if none is sent. - clientip (str): The IP address of the client. + clientip: The IP address of the client. 
Returns: defer.Deferred[dict, dict, str]: a deferred tuple of @@ -250,7 +254,7 @@ class AuthHandler(BaseHandler): """ authdict = None - sid = None + sid = None # type: Optional[str] if clientdict and "auth" in clientdict: authdict = clientdict["auth"] del clientdict["auth"] @@ -283,9 +287,9 @@ class AuthHandler(BaseHandler): creds = session["creds"] # check auth type currently being presented - errordict = {} + errordict = {} # type: Dict[str, Any] if "type" in authdict: - login_type = authdict["type"] + login_type = authdict["type"] # type: str try: result = yield self._check_auth_dict(authdict, clientip) if result: @@ -326,7 +330,7 @@ class AuthHandler(BaseHandler): raise InteractiveAuthIncompleteError(ret) @defer.inlineCallbacks - def add_oob_auth(self, stagetype, authdict, clientip): + def add_oob_auth(self, stagetype: str, authdict: Dict[str, Any], clientip: str): """ Adds the result of out-of-band authentication into an existing auth session. Currently used for adding the result of fallback auth. @@ -348,7 +352,7 @@ class AuthHandler(BaseHandler): return True return False - def get_session_id(self, clientdict): + def get_session_id(self, clientdict: Dict[str, Any]) -> Optional[str]: """ Gets the session ID for a client given the client dictionary @@ -356,7 +360,7 @@ class AuthHandler(BaseHandler): clientdict: The dictionary sent by the client in the request Returns: - str|None: The string session ID the client sent. If the client did + The string session ID the client sent. If the client did not send a session ID, returns None. """ sid = None @@ -366,40 +370,42 @@ class AuthHandler(BaseHandler): sid = authdict["session"] return sid - def set_session_data(self, session_id, key, value): + def set_session_data(self, session_id: str, key: str, value: Any) -> None: """ Store a key-value pair into the sessions data associated with this request. This data is stored server-side and cannot be modified by the client. Args: - session_id (string): The ID of this session as returned from check_auth - key (string): The key to store the data under - value (any): The data to store + session_id: The ID of this session as returned from check_auth + key: The key to store the data under + value: The data to store """ sess = self._get_session_info(session_id) sess.setdefault("serverdict", {})[key] = value self._save_session(sess) - def get_session_data(self, session_id, key, default=None): + def get_session_data( + self, session_id: str, key: str, default: Optional[Any] = None + ) -> Any: """ Retrieve data stored with set_session_data Args: - session_id (string): The ID of this session as returned from check_auth - key (string): The key to store the data under - default (any): Value to return if the key has not been set + session_id: The ID of this session as returned from check_auth + key: The key to store the data under + default: Value to return if the key has not been set """ sess = self._get_session_info(session_id) return sess.setdefault("serverdict", {}).get(key, default) @defer.inlineCallbacks - def _check_auth_dict(self, authdict, clientip): + def _check_auth_dict(self, authdict: Dict[str, Any], clientip: str): """Attempt to validate the auth dict provided by a client Args: - authdict (object): auth dict provided by the client - clientip (str): IP address of the client + authdict: auth dict provided by the client + clientip: IP address of the client Returns: Deferred: result of the stage verification. 
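As context for the user-interactive auth plumbing above: `clientdict` is the request body as submitted, and its nested `auth` dict names the stage being completed plus the opaque session identifier issued with the preceding 401 response. A rough illustration of the shape (all values here are made up):

    # A client resuming a UI-auth flow resubmits its original request with an
    # "auth" key describing the stage it has just completed.
    clientdict = {
        "auth": {
            "type": "m.login.password",  # the auth stage (login type)
            "session": "abcdef",  # illustrative session ID from the 401
            # ...stage-specific fields, e.g. an identifier and password...
        },
        # ...plus the original request parameters, preserved across the flow...
    }
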
@@ -425,10 +431,10 @@ class AuthHandler(BaseHandler): (canonical_id, callback) = yield self.validate_login(user_id, authdict) return canonical_id - def _get_params_recaptcha(self): + def _get_params_recaptcha(self) -> dict: return {"public_key": self.hs.config.recaptcha_public_key} - def _get_params_terms(self): + def _get_params_terms(self) -> dict: return { "policies": { "privacy_policy": { @@ -445,7 +451,9 @@ class AuthHandler(BaseHandler): } } - def _auth_dict_for_flows(self, flows, session): + def _auth_dict_for_flows( + self, flows: List[List[str]], session: Dict[str, Any] + ) -> Dict[str, Any]: public_flows = [] for f in flows: public_flows.append(f) @@ -455,7 +463,7 @@ class AuthHandler(BaseHandler): LoginType.TERMS: self._get_params_terms, } - params = {} + params = {} # type: Dict[str, Any] for f in public_flows: for stage in f: @@ -468,7 +476,13 @@ class AuthHandler(BaseHandler): "params": params, } - def _get_session_info(self, session_id): + def _get_session_info(self, session_id: Optional[str]) -> dict: + """ + Gets or creates a session given a session ID. + + The session can be used to track data across multiple requests, e.g. for + interactive authentication. + """ if session_id not in self.sessions: session_id = None @@ -481,7 +495,9 @@ class AuthHandler(BaseHandler): return self.sessions[session_id] @defer.inlineCallbacks - def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms): + def get_access_token_for_user_id( + self, user_id: str, device_id: Optional[str], valid_until_ms: Optional[int] + ): """ Creates a new access token for the user with the given user ID. @@ -491,11 +507,11 @@ class AuthHandler(BaseHandler): The device will be recorded in the table if it is not there already. Args: - user_id (str): canonical User ID - device_id (str|None): the device ID to associate with the tokens. + user_id: canonical User ID + device_id: the device ID to associate with the tokens. None to leave the tokens unassociated with a device (deprecated: we should always have a device ID) - valid_until_ms (int|None): when the token is valid until. None for + valid_until_ms: when the token is valid until. None for no expiry. Returns: The access token for the user's session. @@ -530,13 +546,13 @@ class AuthHandler(BaseHandler): return access_token @defer.inlineCallbacks - def check_user_exists(self, user_id): + def check_user_exists(self, user_id: str): """ Checks to see if a user with the given id exists. Will check case insensitively, but return None if there are multiple inexact matches. Args: - (unicode|bytes) user_id: complete @user:id + user_id: complete @user:id Returns: defer.Deferred: (unicode) canonical_user_id, or None if zero or @@ -551,7 +567,7 @@ class AuthHandler(BaseHandler): return None @defer.inlineCallbacks - def _find_user_id_and_pwd_hash(self, user_id): + def _find_user_id_and_pwd_hash(self, user_id: str): """Checks to see if a user with the given id exists. Will check case insensitively, but will return None if there are multiple inexact matches. @@ -581,7 +597,7 @@ class AuthHandler(BaseHandler): ) return result - def get_supported_login_types(self): + def get_supported_login_types(self) -> Iterable[str]: """Get a the login types supported for the /login API By default this is just 'm.login.password' (unless password_enabled is @@ -589,20 +605,20 @@ class AuthHandler(BaseHandler): other login types. 
Returns: - Iterable[str]: login types + login types """ return self._supported_login_types @defer.inlineCallbacks - def validate_login(self, username, login_submission): + def validate_login(self, username: str, login_submission: Dict[str, Any]): """Authenticates the user for the /login API Also used by the user-interactive auth flow to validate m.login.password auth types. Args: - username (str): username supplied by the user - login_submission (dict): the whole of the login submission + username: username supplied by the user + login_submission: the whole of the login submission (including 'type' and other relevant fields) Returns: Deferred[str, func]: canonical user id, and optional callback @@ -690,13 +706,13 @@ class AuthHandler(BaseHandler): raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN) @defer.inlineCallbacks - def check_password_provider_3pid(self, medium, address, password): + def check_password_provider_3pid(self, medium: str, address: str, password: str): """Check if a password provider is able to validate a thirdparty login Args: - medium (str): The medium of the 3pid (ex. email). - address (str): The address of the 3pid (ex. jdoe@example.com). - password (str): The password of the user. + medium: The medium of the 3pid (ex. email). + address: The address of the 3pid (ex. jdoe@example.com). + password: The password of the user. Returns: Deferred[(str|None, func|None)]: A tuple of `(user_id, @@ -724,15 +740,15 @@ class AuthHandler(BaseHandler): return None, None @defer.inlineCallbacks - def _check_local_password(self, user_id, password): + def _check_local_password(self, user_id: str, password: str): """Authenticate a user against the local password database. user_id is checked case insensitively, but will return None if there are multiple inexact matches. Args: - user_id (unicode): complete @user:id - password (unicode): the provided password + user_id: complete @user:id + password: the provided password Returns: Deferred[unicode] the canonical_user_id, or Deferred[None] if unknown user/bad password @@ -755,7 +771,7 @@ class AuthHandler(BaseHandler): return user_id @defer.inlineCallbacks - def validate_short_term_login_token_and_get_user_id(self, login_token): + def validate_short_term_login_token_and_get_user_id(self, login_token: str): auth_api = self.hs.get_auth() user_id = None try: @@ -769,11 +785,11 @@ class AuthHandler(BaseHandler): return user_id @defer.inlineCallbacks - def delete_access_token(self, access_token): + def delete_access_token(self, access_token: str): """Invalidate a single access token Args: - access_token (str): access token to be deleted + access_token: access token to be deleted Returns: Deferred @@ -798,15 +814,17 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def delete_access_tokens_for_user( - self, user_id, except_token_id=None, device_id=None + self, + user_id: str, + except_token_id: Optional[str] = None, + device_id: Optional[str] = None, ): """Invalidate access tokens belonging to a user Args: - user_id (str): ID of user the tokens belong to - except_token_id (str|None): access_token ID which should *not* be - deleted - device_id (str|None): ID of device the tokens are associated with. + user_id: ID of user the tokens belong to + except_token_id: access_token ID which should *not* be deleted + device_id: ID of device the tokens are associated with. 
If None, tokens associated with any device (or no device) will be deleted Returns: @@ -830,7 +848,7 @@ class AuthHandler(BaseHandler): ) @defer.inlineCallbacks - def add_threepid(self, user_id, medium, address, validated_at): + def add_threepid(self, user_id: str, medium: str, address: str, validated_at: int): # check if medium has a valid value if medium not in ["email", "msisdn"]: raise SynapseError( @@ -856,19 +874,20 @@ class AuthHandler(BaseHandler): ) @defer.inlineCallbacks - def delete_threepid(self, user_id, medium, address, id_server=None): + def delete_threepid( + self, user_id: str, medium: str, address: str, id_server: Optional[str] = None + ): """Attempts to unbind the 3pid on the identity servers and deletes it from the local database. Args: - user_id (str) - medium (str) - address (str) - id_server (str|None): Use the given identity server when unbinding + user_id: ID of user to remove the 3pid from. + medium: The medium of the 3pid being removed: "email" or "msisdn". + address: The 3pid address to remove. + id_server: Use the given identity server when unbinding any threepids. If None then will attempt to unbind using the identity server specified when binding (if known). - Returns: Deferred[bool]: Returns True if successfully unbound the 3pid on the identity server, False if identity server doesn't support the @@ -887,17 +906,18 @@ class AuthHandler(BaseHandler): yield self.store.user_delete_threepid(user_id, medium, address) return result - def _save_session(self, session): + def _save_session(self, session: Dict[str, Any]) -> None: + """Update the last used time on the session to now and add it back to the session store.""" # TODO: Persistent storage logger.debug("Saving session %s", session) session["last_used"] = self.hs.get_clock().time_msec() self.sessions[session["id"]] = session - def hash(self, password): + def hash(self, password: str): """Computes a secure hash of password. Args: - password (unicode): Password to hash. + password: Password to hash. Returns: Deferred(unicode): Hashed password. @@ -914,12 +934,12 @@ class AuthHandler(BaseHandler): return defer_to_thread(self.hs.get_reactor(), _do_hash) - def validate_hash(self, password, stored_hash): + def validate_hash(self, password: str, stored_hash: bytes): """Validates that self.hash(password) == stored_hash. Args: - password (unicode): Password to hash. - stored_hash (bytes): Expected hash value. + password: Password to hash. + stored_hash: Expected hash value. Returns: Deferred(bool): Whether self.hash(password) == stored_hash. 
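One pattern in the hunk above is worth a note: `hash` and `validate_hash` push the bcrypt work onto a thread with `defer_to_thread`, because bcrypt is deliberately CPU-expensive and would otherwise stall the reactor. A minimal sketch of that idiom, assuming an illustrative pepper value (the real code also unicode-normalises the password first):

    import bcrypt
    from synapse.logging.context import defer_to_thread

    def hash_password(reactor, password: str, pepper: str, rounds: int = 12):
        """Returns a Deferred that fires with the bcrypt hash of password+pepper."""

        def _do_hash() -> bytes:
            # Runs on a worker thread so the reactor stays responsive.
            return bcrypt.hashpw(
                (password + pepper).encode("utf8"), bcrypt.gensalt(rounds)
            )

        return defer_to_thread(reactor, _do_hash)
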
@@ -1007,7 +1027,9 @@ class MacaroonGenerator(object): hs = attr.ib() - def generate_access_token(self, user_id, extra_caveats=None): + def generate_access_token( + self, user_id: str, extra_caveats: Optional[List[str]] = None + ) -> str: extra_caveats = extra_caveats or [] macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = access") @@ -1020,16 +1042,9 @@ class MacaroonGenerator(object): macaroon.add_first_party_caveat(caveat) return macaroon.serialize() - def generate_short_term_login_token(self, user_id, duration_in_ms=(2 * 60 * 1000)): - """ - - Args: - user_id (unicode): - duration_in_ms (int): - - Returns: - unicode - """ + def generate_short_term_login_token( + self, user_id: str, duration_in_ms: int = (2 * 60 * 1000) + ) -> str: macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = login") now = self.hs.get_clock().time_msec() @@ -1037,12 +1052,12 @@ class MacaroonGenerator(object): macaroon.add_first_party_caveat("time < %d" % (expiry,)) return macaroon.serialize() - def generate_delete_pusher_token(self, user_id): + def generate_delete_pusher_token(self, user_id: str) -> str: macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = delete_pusher") return macaroon.serialize() - def _generate_base_macaroon(self, user_id): + def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon: macaroon = pymacaroons.Macaroon( location=self.hs.config.server_name, identifier="key", diff --git a/tox.ini b/tox.ini index 7622aa19f1..8b4c37c2ee 100644 --- a/tox.ini +++ b/tox.ini @@ -185,6 +185,7 @@ commands = mypy \ synapse/federation/federation_client.py \ synapse/federation/sender \ synapse/federation/transport \ + synapse/handlers/auth.py \ synapse/handlers/directory.py \ synapse/handlers/presence.py \ synapse/handlers/sync.py \ From ebfcbbff9c75ee3e3009b04ba5388c33f2d7e8da Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 13 Mar 2020 19:09:22 +0000 Subject: [PATCH 263/382] Use innerText instead of innerHTML --- synapse/res/templates/saml_error.html | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/res/templates/saml_error.html b/synapse/res/templates/saml_error.html index 223d3a74bc..bfd6449c5d 100644 --- a/synapse/res/templates/saml_error.html +++ b/synapse/res/templates/saml_error.html @@ -37,7 +37,8 @@ // to print one. let errorDesc = new URLSearchParams(searchStr).get("error_description") if (errorDesc) { - document.getElementById("errormsg").innerHTML = ` ("${errorDesc}")`; + + document.getElementById("errormsg").innerText = ` ("${errorDesc}")`; } From beb19cf61a79e4bfb06b4b1fffd51388b64698ca Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 16 Mar 2020 12:16:30 +0000 Subject: [PATCH 264/382] Fix buggy condition in account validity handler (#7074) --- changelog.d/7074.bugfix | 1 + synapse/handlers/account_validity.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7074.bugfix diff --git a/changelog.d/7074.bugfix b/changelog.d/7074.bugfix new file mode 100644 index 0000000000..38d7455971 --- /dev/null +++ b/changelog.d/7074.bugfix @@ -0,0 +1 @@ +Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. 
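The gist of the fix below: `AccountValidityHandler` keyed its email setup off `renew_by_email_enabled` alone, so a deployment with account validity switched off entirely could still end up configuring (and later sending) renewal emails. Schematically, with the surrounding code elided:

    # Before (buggy): the sub-flag alone decided whether to set up emails.
    if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
        ...  # email-specific configuration

    # After: the feature flag gates everything.
    if (
        self._account_validity.enabled
        and self._account_validity.renew_by_email_enabled
        and load_jinja2_templates
    ):
        ...  # email-specific configuration
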
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 829f52eca1..590135d19c 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -44,7 +44,11 @@ class AccountValidityHandler(object): self._account_validity = self.hs.config.account_validity - if self._account_validity.renew_by_email_enabled and load_jinja2_templates: + if ( + self._account_validity.enabled + and self._account_validity.renew_by_email_enabled + and load_jinja2_templates + ): # Don't do email-specific configuration if renewal by email is disabled. try: app_name = self.hs.config.email_app_name From 7df04ca0e6c4140f4f30720db0b9b5148a865287 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 16 Mar 2020 22:31:47 +0000 Subject: [PATCH 265/382] Populate the room version from state events (#7070) Fixes #7065 This is basically the same as https://github.com/matrix-org/synapse/pull/6847 except it tries to populate events from `state_events` rather than `current_state_events`, since the latter might have been cleared from the state of some rooms too early, leaving them with a `NULL` room version. --- changelog.d/7070.bugfix | 1 + .../57/rooms_version_column_3.sql.postgres | 39 +++++++++++++++++++ .../57/rooms_version_column_3.sql.sqlite | 23 +++++++++++ 3 files changed, 63 insertions(+) create mode 100644 changelog.d/7070.bugfix create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite diff --git a/changelog.d/7070.bugfix b/changelog.d/7070.bugfix new file mode 100644 index 0000000000..9031927546 --- /dev/null +++ b/changelog.d/7070.bugfix @@ -0,0 +1 @@ +Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres new file mode 100644 index 0000000000..92aaadde0d --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.postgres @@ -0,0 +1,39 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- When we first added the room_version column to the rooms table, it was populated from +-- the current_state_events table. However, there was an issue causing a background +-- update to clean up the current_state_events table for rooms where the server is no +-- longer participating, before that column could be populated. Therefore, some rooms had +-- a NULL room_version. + +-- The rooms_version_column_2.sql.* delta files were introduced to make the populating +-- synchronous instead of running it in a background update, which fixed this issue. 
+-- However, all of the instances of Synapse installed or updated in the meantime got +-- their rooms table corrupted with NULL room_versions. + +-- This query fishes out the room versions from the create event using the state_events +-- table instead of the current_state_events one, as the former still have all of the +-- create events. + +UPDATE rooms SET room_version=( + SELECT COALESCE(json::json->'content'->>'room_version','1') + FROM state_events se INNER JOIN event_json ej USING (event_id) + WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key='' + LIMIT 1 +) WHERE rooms.room_version IS NULL; + +-- see also rooms_version_column_3.sql.sqlite which has a copy of the above query, using +-- sqlite syntax for the json extraction. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite new file mode 100644 index 0000000000..e19dab97cb --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_3.sql.sqlite @@ -0,0 +1,23 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- see rooms_version_column_3.sql.postgres for details of what's going on here. + +UPDATE rooms SET room_version=( + SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1') + FROM state_events se INNER JOIN event_json ej USING (event_id) + WHERE se.room_id=rooms.room_id AND se.type='m.room.create' AND se.state_key='' + LIMIT 1 +) WHERE rooms.room_version IS NULL; From 6a35046363a6f5d41199256c80eef4ea7e385986 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 17 Mar 2020 11:25:01 +0000 Subject: [PATCH 266/382] Revert "Add options to disable setting profile info for prevent changes. (#7053)" This reverts commit 54dd28621b070ca67de9f773fe9a89e1f4dc19da, reversing changes made to 6640460d054e8f4444046a34bdf638921b31c01e. --- changelog.d/7053.feature | 1 - docs/sample_config.yaml | 13 - synapse/config/registration.py | 17 -- synapse/handlers/profile.py | 16 -- synapse/rest/client/v2_alpha/account.py | 16 -- tests/handlers/test_profile.py | 33 +-- tests/rest/client/v2_alpha/test_account.py | 303 --------------------- 7 files changed, 1 insertion(+), 398 deletions(-) delete mode 100644 changelog.d/7053.feature diff --git a/changelog.d/7053.feature b/changelog.d/7053.feature deleted file mode 100644 index 00f47b2a14..0000000000 --- a/changelog.d/7053.feature +++ /dev/null @@ -1 +0,0 @@ -Add options to prevent users from changing their profile or associated 3PIDs. 
\ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 91eff4c8ad..2ff0dd05a2 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1057,19 +1057,6 @@ account_threepid_delegates: #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process -# If disabled, don't let users set their own display names/avatars -# (unless they are a server admin) other than for the very first time. -# Useful when provisioning users based on the contents of a 3rd party -# directory and to avoid ambiguities. -# -#enable_set_displayname: true -#enable_set_avatar_url: true - -# If false, stop users from trying to change the 3PIDs associated with -# their accounts. -# -#enable_3pid_changes: true - # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/config/registration.py b/synapse/config/registration.py index ee737eb40d..9bb3beedbc 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -129,10 +129,6 @@ class RegistrationConfig(Config): raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) - self.enable_set_displayname = config.get("enable_set_displayname", True) - self.enable_set_avatar_url = config.get("enable_set_avatar_url", True) - self.enable_3pid_changes = config.get("enable_3pid_changes", True) - self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False ) @@ -334,19 +330,6 @@ class RegistrationConfig(Config): #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process - # If disabled, don't let users set their own display names/avatars - # (unless they are a server admin) other than for the very first time. - # Useful when provisioning users based on the contents of a 3rd party - # directory and to avoid ambiguities. - # - #enable_set_displayname: true - #enable_set_avatar_url: true - - # If false, stop users from trying to change the 3PIDs associated with - # their accounts. 
- # - #enable_3pid_changes: true - # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6aa1c0f5e0..50ce0c585b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -157,15 +157,6 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") - if not by_admin and not self.hs.config.enable_set_displayname: - profile = yield self.store.get_profileinfo(target_user.localpart) - if profile.display_name: - raise SynapseError( - 400, - "Changing display name is disabled on this server", - Codes.FORBIDDEN, - ) - if len(new_displayname) > MAX_DISPLAYNAME_LEN: raise SynapseError( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) @@ -227,13 +218,6 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") - if not by_admin and not self.hs.config.enable_set_avatar_url: - profile = yield self.store.get_profileinfo(target_user.localpart) - if profile.avatar_url: - raise SynapseError( - 400, "Changing avatar is disabled on this server", Codes.FORBIDDEN - ) - if len(new_avatar_url) > MAX_AVATAR_URL_LEN: raise SynapseError( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e40136f2f3..dc837d6c75 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -599,11 +599,6 @@ class ThreepidRestServlet(RestServlet): return 200, {"threepids": threepids} async def on_POST(self, request): - if not self.hs.config.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -648,11 +643,6 @@ class ThreepidAddRestServlet(RestServlet): @interactive_auth_handler async def on_POST(self, request): - if not self.hs.config.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -748,16 +738,10 @@ class ThreepidDeleteRestServlet(RestServlet): def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() - self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() async def on_POST(self, request): - if not self.hs.config.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - body = parse_json_object_from_request(request) assert_params_in_dict(body, ["medium", "address"]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index e600b9777b..d60c124eec 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -19,7 +19,7 @@ from mock import Mock, NonCallableMock from twisted.internet import defer import synapse.types -from synapse.api.errors import AuthError, SynapseError +from synapse.api.errors import AuthError from synapse.handlers.profile import MasterProfileHandler from synapse.types import UserID @@ -70,7 +70,6 @@ class ProfileTestCase(unittest.TestCase): yield 
self.store.create_profile(self.frank.localpart) self.handler = hs.get_profile_handler() - self.hs = hs @defer.inlineCallbacks def test_get_my_name(self): @@ -91,19 +90,6 @@ class ProfileTestCase(unittest.TestCase): "Frank Jr.", ) - @defer.inlineCallbacks - def test_set_my_name_if_disabled(self): - self.hs.config.enable_set_displayname = False - - # Set first displayname is allowed, if displayname is null - yield self.store.set_profile_displayname(self.frank.localpart, "Frank") - - d = self.handler.set_displayname( - self.frank, synapse.types.create_requester(self.frank), "Frank Jr." - ) - - yield self.assertFailure(d, SynapseError) - @defer.inlineCallbacks def test_set_my_name_noauth(self): d = self.handler.set_displayname( @@ -161,20 +147,3 @@ class ProfileTestCase(unittest.TestCase): (yield self.store.get_profile_avatar_url(self.frank.localpart)), "http://my.server/pic.gif", ) - - @defer.inlineCallbacks - def test_set_my_avatar_if_disabled(self): - self.hs.config.enable_set_avatar_url = False - - # Set first time avatar is allowed, if avatar is null - yield self.store.set_profile_avatar_url( - self.frank.localpart, "http://my.server/me.png" - ) - - d = self.handler.set_avatar_url( - self.frank, - synapse.types.create_requester(self.frank), - "http://my.server/pic.gif", - ) - - yield self.assertFailure(d, SynapseError) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 99cc9163f3..c3facc00eb 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -24,7 +24,6 @@ import pkg_resources import synapse.rest.admin from synapse.api.constants import LoginType, Membership -from synapse.api.errors import Codes from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register @@ -326,305 +325,3 @@ class DeactivateTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(request.code, 200) - - -class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): - - servlets = [ - account.register_servlets, - login.register_servlets, - synapse.rest.admin.register_servlets_for_client_rest_resource, - ] - - def make_homeserver(self, reactor, clock): - config = self.default_config() - - # Email config. 
- self.email_attempts = [] - - def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): - self.email_attempts.append(msg) - return - - config["email"] = { - "enable_notifs": False, - "template_dir": os.path.abspath( - pkg_resources.resource_filename("synapse", "res/templates") - ), - "smtp_host": "127.0.0.1", - "smtp_port": 20, - "require_transport_security": False, - "smtp_user": None, - "smtp_pass": None, - "notif_from": "test@example.com", - } - config["public_baseurl"] = "https://example.com" - - self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail) - return self.hs - - def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() - - self.user_id = self.register_user("kermit", "test") - self.user_id_tok = self.login("kermit", "test") - self.email = "test@example.com" - self.url_3pid = b"account/3pid" - - def test_add_email(self): - """Test add mail to profile - """ - client_secret = "foobar" - session_id = self._request_token(self.email, client_secret) - - self.assertEquals(len(self.email_attempts), 1) - link = self._get_link_from_email() - - self._validate_token(link) - - request, channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - - self.render(request) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - - # Get user - request, channel = self.make_request( - "GET", self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) - - def test_add_email_if_disabled(self): - """Test add mail to profile if disabled - """ - self.hs.config.enable_3pid_changes = False - - client_secret = "foobar" - session_id = self._request_token(self.email, client_secret) - - self.assertEquals(len(self.email_attempts), 1) - link = self._get_link_from_email() - - self._validate_token(link) - - request, channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - self.render(request) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - - # Get user - request, channel = self.make_request( - "GET", self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def test_delete_email(self): - """Test delete mail from profile - """ - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=self.user_id, - medium="email", - address=self.email, - validated_at=0, - added_at=0, - ) - ) - - request, channel = self.make_request( - "POST", - b"account/3pid/delete", - {"medium": "email", "address": self.email}, - access_token=self.user_id_tok, - ) - self.render(request) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - - # Get user - request, channel = self.make_request( - "GET", 
self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def test_delete_email_if_disabled(self): - """Test delete mail from profile if disabled - """ - self.hs.config.enable_3pid_changes = False - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=self.user_id, - medium="email", - address=self.email, - validated_at=0, - added_at=0, - ) - ) - - request, channel = self.make_request( - "POST", - b"account/3pid/delete", - {"medium": "email", "address": self.email}, - access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - - # Get user - request, channel = self.make_request( - "GET", self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) - - def test_cant_add_email_without_clicking_link(self): - """Test that we do actually need to click the link in the email - """ - client_secret = "foobar" - session_id = self._request_token(self.email, client_secret) - - self.assertEquals(len(self.email_attempts), 1) - - # Attempt to add email without clicking the link - request, channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - self.render(request) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) - - # Get user - request, channel = self.make_request( - "GET", self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def test_no_valid_token(self): - """Test that we do actually need to request a token and can't just - make a session up. 
- """ - client_secret = "foobar" - session_id = "weasle" - - # Attempt to add email without even requesting an email - request, channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - self.render(request) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) - self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) - - # Get user - request, channel = self.make_request( - "GET", self.url_3pid, access_token=self.user_id_tok, - ) - self.render(request) - - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def _request_token(self, email, client_secret): - request, channel = self.make_request( - "POST", - b"account/3pid/email/requestToken", - {"client_secret": client_secret, "email": email, "send_attempt": 1}, - ) - self.render(request) - self.assertEquals(200, channel.code, channel.result) - - return channel.json_body["sid"] - - def _validate_token(self, link): - # Remove the host - path = link.replace("https://example.com", "") - - request, channel = self.make_request("GET", path, shorthand=False) - self.render(request) - self.assertEquals(200, channel.code, channel.result) - - def _get_link_from_email(self): - assert self.email_attempts, "No emails have been sent" - - raw_msg = self.email_attempts[-1].decode("UTF-8") - mail = Parser().parsestr(raw_msg) - - text = None - for part in mail.walk(): - if part.get_content_type() == "text/plain": - text = part.get_payload(decode=True).decode("UTF-8") - break - - if not text: - self.fail("Could not find text portion of email to parse") - - match = re.search(r"https://example.com\S+", text) - assert match, "Could not find link in email" - - return match.group(0) From 60724c46b7dc5300243fd97d5a485564b3e00afe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Mar 2020 07:37:04 -0400 Subject: [PATCH 267/382] Remove special casing of `m.room.aliases` events (#7034) --- changelog.d/7034.removal | 1 + synapse/handlers/room.py | 16 +--------- synapse/rest/client/v1/room.py | 12 -------- tests/rest/admin/test_admin.py | 7 +++++ tests/rest/client/v1/test_directory.py | 41 ++++++++++++++++---------- 5 files changed, 35 insertions(+), 42 deletions(-) create mode 100644 changelog.d/7034.removal diff --git a/changelog.d/7034.removal b/changelog.d/7034.removal new file mode 100644 index 0000000000..be8d20e14f --- /dev/null +++ b/changelog.d/7034.removal @@ -0,0 +1 @@ +Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8ee870f0bb..f580ab2e9f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -292,16 +292,6 @@ class RoomCreationHandler(BaseHandler): except AuthError as e: logger.warning("Unable to update PLs in old room: %s", e) - new_pl_content = copy_power_levels_contents(old_room_pl_state.content) - - # pre-msc2260 rooms may not have the right setting for aliases. If no other - # value is set, set it now. 
- events_default = new_pl_content.get("events_default", 0) - new_pl_content.setdefault("events", {}).setdefault( - EventTypes.Aliases, events_default - ) - - logger.debug("Setting correct PLs in new room to %s", new_pl_content) yield self.event_creation_handler.create_and_send_nonmember_event( requester, { @@ -309,7 +299,7 @@ class RoomCreationHandler(BaseHandler): "state_key": "", "room_id": new_room_id, "sender": requester.user.to_string(), - "content": new_pl_content, + "content": old_room_pl_state.content, }, ratelimit=False, ) @@ -814,10 +804,6 @@ class RoomCreationHandler(BaseHandler): EventTypes.RoomHistoryVisibility: 100, EventTypes.CanonicalAlias: 50, EventTypes.RoomAvatar: 50, - # MSC2260: Allow everybody to send alias events by default - # This will be reudundant on pre-MSC2260 rooms, since the - # aliases event is special-cased. - EventTypes.Aliases: 0, EventTypes.Tombstone: 100, EventTypes.ServerACL: 100, }, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 64f51406fb..bffd43de5f 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -189,12 +189,6 @@ class RoomStateEventRestServlet(TransactionRestServlet): content = parse_json_object_from_request(request) - if event_type == EventTypes.Aliases: - # MSC2260 - raise SynapseError( - 400, "Cannot send m.room.aliases events via /rooms/{room_id}/state" - ) - event_dict = { "type": event_type, "content": content, @@ -242,12 +236,6 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=True) content = parse_json_object_from_request(request) - if event_type == EventTypes.Aliases: - # MSC2260 - raise SynapseError( - 400, "Cannot send m.room.aliases events via /rooms/{room_id}/send" - ) - event_dict = { "type": event_type, "content": content, diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index e5984aaad8..0342aed416 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -868,6 +868,13 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Set this new alias as the canonical alias for this room + self.helper.send_state( + room_id, + "m.room.aliases", + {"aliases": [test_alias]}, + tok=self.admin_user_tok, + state_key="test", + ) self.helper.send_state( room_id, "m.room.canonical_alias", diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 914cf54927..633b7dbda0 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -51,30 +51,26 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.user = self.register_user("user", "test") self.user_tok = self.login("user", "test") - def test_cannot_set_alias_via_state_event(self): - self.ensure_user_joined_room() - url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % ( - self.room_id, - self.hs.hostname, - ) - - data = {"aliases": [self.random_alias(5)]} - request_data = json.dumps(data) - - request, channel = self.make_request( - "PUT", url, request_data, access_token=self.user_tok - ) - self.render(request) - self.assertEqual(channel.code, 400, channel.result) + def test_state_event_not_in_room(self): + self.ensure_user_left_room() + self.set_alias_via_state_event(403) def test_directory_endpoint_not_in_room(self): self.ensure_user_left_room() self.set_alias_via_directory(403) + def test_state_event_in_room_too_long(self): + 
self.ensure_user_joined_room() + self.set_alias_via_state_event(400, alias_length=256) + def test_directory_in_room_too_long(self): self.ensure_user_joined_room() self.set_alias_via_directory(400, alias_length=256) + def test_state_event_in_room(self): + self.ensure_user_joined_room() + self.set_alias_via_state_event(200) + def test_directory_in_room(self): self.ensure_user_joined_room() self.set_alias_via_directory(200) @@ -106,6 +102,21 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(channel.code, 200, channel.result) + def set_alias_via_state_event(self, expected_code, alias_length=5): + url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % ( + self.room_id, + self.hs.hostname, + ) + + data = {"aliases": [self.random_alias(alias_length)]} + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.user_tok + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + def set_alias_via_directory(self, expected_code, alias_length=5): url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length) data = {"room_id": self.room_id} From 7581d30e9f939263f9ab07644f269b6e7cd2d226 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Mar 2020 08:04:49 -0400 Subject: [PATCH 268/382] Remove unused federation endpoint (`query_auth`) (#7026) --- changelog.d/7026.removal | 1 + synapse/federation/federation_base.py | 82 ------------------------- synapse/federation/federation_client.py | 80 +++++++++++++++++++++++- synapse/federation/federation_server.py | 51 --------------- synapse/federation/transport/server.py | 12 ---- 5 files changed, 80 insertions(+), 146 deletions(-) create mode 100644 changelog.d/7026.removal diff --git a/changelog.d/7026.removal b/changelog.d/7026.removal new file mode 100644 index 0000000000..4c8c563bb0 --- /dev/null +++ b/changelog.d/7026.removal @@ -0,0 +1 @@ +Remove the unused query_auth federation endpoint per MSC2451. diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 190ea1fba1..5c991e5412 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -39,10 +39,8 @@ from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, make_deferred_yieldable, - preserve_fn, ) from synapse.types import JsonDict, get_domain_from_id -from synapse.util import unwrapFirstError logger = logging.getLogger(__name__) @@ -57,86 +55,6 @@ class FederationBase(object): self.store = hs.get_datastore() self._clock = hs.get_clock() - @defer.inlineCallbacks - def _check_sigs_and_hash_and_fetch( - self, - origin: str, - pdus: List[EventBase], - room_version: str, - outlier: bool = False, - include_none: bool = False, - ): - """Takes a list of PDUs and checks the signatures and hashs of each - one. If a PDU fails its signature check then we check if we have it in - the database and if not then request if from the originating server of - that PDU. - - If a PDU fails its content hash check then it is redacted. - - The given list of PDUs are not modified, instead the function returns - a new list. - - Args: - origin - pdu - room_version - outlier: Whether the events are outliers or not - include_none: Whether to include None in the returned list - for events that have failed their checks - - Returns: - Deferred : A list of PDUs that have valid signatures and hashes. 
- """ - deferreds = self._check_sigs_and_hashes(room_version, pdus) - - @defer.inlineCallbacks - def handle_check_result(pdu: EventBase, deferred: Deferred): - try: - res = yield make_deferred_yieldable(deferred) - except SynapseError: - res = None - - if not res: - # Check local db. - res = yield self.store.get_event( - pdu.event_id, allow_rejected=True, allow_none=True - ) - - if not res and pdu.origin != origin: - try: - # This should not exist in the base implementation, until - # this is fixed, ignore it for typing. See issue #6997. - res = yield defer.ensureDeferred( - self.get_pdu( # type: ignore - destinations=[pdu.origin], - event_id=pdu.event_id, - room_version=room_version, - outlier=outlier, - timeout=10000, - ) - ) - except SynapseError: - pass - - if not res: - logger.warning( - "Failed to find copy of %s with valid signature", pdu.event_id - ) - - return res - - handle = preserve_fn(handle_check_result) - deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] - - valid_pdus = yield make_deferred_yieldable( - defer.gatherResults(deferreds2, consumeErrors=True) - ).addErrback(unwrapFirstError) - - if include_none: - return valid_pdus - else: - return [p for p in valid_pdus if p] - def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred: return make_deferred_yieldable( self._check_sigs_and_hashes(room_version, [pdu])[0] diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b5538bc07a..8c6b839478 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -33,6 +33,7 @@ from typing import ( from prometheus_client import Counter from twisted.internet import defer +from twisted.internet.defer import Deferred from synapse.api.constants import EventTypes, Membership from synapse.api.errors import ( @@ -51,7 +52,7 @@ from synapse.api.room_versions import ( ) from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.logging.utils import log_function from synapse.types import JsonDict from synapse.util import unwrapFirstError @@ -345,6 +346,83 @@ class FederationClient(FederationBase): return state_event_ids, auth_event_ids + async def _check_sigs_and_hash_and_fetch( + self, + origin: str, + pdus: List[EventBase], + room_version: str, + outlier: bool = False, + include_none: bool = False, + ) -> List[EventBase]: + """Takes a list of PDUs and checks the signatures and hashs of each + one. If a PDU fails its signature check then we check if we have it in + the database and if not then request if from the originating server of + that PDU. + + If a PDU fails its content hash check then it is redacted. + + The given list of PDUs are not modified, instead the function returns + a new list. + + Args: + origin + pdu + room_version + outlier: Whether the events are outliers or not + include_none: Whether to include None in the returned list + for events that have failed their checks + + Returns: + Deferred : A list of PDUs that have valid signatures and hashes. + """ + deferreds = self._check_sigs_and_hashes(room_version, pdus) + + @defer.inlineCallbacks + def handle_check_result(pdu: EventBase, deferred: Deferred): + try: + res = yield make_deferred_yieldable(deferred) + except SynapseError: + res = None + + if not res: + # Check local db. 
+ res = yield self.store.get_event( + pdu.event_id, allow_rejected=True, allow_none=True + ) + + if not res and pdu.origin != origin: + try: + res = yield defer.ensureDeferred( + self.get_pdu( + destinations=[pdu.origin], + event_id=pdu.event_id, + room_version=room_version, # type: ignore + outlier=outlier, + timeout=10000, + ) + ) + except SynapseError: + pass + + if not res: + logger.warning( + "Failed to find copy of %s with valid signature", pdu.event_id + ) + + return res + + handle = preserve_fn(handle_check_result) + deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] + + valid_pdus = await make_deferred_yieldable( + defer.gatherResults(deferreds2, consumeErrors=True) + ).addErrback(unwrapFirstError) + + if include_none: + return valid_pdus + else: + return [p for p in valid_pdus if p] + async def get_event_auth(self, destination, room_id, event_id): res = await self.transport_layer.get_event_auth(destination, room_id, event_id) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 7f9da49326..275b9c99d7 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -470,57 +470,6 @@ class FederationServer(FederationBase): res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} return 200, res - async def on_query_auth_request(self, origin, content, room_id, event_id): - """ - Content is a dict with keys:: - auth_chain (list): A list of events that give the auth chain. - missing (list): A list of event_ids indicating what the other - side (`origin`) think we're missing. - rejects (dict): A mapping from event_id to a 2-tuple of reason - string and a proof (or None) of why the event was rejected. - The keys of this dict give the list of events the `origin` has - rejected. 
- - Args: - origin (str) - content (dict) - event_id (str) - - Returns: - Deferred: Results in `dict` with the same format as `content` - """ - with (await self._server_linearizer.queue((origin, room_id))): - origin_host, _ = parse_server_name(origin) - await self.check_server_matches_acl(origin_host, room_id) - - room_version = await self.store.get_room_version(room_id) - - auth_chain = [ - event_from_pdu_json(e, room_version) for e in content["auth_chain"] - ] - - signed_auth = await self._check_sigs_and_hash_and_fetch( - origin, auth_chain, outlier=True, room_version=room_version.identifier - ) - - ret = await self.handler.on_query_auth( - origin, - event_id, - room_id, - signed_auth, - content.get("rejects", []), - content.get("missing", []), - ) - - time_now = self._clock.time_msec() - send_content = { - "auth_chain": [e.get_pdu_json(time_now) for e in ret["auth_chain"]], - "rejects": ret.get("rejects", []), - "missing": ret.get("missing", []), - } - - return 200, send_content - @log_function def on_query_client_keys(self, origin, content): return self.on_query_request("client_keys", content) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 92a9ae2320..af4595498c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -643,17 +643,6 @@ class FederationClientKeysClaimServlet(BaseFederationServlet): return 200, response -class FederationQueryAuthServlet(BaseFederationServlet): - PATH = "/query_auth/(?P[^/]*)/(?P[^/]*)" - - async def on_POST(self, origin, content, query, context, event_id): - new_content = await self.handler.on_query_auth_request( - origin, content, context, event_id - ) - - return 200, new_content - - class FederationGetMissingEventsServlet(BaseFederationServlet): # TODO(paul): Why does this path alone end with "/?" optional? PATH = "/get_missing_events/(?P[^/]*)/?" @@ -1412,7 +1401,6 @@ FEDERATION_SERVLET_CLASSES = ( FederationV2SendLeaveServlet, FederationV1InviteServlet, FederationV2InviteServlet, - FederationQueryAuthServlet, FederationGetMissingEventsServlet, FederationEventAuthServlet, FederationClientKeysQueryServlet, From 5e477c1debfd932ced56ec755204d6ead4ce8ec8 Mon Sep 17 00:00:00 2001 From: The Stranjer <791672+TheStranjer@users.noreply.github.com> Date: Tue, 17 Mar 2020 09:29:09 -0400 Subject: [PATCH 269/382] Set charset to utf-8 when adding headers for certain text content types (#7044) Fixes #7043 --- changelog.d/7044.bugfix | 1 + synapse/rest/media/v1/_base.py | 25 ++++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7044.bugfix diff --git a/changelog.d/7044.bugfix b/changelog.d/7044.bugfix new file mode 100644 index 0000000000..790088ddb4 --- /dev/null +++ b/changelog.d/7044.bugfix @@ -0,0 +1 @@ +Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. 
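The fix that follows is a small branch in the media repo's `add_file_headers`: a bare, known text type gets an explicit UTF-8 charset appended, while a type that already names a charset is passed through untouched. A minimal standalone sketch of the rule (illustrative names, not Synapse's actual helpers):

```python
# Sketch of the charset-defaulting rule; TEXT_TYPES and content_type_for()
# are hypothetical stand-ins for illustration.
TEXT_TYPES = {"text/plain", "text/html", "text/css", "application/json"}

def content_type_for(media_type: str) -> str:
    # "text/css; charset=UTF-16" already carries a charset parameter, so it
    # fails the exact-match test below and is returned unchanged.
    if media_type.lower() in TEXT_TYPES:
        return media_type + "; charset=UTF-8"
    return media_type

assert content_type_for("text/plain") == "text/plain; charset=UTF-8"
assert content_type_for("text/css; charset=UTF-16") == "text/css; charset=UTF-16"
assert content_type_for("image/png") == "image/png"
```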
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index ba28dd089d..503f2bed98 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -30,6 +30,22 @@ from synapse.util.stringutils import is_ascii logger = logging.getLogger(__name__) +# list all text content types that will have the charset default to UTF-8 when +# none is given +TEXT_CONTENT_TYPES = [ + "text/css", + "text/csv", + "text/html", + "text/calendar", + "text/plain", + "text/javascript", + "application/json", + "application/ld+json", + "application/rtf", + "image/svg+xml", + "text/xml", +] + def parse_media_id(request): try: @@ -96,7 +112,14 @@ def add_file_headers(request, media_type, file_size, upload_name): def _quote(x): return urllib.parse.quote(x.encode("utf-8")) - request.setHeader(b"Content-Type", media_type.encode("UTF-8")) + # Default to a UTF-8 charset for text content types. + # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16' + if media_type.lower() in TEXT_CONTENT_TYPES: + content_type = media_type + "; charset=UTF-8" + else: + content_type = media_type + + request.setHeader(b"Content-Type", content_type.encode("UTF-8")) if upload_name: # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. # From 4ce50519cdfe482ef5833488295f0235ad9fe0a9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 17 Mar 2020 18:08:43 +0000 Subject: [PATCH 270/382] Update postgres.md fix broken link --- docs/postgres.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/postgres.md b/docs/postgres.md index e0793ecee8..ca7ef1cf3a 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -72,8 +72,7 @@ underneath the database, or if a different version of the locale is used on any replicas. The safest way to fix the issue is to take a dump and recreate the database with -the correct `COLLATE` and `CTYPE` parameters (as per -[docs/postgres.md](docs/postgres.md)). It is also possible to change the +the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the parameters on a live database and run a `REINDEX` on the entire database, however extreme care must be taken to avoid database corruption. From c37db0211e36cd298426ff8811e547b0acd10bf4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 17 Mar 2020 22:32:25 +0100 Subject: [PATCH 271/382] Share SSL contexts for non-federation requests (#7094) Extends #5794 etc to the SimpleHttpClient so that it also applies to non-federation requests. Fixes #7092. --- changelog.d/7094.misc | 1 + synapse/crypto/context_factory.py | 68 ++++++++++++------- synapse/http/client.py | 3 - .../federation/matrix_federation_agent.py | 2 +- synapse/server.py | 6 +- tests/config/test_tls.py | 29 +++++--- .../test_matrix_federation_agent.py | 6 +- 7 files changed, 71 insertions(+), 44 deletions(-) create mode 100644 changelog.d/7094.misc diff --git a/changelog.d/7094.misc b/changelog.d/7094.misc new file mode 100644 index 0000000000..aa093ee3c0 --- /dev/null +++ b/changelog.d/7094.misc @@ -0,0 +1 @@ +Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. 
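As the changelog above notes, the win comes from constructing the OpenSSL context, including the CA trust root load, once and then reusing that single object for every connection. A rough pyOpenSSL sketch of the pattern (an illustration of the idea only, not the classes added below; real code would also wire up certificate verification):

```python
# Illustration only: one shared context, with trust roots loaded a single
# time, rather than a fresh context (and CA bundle re-parse) per request.
from OpenSSL import SSL

_shared_context = SSL.Context(SSL.TLSv1_2_METHOD)
_shared_context.set_default_verify_paths()  # load system CA roots once

def tls_connection(sock):
    # Every connection wraps the same context object; OpenSSL contexts are
    # designed to be shared across many connections.
    return SSL.Connection(_shared_context, sock)
```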
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index e93f0b3705..a5a2a7815d 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -75,7 +75,7 @@ class ServerContextFactory(ContextFactory): @implementer(IPolicyForHTTPS) -class ClientTLSOptionsFactory(object): +class FederationPolicyForHTTPS(object): """Factory for Twisted SSLClientConnectionCreators that are used to make connections to remote servers for federation. @@ -103,15 +103,15 @@ class ClientTLSOptionsFactory(object): # let us do). minTLS = _TLS_VERSION_MAP[config.federation_client_minimum_tls_version] - self._verify_ssl = CertificateOptions( + _verify_ssl = CertificateOptions( trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS ) - self._verify_ssl_context = self._verify_ssl.getContext() - self._verify_ssl_context.set_info_callback(self._context_info_cb) + self._verify_ssl_context = _verify_ssl.getContext() + self._verify_ssl_context.set_info_callback(_context_info_cb) - self._no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS) - self._no_verify_ssl_context = self._no_verify_ssl.getContext() - self._no_verify_ssl_context.set_info_callback(self._context_info_cb) + _no_verify_ssl = CertificateOptions(insecurelyLowerMinimumTo=minTLS) + self._no_verify_ssl_context = _no_verify_ssl.getContext() + self._no_verify_ssl_context.set_info_callback(_context_info_cb) def get_options(self, host: bytes): @@ -136,23 +136,6 @@ class ClientTLSOptionsFactory(object): return SSLClientConnectionCreator(host, ssl_context, should_verify) - @staticmethod - def _context_info_cb(ssl_connection, where, ret): - """The 'information callback' for our openssl context object.""" - # we assume that the app_data on the connection object has been set to - # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator) - tls_protocol = ssl_connection.get_app_data() - try: - # ... we further assume that SSLClientConnectionCreator has set the - # '_synapse_tls_verifier' attribute to a ConnectionVerifier object. - tls_protocol._synapse_tls_verifier.verify_context_info_cb( - ssl_connection, where - ) - except: # noqa: E722, taken from the twisted implementation - logger.exception("Error during info_callback") - f = Failure() - tls_protocol.failVerification(f) - def creatorForNetloc(self, hostname, port): """Implements the IPolicyForHTTPS interace so that this can be passed directly to agents. @@ -160,6 +143,43 @@ class ClientTLSOptionsFactory(object): return self.get_options(hostname) +@implementer(IPolicyForHTTPS) +class RegularPolicyForHTTPS(object): + """Factory for Twisted SSLClientConnectionCreators that are used to make connections + to remote servers, for other than federation. + + Always uses the same OpenSSL context object, which uses the default OpenSSL CA + trust root. + """ + + def __init__(self): + trust_root = platformTrust() + self._ssl_context = CertificateOptions(trustRoot=trust_root).getContext() + self._ssl_context.set_info_callback(_context_info_cb) + + def creatorForNetloc(self, hostname, port): + return SSLClientConnectionCreator(hostname, self._ssl_context, True) + + +def _context_info_cb(ssl_connection, where, ret): + """The 'information callback' for our openssl context objects. + + Note: Once this is set as the info callback on a Context object, the Context should + only be used with the SSLClientConnectionCreator. + """ + # we assume that the app_data on the connection object has been set to + # a TLSMemoryBIOProtocol object. 
(This is done by SSLClientConnectionCreator) + tls_protocol = ssl_connection.get_app_data() + try: + # ... we further assume that SSLClientConnectionCreator has set the + # '_synapse_tls_verifier' attribute to a ConnectionVerifier object. + tls_protocol._synapse_tls_verifier.verify_context_info_cb(ssl_connection, where) + except: # noqa: E722, taken from the twisted implementation + logger.exception("Error during info_callback") + f = Failure() + tls_protocol.failVerification(f) + + @implementer(IOpenSSLClientConnectionCreator) class SSLClientConnectionCreator(object): """Creates openssl connection objects for client connections. diff --git a/synapse/http/client.py b/synapse/http/client.py index d4c285445e..3797545824 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -244,9 +244,6 @@ class SimpleHttpClient(object): pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5)) pool.cachedConnectionTimeout = 2 * 60 - # The default context factory in Twisted 14.0.0 (which we require) is - # BrowserLikePolicyForHTTPS which will do regular cert validation - # 'like a browser' self.agent = ProxyAgent( self.reactor, connectTimeout=15, diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 647d26dc56..f5f917f5ae 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -45,7 +45,7 @@ class MatrixFederationAgent(object): Args: reactor (IReactor): twisted reactor to use for underlying requests - tls_client_options_factory (ClientTLSOptionsFactory|None): + tls_client_options_factory (FederationPolicyForHTTPS|None): factory to use for fetching client tls options, or none to disable TLS. _srv_resolver (SrvResolver|None): diff --git a/synapse/server.py b/synapse/server.py index fd2f69e928..1b980371de 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -26,7 +26,6 @@ import logging import os from twisted.mail.smtp import sendmail -from twisted.web.client import BrowserLikePolicyForHTTPS from synapse.api.auth import Auth from synapse.api.filtering import Filtering @@ -35,6 +34,7 @@ from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory +from synapse.crypto.context_factory import RegularPolicyForHTTPS from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory from synapse.events.spamcheck import SpamChecker @@ -310,7 +310,7 @@ class HomeServer(object): return ( InsecureInterceptableContextFactory() if self.config.use_insecure_ssl_client_just_for_testing_do_not_use - else BrowserLikePolicyForHTTPS() + else RegularPolicyForHTTPS() ) def build_simple_http_client(self): @@ -420,7 +420,7 @@ class HomeServer(object): return PusherPool(self) def build_http_client(self): - tls_client_options_factory = context_factory.ClientTLSOptionsFactory( + tls_client_options_factory = context_factory.FederationPolicyForHTTPS( self.config ) return MatrixFederationHttpClient(self, tls_client_options_factory) diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index 1be6ff563b..ec32d4b1ca 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -23,7 +23,7 @@ from OpenSSL import SSL from synapse.config._base import Config, RootConfig from synapse.config.tls import ConfigError, TlsConfig -from synapse.crypto.context_factory import 
ClientTLSOptionsFactory +from synapse.crypto.context_factory import FederationPolicyForHTTPS from tests.unittest import TestCase @@ -180,12 +180,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= t = TestConfig() t.read_config(config, config_dir_path="", data_dir_path="") - cf = ClientTLSOptionsFactory(t) + cf = FederationPolicyForHTTPS(t) + options = _get_ssl_context_options(cf._verify_ssl_context) # The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2 - self.assertNotEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0) - self.assertNotEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0) - self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0) + self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0) + self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0) + self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0) def test_tls_client_minimum_set_passed_through_1_0(self): """ @@ -195,12 +196,13 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= t = TestConfig() t.read_config(config, config_dir_path="", data_dir_path="") - cf = ClientTLSOptionsFactory(t) + cf = FederationPolicyForHTTPS(t) + options = _get_ssl_context_options(cf._verify_ssl_context) # The context has not had any of the NO_TLS set. - self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0) - self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0) - self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0) + self.assertEqual(options & SSL.OP_NO_TLSv1, 0) + self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0) + self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0) def test_acme_disabled_in_generated_config_no_acme_domain_provied(self): """ @@ -273,7 +275,7 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= t = TestConfig() t.read_config(config, config_dir_path="", data_dir_path="") - cf = ClientTLSOptionsFactory(t) + cf = FederationPolicyForHTTPS(t) # Not in the whitelist opts = cf.get_options(b"notexample.com") @@ -282,3 +284,10 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= # Caught by the wildcard opts = cf.get_options(idna.encode("テスト.ドメイン.テスト")) self.assertFalse(opts._verifier._verify_certs) + + +def _get_ssl_context_options(ssl_context: SSL.Context) -> int: + """get the options bits from an openssl context object""" + # the OpenSSL.SSL.Context wrapper doesn't expose get_options, so we have to + # use the low-level interface + return SSL._lib.SSL_CTX_get_options(ssl_context._context) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index cfcd98ff7d..fdc1d918ff 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -31,7 +31,7 @@ from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS from synapse.config.homeserver import HomeServerConfig -from synapse.crypto.context_factory import ClientTLSOptionsFactory +from synapse.crypto.context_factory import FederationPolicyForHTTPS from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.http.federation.srv_resolver import Server from synapse.http.federation.well_known_resolver import ( @@ -79,7 +79,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self._config = config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") - self.tls_factory = ClientTLSOptionsFactory(config) + self.tls_factory = FederationPolicyForHTTPS(config) self.well_known_cache = 
TTLCache("test_cache", timer=self.reactor.seconds) self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) @@ -715,7 +715,7 @@ class MatrixFederationAgentTests(unittest.TestCase): config = default_config("test", parse=True) # Build a new agent and WellKnownResolver with a different tls factory - tls_factory = ClientTLSOptionsFactory(config) + tls_factory = FederationPolicyForHTTPS(config) agent = MatrixFederationAgent( reactor=self.reactor, tls_client_options_factory=tls_factory, From 6d110ddea4b4c300a1d062442da060d021a280cf Mon Sep 17 00:00:00 2001 From: Richard von Kellner Date: Tue, 17 Mar 2020 22:48:23 +0100 Subject: [PATCH 272/382] Update INSTALL.md updated CentOS8 install instructions (#6925) --- INSTALL.md | 13 +++++++++++-- changelog.d/6925.doc | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6925.doc diff --git a/INSTALL.md b/INSTALL.md index ffb82bdcc3..c0926ba590 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -124,12 +124,21 @@ sudo pacman -S base-devel python python-pip \ #### CentOS/Fedora -Installing prerequisites on CentOS 7 or Fedora 25: +Installing prerequisites on CentOS 8 or Fedora>26: + +``` +sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \ + libwebp-devel tk-devel redhat-rpm-config \ + python3-virtualenv libffi-devel openssl-devel +sudo dnf groupinstall "Development Tools" +``` + +Installing prerequisites on CentOS 7 or Fedora<=25: ``` sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \ lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \ - python-virtualenv libffi-devel openssl-devel + python3-virtualenv libffi-devel openssl-devel sudo yum groupinstall "Development Tools" ``` diff --git a/changelog.d/6925.doc b/changelog.d/6925.doc new file mode 100644 index 0000000000..b8e6c73630 --- /dev/null +++ b/changelog.d/6925.doc @@ -0,0 +1 @@ +Updated CentOS8 install instructions. Contributed by Richard Kellner. From 6e6476ef07c2d72fbea85603f2eb2a61a6866732 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 18 Mar 2020 10:13:55 +0000 Subject: [PATCH 273/382] Comments from review --- synapse/app/generic_worker.py | 3 +++ synapse/replication/slave/storage/devices.py | 3 +++ synapse/storage/data_stores/main/devices.py | 27 ++++++++++++++------ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index d596852419..cdc078cf11 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -775,6 +775,9 @@ class FederationSenderHandler(object): # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: + # The entities are either user IDs (starting with '@') whose devices + # have changed, or remote servers that we need to tell about + # changes. hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: self.federation_sender.send_device_messages(host) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 01a4f85884..23b1650e41 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -72,6 +72,9 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto def _invalidate_caches_for_devices(self, token, rows): for row in rows: + # The entities are either user IDs (starting with '@') whose devices + # have changed, or remote servers that we need to tell about + # changes. 
if row.entity.startswith("@"): self._device_list_stream_cache.entity_has_changed(row.entity, token) self.get_cached_devices_for_user.invalidate((row.entity,)) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 06e1d9f033..4c19c02bbc 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import List, Tuple from six import iteritems @@ -31,7 +32,7 @@ from synapse.logging.opentracing import ( ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause -from synapse.storage.database import Database +from synapse.storage.database import Database, LoggingTransaction from synapse.types import Collection, get_verify_key_from_cross_signing_key from synapse.util.caches.descriptors import ( Cache, @@ -574,10 +575,12 @@ class DeviceWorkerStore(SQLBaseStore): else: return set() - def get_all_device_list_changes_for_remotes(self, from_key, to_key): - """Return a list of `(stream_id, user_id, destination)` which is the - combined list of changes to devices, and which destinations need to be - poked. `destination` may be None if no destinations need to be poked. + async def get_all_device_list_changes_for_remotes( + self, from_key: int, to_key: int + ) -> List[Tuple[int, str]]: + """Return a list of `(stream_id, entity)` which is the combined list of + changes to devices and which destinations need to be poked. Entity is + either a user ID (starting with '@') or a remote destination. """ # This query Does The Right Thing where it'll correctly apply the @@ -591,7 +594,7 @@ class DeviceWorkerStore(SQLBaseStore): WHERE ? < stream_id AND stream_id <= ? """ - return self.db.execute( + return await self.db.execute( "get_all_device_list_changes_for_remotes", None, sql, from_key, to_key ) @@ -1018,11 +1021,19 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): return stream_ids[-1] - def _add_device_change_to_stream_txn(self, txn, user_id, device_ids, stream_ids): + def _add_device_change_to_stream_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_ids: Collection[str], + stream_ids: List[str], + ): txn.call_after( self._device_list_stream_cache.entity_has_changed, user_id, stream_ids[-1], ) + min_stream_id = stream_ids[0] + # Delete older entries in the table, as we really only care about # when the latest change happened. txn.executemany( @@ -1030,7 +1041,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): DELETE FROM device_lists_stream WHERE user_id = ? AND device_id = ? AND stream_id < ? """, - [(user_id, device_id, stream_ids[0]) for device_id in device_ids], + [(user_id, device_id, min_stream_id) for device_id in device_ids], ) self.db.simple_insert_many_txn( From 88b41986dbc54e8601ad4d889f4ebff952858b4f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 18 Mar 2020 07:50:00 -0400 Subject: [PATCH 274/382] Add an option to the set password API to choose whether to logout other devices. 
(#7085) --- changelog.d/7085.feature | 1 + docs/admin_api/user_admin_api.rst | 6 +++- synapse/handlers/set_password.py | 39 +++++++++++++++---------- synapse/rest/admin/users.py | 6 ++-- synapse/rest/client/v2_alpha/account.py | 5 +++- 5 files changed, 38 insertions(+), 19 deletions(-) create mode 100644 changelog.d/7085.feature diff --git a/changelog.d/7085.feature b/changelog.d/7085.feature new file mode 100644 index 0000000000..df6d0f990d --- /dev/null +++ b/changelog.d/7085.feature @@ -0,0 +1 @@ +Add an optional parameter to control whether other sessions are logged out when a user's password is modified. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 6b02d963e6..9ce10119ff 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -38,6 +38,7 @@ The parameter ``threepids`` is optional. The parameter ``avatar_url`` is optional. The parameter ``admin`` is optional and defaults to 'false'. The parameter ``deactivated`` is optional and defaults to 'false'. +The parameter ``password`` is optional. If provided the user's password is updated and all devices are logged out. If the user already exists then optional parameters default to the current value. List Accounts @@ -168,11 +169,14 @@ with a body of: .. code:: json { - "new_password": "" + "new_password": "", + "logout_devices": true, } including an ``access_token`` of a server admin. +The parameter ``new_password`` is required. +The parameter ``logout_devices`` is optional and defaults to ``true``. Get whether a user is a server administrator or not =================================================== diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index d90c9e0108..12657ca698 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import Optional from twisted.internet import defer from synapse.api.errors import Codes, StoreError, SynapseError +from synapse.types import Requester from ._base import BaseHandler @@ -32,14 +34,17 @@ class SetPasswordHandler(BaseHandler): self._device_handler = hs.get_device_handler() @defer.inlineCallbacks - def set_password(self, user_id, newpassword, requester=None): + def set_password( + self, + user_id: str, + new_password: str, + logout_devices: bool, + requester: Optional[Requester] = None, + ): if not self.hs.config.password_localdb_enabled: raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) - password_hash = yield self._auth_handler.hash(newpassword) - - except_device_id = requester.device_id if requester else None - except_access_token_id = requester.access_token_id if requester else None + password_hash = yield self._auth_handler.hash(new_password) try: yield self.store.user_set_password_hash(user_id, password_hash) @@ -48,14 +53,18 @@ class SetPasswordHandler(BaseHandler): raise SynapseError(404, "Unknown user", Codes.NOT_FOUND) raise e - # we want to log out all of the user's other sessions. First delete - # all his other devices. - yield self._device_handler.delete_all_devices_for_user( - user_id, except_device_id=except_device_id - ) + # Optionally, log out all of the user's other sessions. 
+        if logout_devices:
+            except_device_id = requester.device_id if requester else None
+            except_access_token_id = requester.access_token_id if requester else None
 
-        # and now delete any access tokens which weren't associated with
-        # devices (or were associated with this device).
-        yield self._auth_handler.delete_access_tokens_for_user(
-            user_id, except_token_id=except_access_token_id
-        )
+            # First delete all of their other devices.
+            yield self._device_handler.delete_all_devices_for_user(
+                user_id, except_device_id=except_device_id
+            )
+
+            # and now delete any access tokens which weren't associated with
+            # devices (or were associated with this device).
+            yield self._auth_handler.delete_access_tokens_for_user(
+                user_id, except_token_id=except_access_token_id
+            )
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 80f959248d..8551ac19b8 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -221,8 +221,9 @@ class UserRestServletV2(RestServlet):
                     raise SynapseError(400, "Invalid password")
                 else:
                     new_password = body["password"]
+                    logout_devices = True
                     await self.set_password_handler.set_password(
-                        target_user.to_string(), new_password, requester
+                        target_user.to_string(), new_password, logout_devices, requester
                     )
 
         if "deactivated" in body:
@@ -536,9 +537,10 @@ class ResetPasswordRestServlet(RestServlet):
         params = parse_json_object_from_request(request)
         assert_params_in_dict(params, ["new_password"])
         new_password = params["new_password"]
+        logout_devices = params.get("logout_devices", True)
 
         await self._set_password_handler.set_password(
-            target_user_id, new_password, requester
+            target_user_id, new_password, logout_devices, requester
         )
         return 200, {}
 
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index dc837d6c75..631cc74cb4 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -265,8 +265,11 @@ class PasswordRestServlet(RestServlet):
 
         assert_params_in_dict(params, ["new_password"])
         new_password = params["new_password"]
+        logout_devices = params.get("logout_devices", True)
 
-        await self._set_password_handler.set_password(user_id, new_password, requester)
+        await self._set_password_handler.set_password(
+            user_id, new_password, logout_devices, requester
+        )
 
         return 200, {}
 

From 4a17a647a9508b70de35130fd82e3e21474270a9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 18 Mar 2020 16:46:41 +0000
Subject: [PATCH 275/382] Improve get auth chain difference algorithm. (#7095)

It was originally implemented by pulling the full auth chain of all
state sets out of the database and doing set comparison. However, that
can take a lot of work if the state and auth chains are large.

Instead, let's try and fetch the auth chains at the same time and
calculate the difference on the fly, allowing us to bail early if all
the auth chains converge. Assuming that the auth chains do converge more
often than not, this should improve performance. Hopefully.
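For reference, the approach being replaced can be modelled in a few lines over an in-memory auth graph: materialise every state set's full auth chain, then take the union minus the intersection. A toy sketch (a plain dict stands in for the auth graph; this is not Synapse code) that shows why the old way does full work even when the chains converge early:

```python
# Toy model of the old approach. auth_graph maps event_id -> auth event ids;
# Synapse instead pulls these chains out of the database.
def full_auth_chain(event_ids, auth_graph):
    chain, todo = set(), list(event_ids)
    while todo:
        for parent in auth_graph.get(todo.pop(), ()):
            if parent not in chain:
                chain.add(parent)
                todo.append(parent)
    return chain

def naive_auth_chain_difference(state_sets, auth_graph):
    # Every chain is built in full before any comparison happens.
    chains = [full_auth_chain(s, auth_graph) for s in state_sets]
    return set.union(*chains) - set.intersection(*chains)

# {A} and {B} diverge at C and D but share the ancestor E, so E drops out:
graph = {"A": ["C"], "B": ["D"], "C": ["E"], "D": ["E"], "E": []}
print(naive_auth_chain_difference([{"A"}, {"B"}], graph))  # {'C', 'D'}
```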
---
 changelog.d/7095.misc | 1 +
 synapse/state/__init__.py | 28 +---
 synapse/state/v2.py | 32 +---
 .../data_stores/main/event_federation.py | 150 ++++++++++++++++-
 tests/state/test_v2.py | 13 +-
 tests/storage/test_event_federation.py | 157 ++++++++++++++++--
 6 files changed, 310 insertions(+), 71 deletions(-)
 create mode 100644 changelog.d/7095.misc

diff --git a/changelog.d/7095.misc b/changelog.d/7095.misc
new file mode 100644
index 0000000000..44fc9f616f
--- /dev/null
+++ b/changelog.d/7095.misc
@@ -0,0 +1 @@
+Attempt to improve performance of state res v2 algorithm.
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index df7a4f6a89..4afefc6b1d 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -662,28 +662,16 @@ class StateResolutionStore(object):
             allow_rejected=allow_rejected,
         )
 
-    def get_auth_chain(self, event_ids: List[str], ignore_events: Set[str]):
-        """Gets the full auth chain for a set of events (including rejected
-        events).
-
-        Includes the given event IDs in the result.
-
-        Note that:
-            1. All events must be state events.
-            2. For v1 rooms this may not have the full auth chain in the
-               presence of rejected events
-
-        Args:
-            event_ids: The event IDs of the events to fetch the auth chain for.
-                Must be state events.
-            ignore_events: Set of events to exclude from the returned auth
-                chain.
+    def get_auth_chain_difference(self, state_sets: List[Set[str]]):
+        """Given sets of state events figure out the auth chain difference (as
+        per state res v2 algorithm).
+
+        This is equivalent to fetching the full auth chain for each set of state
+        and returning the events that don't appear in each and every auth
+        chain.
 
         Returns:
-            Deferred[list[str]]: List of event IDs of the auth chain.
+            Deferred[Set[str]]: Set of event IDs.
         """
 
-        return self.store.get_auth_chain_ids(
-            event_ids, include_given=True, ignore_events=ignore_events,
-        )
+        return self.store.get_auth_chain_difference(state_sets)
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 0ffe6d8c14..18484e2fa6 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -227,36 +227,12 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
     Returns:
         Deferred[set[str]]: Set of event IDs
     """
-    common = set(itervalues(state_sets[0])).intersection(
-        *(itervalues(s) for s in state_sets[1:])
+
+    difference = yield state_res_store.get_auth_chain_difference(
+        [set(state_set.values()) for state_set in state_sets]
     )
 
-    auth_sets = []
-    for state_set in state_sets:
-        auth_ids = {
-            eid
-            for key, eid in iteritems(state_set)
-            if (
-                key[0] in (EventTypes.Member, EventTypes.ThirdPartyInvite)
-                or key
-                in (
-                    (EventTypes.PowerLevels, ""),
-                    (EventTypes.Create, ""),
-                    (EventTypes.JoinRules, ""),
-                )
-            )
-            and eid not in common
-        }
-
-        auth_chain = yield state_res_store.get_auth_chain(auth_ids, common)
-        auth_ids.update(auth_chain)
-
-        auth_sets.append(auth_ids)
-
-    intersection = set(auth_sets[0]).intersection(*auth_sets[1:])
-    union = set().union(*auth_sets)
-
-    return union - intersection
+    return difference
 
 
 def _seperate(state_sets):
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 49a7b8b433..62d4e9f599 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 import itertools
 import logging
-from typing import List, Optional, Set
+from typing import Dict, List, Optional, Set, Tuple
 
 from six.moves.queue import Empty, PriorityQueue
 
@@ -103,6 +103,154 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
         return list(results)
 
+    def get_auth_chain_difference(self, state_sets: List[Set[str]]):
+        """Given sets of state events figure out the auth chain difference (as
+        per state res v2 algorithm).
+
+        This is equivalent to fetching the full auth chain for each set of state
+        and returning the events that don't appear in each and every auth
+        chain.
+
+        Returns:
+            Deferred[Set[str]]
+        """
+
+        return self.db.runInteraction(
+            "get_auth_chain_difference",
+            self._get_auth_chain_difference_txn,
+            state_sets,
+        )
+
+    def _get_auth_chain_difference_txn(
+        self, txn, state_sets: List[Set[str]]
+    ) -> Set[str]:
+
+        # Algorithm Description
+        # ~~~~~~~~~~~~~~~~~~~~~
+        #
+        # The idea here is to basically walk the auth graph of each state set in
+        # tandem, keeping track of which auth events are reachable by each state
+        # set. If we reach an auth event we've already visited (via a different
+        # state set) then we mark that auth event and all ancestors as reachable
+        # by the state set. This requires that we keep track of the auth chains
+        # in memory.
+        #
+        # Doing it in such a way means that we can stop early if all auth
+        # events we're currently walking are reachable by all state sets.
+        #
+        # *Note*: We can't stop walking an event's auth chain if it is reachable
+        # by all state sets. This is because other auth chains we're walking
+        # might be reachable only via the original auth chain. For example,
+        # given the following auth chain:
+        #
+        #    A -> C -> D -> E
+        #        /         /
+        #    B -´---------´
+        #
+        # and state sets {A} and {B} then walking the auth chains of A and B
+        # would immediately show that C is reachable by both. However, if we
+        # stopped at C then we'd only reach E via the auth chain of B and so E
+        # would erroneously get included in the returned difference.
+        #
+        # The other thing that we do is limit the number of auth chains we walk
+        # at once, due to practical limits (i.e. we can only query the database
+        # with a limited set of parameters). We pick the auth chains we walk
+        # each iteration based on their depth, in the hope that events with a
+        # lower depth are likely reachable by those with higher depths.
+        #
+        # We could use any ordering that we believe would give a rough
+        # topological ordering, e.g. origin server timestamp. If the ordering
+        # chosen is not topological then the algorithm still produces the right
+        # result, but perhaps a bit less efficiently. This is why it is safe
+        # to use "depth" here.
+
+        initial_events = set(state_sets[0]).union(*state_sets[1:])
+
+        # Dict from events in auth chains to which sets *cannot* reach them.
+        # I.e. if the set is empty then all sets can reach the event.
+        event_to_missing_sets = {
+            event_id: {i for i, a in enumerate(state_sets) if event_id not in a}
+            for event_id in initial_events
+        }
+
+        # We need to get the depth of the initial events for sorting purposes.
+        sql = """
+            SELECT depth, event_id FROM events
+            WHERE %s
+            ORDER BY depth ASC
+        """
+        clause, args = make_in_list_sql_clause(
+            txn.database_engine, "event_id", initial_events
+        )
+        txn.execute(sql % (clause,), args)
+
+        # The sorted list of events whose auth chains we should walk.
+        search = txn.fetchall()  # type: List[Tuple[int, str]]
+
+        # Map from event to its auth events
+        event_to_auth_events = {}  # type: Dict[str, Set[str]]
+
+        base_sql = """
+            SELECT a.event_id, auth_id, depth
+            FROM event_auth AS a
+            INNER JOIN events AS e ON (e.event_id = a.auth_id)
+            WHERE
+        """
+
+        while search:
+            # Check whether all our current walks are reachable by all state
+            # sets. If so we can bail.
+            if all(not event_to_missing_sets[eid] for _, eid in search):
+                break
+
+            # Fetch the auth events and their depths of the last N events we're
+            # currently walking
+            search, chunk = search[:-100], search[-100:]
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "a.event_id", [e_id for _, e_id in chunk]
+            )
+            txn.execute(base_sql + clause, args)
+
+            for event_id, auth_event_id, auth_event_depth in txn:
+                event_to_auth_events.setdefault(event_id, set()).add(auth_event_id)
+
+                sets = event_to_missing_sets.get(auth_event_id)
+                if sets is None:
+                    # First time we're seeing this event, so we add it to the
+                    # queue of things to fetch.
+                    search.append((auth_event_depth, auth_event_id))
+
+                    # Assume that this event is unreachable from any of the
+                    # state sets until proven otherwise
+                    sets = event_to_missing_sets[auth_event_id] = set(
+                        range(len(state_sets))
+                    )
+                else:
+                    # We've previously seen this event, so look up its auth
+                    # events and recursively mark all ancestors as reachable
+                    # by the current event's state set.
+                    a_ids = event_to_auth_events.get(auth_event_id)
+                    while a_ids:
+                        new_aids = set()
+                        for a_id in a_ids:
+                            event_to_missing_sets[a_id].intersection_update(
+                                event_to_missing_sets[event_id]
+                            )
+
+                            b = event_to_auth_events.get(a_id)
+                            if b:
+                                new_aids.update(b)
+
+                        a_ids = new_aids
+
+                # Mark that the auth event is reachable by the appropriate sets.
+                sets.intersection_update(event_to_missing_sets[event_id])
+
+            search.sort()
+
+        # Return all events where not all sets can reach them.
+        return {eid for eid, n in event_to_missing_sets.items() if n}
+
     def get_oldest_events_in_room(self, room_id):
         return self.db.runInteraction(
             "get_oldest_events_in_room", self._get_oldest_events_in_room_txn, room_id
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 5059ade850..a44960203e 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -603,7 +603,7 @@ class TestStateResolutionStore(object):
 
         return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map}
 
-    def get_auth_chain(self, event_ids, ignore_events):
+    def _get_auth_chain(self, event_ids):
         """Gets the full auth chain for a set of events (including rejected
         events).
 
@@ -617,9 +617,6 @@ class TestStateResolutionStore(object):
         Args:
             event_ids (list): The event IDs of the events to fetch the auth
                 chain for. Must be state events.
-            ignore_events: Set of events to exclude from the returned auth
-                chain.
-
         Returns:
             Deferred[list[str]]: List of event IDs of the auth chain.
""" @@ -629,7 +626,7 @@ class TestStateResolutionStore(object): stack = list(event_ids) while stack: event_id = stack.pop() - if event_id in result or event_id in ignore_events: + if event_id in result: continue result.add(event_id) @@ -639,3 +636,9 @@ class TestStateResolutionStore(object): stack.append(aid) return list(result) + + def get_auth_chain_difference(self, auth_sets): + chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] + + common = set(chains[0]).intersection(*chains[1:]) + return set(chains[0]).union(*chains[1:]) - common diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index a331517f4d..3aeec0dc0f 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -13,19 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - import tests.unittest import tests.utils -class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - hs = yield tests.utils.setup_test_homeserver(self.addCleanup) +class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() - @defer.inlineCallbacks def test_get_prev_events_for_room(self): room_id = "@ROOM:local" @@ -61,15 +56,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): ) for i in range(0, 20): - yield self.store.db.runInteraction("insert", insert_event, i) + self.get_success(self.store.db.runInteraction("insert", insert_event, i)) # this should get the last ten - r = yield self.store.get_prev_events_for_room(room_id) + r = self.get_success(self.store.get_prev_events_for_room(room_id)) self.assertEqual(10, len(r)) for i in range(0, 10): self.assertEqual("$event_%i:local" % (19 - i), r[i]) - @defer.inlineCallbacks def test_get_rooms_with_many_extremities(self): room1 = "#room1" room2 = "#room2" @@ -86,25 +80,154 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): ) for i in range(0, 20): - yield self.store.db.runInteraction("insert", insert_event, i, room1) - yield self.store.db.runInteraction("insert", insert_event, i, room2) - yield self.store.db.runInteraction("insert", insert_event, i, room3) + self.get_success( + self.store.db.runInteraction("insert", insert_event, i, room1) + ) + self.get_success( + self.store.db.runInteraction("insert", insert_event, i, room2) + ) + self.get_success( + self.store.db.runInteraction("insert", insert_event, i, room3) + ) # Test simple case - r = yield self.store.get_rooms_with_many_extremities(5, 5, []) + r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, [])) self.assertEqual(len(r), 3) # Does filter work? - r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1]) + r = self.get_success(self.store.get_rooms_with_many_extremities(5, 5, [room1])) self.assertTrue(room2 in r) self.assertTrue(room3 in r) self.assertEqual(len(r), 2) - r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1, room2]) + r = self.get_success( + self.store.get_rooms_with_many_extremities(5, 5, [room1, room2]) + ) self.assertEqual(r, [room3]) # Does filter and limit work? 
- r = yield self.store.get_rooms_with_many_extremities(5, 1, [room1]) + r = self.get_success(self.store.get_rooms_with_many_extremities(5, 1, [room1])) self.assertTrue(r == [room2] or r == [room3]) + + def test_auth_difference(self): + room_id = "@ROOM:local" + + # The silly auth graph we use to test the auth difference algorithm, + # where the top are the most recent events. + # + # A B + # \ / + # D E + # \ | + # ` F C + # | /| + # G ´ | + # | \ | + # H I + # | | + # K J + + auth_graph = { + "a": ["e"], + "b": ["e"], + "c": ["g", "i"], + "d": ["f"], + "e": ["f"], + "f": ["g"], + "g": ["h", "i"], + "h": ["k"], + "i": ["j"], + "k": [], + "j": [], + } + + depth_map = { + "a": 7, + "b": 7, + "c": 4, + "d": 6, + "e": 6, + "f": 5, + "g": 3, + "h": 2, + "i": 2, + "k": 1, + "j": 1, + } + + # We rudely fiddle with the appropriate tables directly, as that's much + # easier than constructing events properly. + + def insert_event(txn, event_id, stream_ordering): + + depth = depth_map[event_id] + + self.store.db.simple_insert_txn( + txn, + table="events", + values={ + "event_id": event_id, + "room_id": room_id, + "depth": depth, + "topological_ordering": depth, + "type": "m.test", + "processed": True, + "outlier": False, + "stream_ordering": stream_ordering, + }, + ) + + self.store.db.simple_insert_many_txn( + txn, + table="event_auth", + values=[ + {"event_id": event_id, "room_id": room_id, "auth_id": a} + for a in auth_graph[event_id] + ], + ) + + next_stream_ordering = 0 + for event_id in auth_graph: + next_stream_ordering += 1 + self.get_success( + self.store.db.runInteraction( + "insert", insert_event, event_id, next_stream_ordering + ) + ) + + # Now actually test that various combinations give the right result: + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a"}, {"b"}]) + ) + self.assertSetEqual(difference, {"a", "b"}) + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a"}, {"b"}, {"c"}]) + ) + self.assertSetEqual(difference, {"a", "b", "c", "e", "f"}) + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a", "c"}, {"b"}]) + ) + self.assertSetEqual(difference, {"a", "b", "c"}) + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a"}, {"b"}, {"d"}]) + ) + self.assertSetEqual(difference, {"a", "b", "d", "e"}) + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a"}, {"b"}, {"c"}, {"d"}]) + ) + self.assertSetEqual(difference, {"a", "b", "c", "d", "e", "f"}) + + difference = self.get_success( + self.store.get_auth_chain_difference([{"a"}, {"b"}, {"e"}]) + ) + self.assertSetEqual(difference, {"a", "b"}) + + difference = self.get_success(self.store.get_auth_chain_difference([{"a"}])) + self.assertSetEqual(difference, set()) From 443162e57724c34099215732eda690ea25cb1e4c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Mar 2020 10:48:45 +0100 Subject: [PATCH 276/382] Move pusherpool startup into _base.setup (#7104) This should be safe to do on all workers/masters because it is guarded by a config option which will ensure it is only actually done on the worker assigned as a pusher. 
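The guard the commit message refers to lives inside `PusherPool` itself: the pool records the `start_pushers` config option (visible as `self._should_start_pushers = _hs.config.start_pushers` in the pusherpool diff two patches later), so calling `start` unconditionally is harmless on processes that don't send pushes. The following is a rough, hedged sketch of that shape with stub objects for illustration; it is simplified, not a verbatim copy of the Synapse code.

```python
# Rough sketch of a config-guarded start, simplified from PusherPool.
# The Stub* classes are illustrative stand-ins for the real homeserver.


class StubConfig:
    # False on every process except the worker assigned to send pushes.
    start_pushers = False


class StubHomeserver:
    config = StubConfig()


class PusherPool:
    def __init__(self, hs):
        self._should_start_pushers = hs.config.start_pushers

    def start(self):
        if not self._should_start_pushers:
            # No-op on other workers/masters, which is what makes the
            # unconditional call from _base.start safe.
            return
        print("starting pushers in a background process")


PusherPool(StubHomeserver()).start()  # prints nothing: the guard short-circuits
```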
--- changelog.d/7104.misc | 1 + synapse/app/_base.py | 1 + synapse/app/homeserver.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7104.misc diff --git a/changelog.d/7104.misc b/changelog.d/7104.misc new file mode 100644 index 0000000000..ec5c004bbe --- /dev/null +++ b/changelog.d/7104.misc @@ -0,0 +1 @@ +Merge worker apps together. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 9ffd23c6df..4d84f4595a 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -276,6 +276,7 @@ def start(hs, listeners=None): # It is now safe to start your Synapse. hs.start_listening(listeners) hs.get_datastore().db.start_profiling() + hs.get_pusherpool().start() setup_sentry(hs) setup_sdnotify(hs) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e0fdddfdc9..f2b56a636f 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -408,7 +408,6 @@ def setup(config_options): _base.start(hs, config.listeners) - hs.get_pusherpool().start() hs.get_datastore().db.updates.start_doing_background_updates() except Exception: # Print the exception and bail out. From 8c75667ad7810b4c05e40f7665e724a40aaf4d64 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Mar 2020 11:00:24 +0100 Subject: [PATCH 277/382] Add prometheus metrics for the number of active pushers (#7103) --- changelog.d/7103.feature | 1 + synapse/metrics/__init__.py | 12 ++++++---- synapse/metrics/background_process_metrics.py | 5 ++-- synapse/push/pusherpool.py | 24 ++++++++++++++++++- tox.ini | 2 ++ 5 files changed, 36 insertions(+), 8 deletions(-) create mode 100644 changelog.d/7103.feature diff --git a/changelog.d/7103.feature b/changelog.d/7103.feature new file mode 100644 index 0000000000..413e7f29d7 --- /dev/null +++ b/changelog.d/7103.feature @@ -0,0 +1 @@ +Add prometheus metrics for the number of active pushers. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 0dba997a23..d2fd29acb4 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -20,7 +20,7 @@ import os import platform import threading import time -from typing import Dict, Union +from typing import Callable, Dict, Iterable, Optional, Tuple, Union import six @@ -59,10 +59,12 @@ class RegistryProxy(object): @attr.s(hash=True) class LaterGauge(object): - name = attr.ib() - desc = attr.ib() - labels = attr.ib(hash=False) - caller = attr.ib() + name = attr.ib(type=str) + desc = attr.ib(type=str) + labels = attr.ib(hash=False, type=Optional[Iterable[str]]) + # callback: should either return a value (if there are no labels for this metric), + # or dict mapping from a label tuple to a value + caller = attr.ib(type=Callable[[], Union[Dict[Tuple[str, ...], float], float]]) def collect(self): diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index b65bcd8806..8449ef82f7 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -17,6 +17,7 @@ import logging import threading from asyncio import iscoroutine from functools import wraps +from typing import Dict, Set import six @@ -80,13 +81,13 @@ _background_process_db_sched_duration = Counter( # map from description to a counter, so that we can name our logcontexts # incrementally. (It actually duplicates _background_process_start_count, but # it's much simpler to do so than to try to combine them.) 
-_background_process_counts = {} # type: dict[str, int] +_background_process_counts = {} # type: Dict[str, int] # map from description to the currently running background processes. # # it's kept as a dict of sets rather than a big set so that we can keep track # of process descriptions that no longer have any active processes. -_background_processes = {} # type: dict[str, set[_BackgroundProcess]] +_background_processes = {} # type: Dict[str, Set[_BackgroundProcess]] # A lock that covers the above dicts _bg_metrics_lock = threading.Lock() diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 01789a9fb4..bf721759df 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -15,11 +15,16 @@ # limitations under the License. import logging +from collections import defaultdict +from typing import Dict, Tuple, Union from twisted.internet import defer +from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.push import PusherConfigException +from synapse.push.emailpusher import EmailPusher +from synapse.push.httppusher import HttpPusher from synapse.push.pusher import PusherFactory from synapse.util.async_helpers import concurrently_execute @@ -47,7 +52,24 @@ class PusherPool: self._should_start_pushers = _hs.config.start_pushers self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() - self.pushers = {} + + # map from user id to app_id:pushkey to pusher + self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] + + def count_pushers(): + results = defaultdict(int) # type: Dict[Tuple[str, str], int] + for pushers in self.pushers.values(): + for pusher in pushers.values(): + k = (type(pusher).__name__, pusher.app_id) + results[k] += 1 + return results + + LaterGauge( + name="synapse_pushers", + desc="the number of active pushers", + labels=["kind", "app_id"], + caller=count_pushers, + ) def start(self): """Starts the pushers off in a background process. diff --git a/tox.ini b/tox.ini index 8b4c37c2ee..8e3f09e638 100644 --- a/tox.ini +++ b/tox.ini @@ -191,7 +191,9 @@ commands = mypy \ synapse/handlers/sync.py \ synapse/handlers/ui_auth \ synapse/logging/ \ + synapse/metrics \ synapse/module_api \ + synapse/push/pusherpool.py \ synapse/replication \ synapse/rest \ synapse/spam_checker_api \ From e913823a220b89a205a09efe53116fab435dfdfb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Mar 2020 11:28:49 +0100 Subject: [PATCH 278/382] Fix concurrent modification errors in pusher metrics (#7106) add a lock to try to make this metric actually work --- changelog.d/7106.feature | 1 + synapse/push/pusherpool.py | 27 ++++++++++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 changelog.d/7106.feature diff --git a/changelog.d/7106.feature b/changelog.d/7106.feature new file mode 100644 index 0000000000..413e7f29d7 --- /dev/null +++ b/changelog.d/7106.feature @@ -0,0 +1 @@ +Add prometheus metrics for the number of active pushers. 
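The race being fixed is the usual one with gauge callbacks: metrics collection runs on a different thread from the reactor, so `count_pushers` can iterate over `self.pushers` while it is being mutated. The diff below serialises both sides behind a `threading.Lock`; here is a stripped-down illustration of the same pattern using toy names and only the standard library.

```python
import threading

# Toy illustration of the fix: one thread mutates a dict while a metrics
# callback iterates over it from another thread. Taking the same lock on
# both sides prevents "dictionary changed size during iteration" errors.

_pushers = {}
_pushers_lock = threading.Lock()


def add_pusher(key, pusher):
    # Called on the main/reactor thread.
    with _pushers_lock:
        _pushers[key] = pusher


def count_pushers():
    # Called from the metrics collection thread.
    with _pushers_lock:
        return len(_pushers)
```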
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index bf721759df..88d203aa44 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -16,6 +16,7 @@ import logging from collections import defaultdict +from threading import Lock from typing import Dict, Tuple, Union from twisted.internet import defer @@ -56,12 +57,17 @@ class PusherPool: # map from user id to app_id:pushkey to pusher self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] + # a lock for the pushers dict, since `count_pushers` is called from an different + # and we otherwise get concurrent modification errors + self._pushers_lock = Lock() + def count_pushers(): results = defaultdict(int) # type: Dict[Tuple[str, str], int] - for pushers in self.pushers.values(): - for pusher in pushers.values(): - k = (type(pusher).__name__, pusher.app_id) - results[k] += 1 + with self._pushers_lock: + for pushers in self.pushers.values(): + for pusher in pushers.values(): + k = (type(pusher).__name__, pusher.app_id) + results[k] += 1 return results LaterGauge( @@ -293,11 +299,12 @@ class PusherPool: return appid_pushkey = "%s:%s" % (pusherdict["app_id"], pusherdict["pushkey"]) - byuser = self.pushers.setdefault(pusherdict["user_name"], {}) - if appid_pushkey in byuser: - byuser[appid_pushkey].on_stop() - byuser[appid_pushkey] = p + with self._pushers_lock: + byuser = self.pushers.setdefault(pusherdict["user_name"], {}) + if appid_pushkey in byuser: + byuser[appid_pushkey].on_stop() + byuser[appid_pushkey] = p # Check if there *may* be push to process. We do this as this check is a # lot cheaper to do than actually fetching the exact rows we need to @@ -326,7 +333,9 @@ class PusherPool: if appid_pushkey in byuser: logger.info("Stopping pusher %s / %s", user_id, appid_pushkey) byuser[appid_pushkey].on_stop() - del byuser[appid_pushkey] + with self._pushers_lock: + del byuser[appid_pushkey] + yield self.store.delete_pusher_by_app_id_pushkey_user_id( app_id, pushkey, user_id ) From 782b811789b697b822a4daca67c455efeb7a60d4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Mar 2020 10:45:40 +0000 Subject: [PATCH 279/382] update grafana dashboard --- contrib/grafana/synapse.json | 245 ++++++++++++++++++++++++++++++----- 1 file changed, 213 insertions(+), 32 deletions(-) diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 5b1bfd1679..656a442597 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -18,7 +18,7 @@ "gnetId": null, "graphTooltip": 0, "id": 1, - "iteration": 1561447718159, + "iteration": 1584612489167, "links": [ { "asDropdown": true, @@ -34,6 +34,7 @@ "panels": [ { "collapsed": false, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -52,12 +53,14 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, "y": 1 }, + "hiddenSeries": false, "id": 75, "legend": { "avg": false, @@ -72,7 +75,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -151,6 +156,7 @@ "editable": true, "error": false, "fill": 1, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 9, @@ -158,6 +164,7 @@ "x": 12, "y": 1 }, + "hiddenSeries": false, "id": 33, "legend": { "avg": false, @@ -172,7 +179,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, 
"pointradius": 5, @@ -302,12 +311,14 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, "y": 10 }, + "hiddenSeries": false, "id": 107, "legend": { "avg": false, @@ -322,7 +333,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -425,12 +438,14 @@ "dashes": false, "datasource": "$datasource", "fill": 0, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, "y": 19 }, + "hiddenSeries": false, "id": 118, "legend": { "avg": false, @@ -445,7 +460,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -542,6 +559,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -1361,6 +1379,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -1732,6 +1751,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -2439,6 +2459,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -2635,6 +2656,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -2650,11 +2672,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 61 + "y": 33 }, "id": 79, "legend": { @@ -2670,6 +2693,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -2684,8 +2710,13 @@ "expr": "sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "txn rate", + "legendFormat": "successful txn rate", "refId": "A" + }, + { + "expr": "sum(rate(synapse_util_metrics_block_count{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))", + "legendFormat": "failed txn rate", + "refId": "B" } ], "thresholds": [], @@ -2736,11 +2767,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 61 + "y": 33 }, "id": 83, "legend": { @@ -2756,6 +2788,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -2829,11 +2864,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 70 + "y": 42 }, "id": 109, "legend": { @@ -2849,6 +2885,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -2923,11 +2962,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 70 + "y": 42 }, "id": 111, "legend": { @@ -2943,6 +2983,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3009,6 +3052,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -3024,12 +3068,14 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { - "h": 7, + "h": 8, "w": 
12, "x": 0, - "y": 62 + "y": 34 }, + "hiddenSeries": false, "id": 51, "legend": { "avg": false, @@ -3044,6 +3090,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3112,6 +3161,95 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "hiddenSeries": false, + "id": 134, + "legend": { + "avg": false, + "current": false, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})", + "legendFormat": "{{kind}} {{app_id}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active pusher instances by app", + "tooltip": { + "shared": false, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, @@ -3120,6 +3258,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -3523,6 +3662,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -3540,6 +3680,7 @@ "editable": true, "error": false, "fill": 1, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 13, @@ -3562,6 +3703,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3630,6 +3774,7 @@ "editable": true, "error": false, "fill": 1, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 13, @@ -3652,6 +3797,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3720,6 +3868,7 @@ "editable": true, "error": false, "fill": 1, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 13, @@ -3742,6 +3891,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3810,6 +3962,7 @@ "editable": true, "error": false, "fill": 1, + "fillGradient": 0, "grid": {}, "gridPos": { "h": 13, @@ -3832,6 +3985,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -3921,6 +4077,7 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -4010,6 +4167,7 @@ "linewidth": 2, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -4076,6 +4234,7 @@ }, { 
"collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -4540,6 +4699,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -5060,6 +5220,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -5079,7 +5240,7 @@ "h": 7, "w": 12, "x": 0, - "y": 67 + "y": 39 }, "id": 2, "legend": { @@ -5095,6 +5256,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5198,7 +5360,7 @@ "h": 7, "w": 12, "x": 12, - "y": 67 + "y": 39 }, "id": 41, "legend": { @@ -5214,6 +5376,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5286,7 +5449,7 @@ "h": 7, "w": 12, "x": 0, - "y": 74 + "y": 46 }, "id": 42, "legend": { @@ -5302,6 +5465,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5373,7 +5537,7 @@ "h": 7, "w": 12, "x": 12, - "y": 74 + "y": 46 }, "id": 43, "legend": { @@ -5389,6 +5553,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5460,7 +5625,7 @@ "h": 7, "w": 12, "x": 0, - "y": 81 + "y": 53 }, "id": 113, "legend": { @@ -5476,6 +5641,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5546,7 +5712,7 @@ "h": 7, "w": 12, "x": 12, - "y": 81 + "y": 53 }, "id": 115, "legend": { @@ -5562,6 +5728,7 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": {}, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5573,7 +5740,7 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{reason_type}}", @@ -5628,6 +5795,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -5643,11 +5811,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 13 + "y": 40 }, "id": 67, "legend": { @@ -5663,7 +5832,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5679,7 +5850,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} ", + "legendFormat": "{{job}}-{{index}} {{name}}", "refId": "A" } ], @@ -5731,11 +5902,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 13 + "y": 40 }, "id": 71, "legend": { @@ -5751,7 +5923,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": false, "pointradius": 5, @@ -5819,11 +5993,12 @@ "dashes": false, "datasource": "$datasource", "fill": 1, + "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 22 + "y": 49 }, "id": 121, "interval": "", @@ -5840,7 +6015,9 @@ "linewidth": 1, "links": [], "nullPointMode": "connected", - "options": {}, + "options": { + "dataLinks": [] + }, "paceLength": 10, "percentage": 
false, "pointradius": 5, @@ -5909,6 +6086,7 @@ }, { "collapsed": true, + "datasource": null, "gridPos": { "h": 1, "w": 24, @@ -6607,7 +6785,7 @@ } ], "refresh": "5m", - "schemaVersion": 18, + "schemaVersion": 22, "style": "dark", "tags": [ "matrix" @@ -6616,7 +6794,7 @@ "list": [ { "current": { - "tags": [], + "selected": true, "text": "Prometheus", "value": "Prometheus" }, @@ -6638,6 +6816,7 @@ "auto_count": 100, "auto_min": "30s", "current": { + "selected": false, "text": "auto", "value": "$__auto_interval_bucket_size" }, @@ -6719,9 +6898,9 @@ "allFormat": "regex wildcard", "allValue": "", "current": { - "text": "All", + "text": "synapse", "value": [ - "$__all" + "synapse" ] }, "datasource": "$datasource", @@ -6751,7 +6930,9 @@ "allValue": ".*", "current": { "text": "All", - "value": "$__all" + "value": [ + "$__all" + ] }, "datasource": "$datasource", "definition": "", @@ -6810,5 +6991,5 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 10 + "version": 19 } \ No newline at end of file From e43e78b985c586133fedd9779eaf19e1a16ad68b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Mar 2020 11:07:16 +0000 Subject: [PATCH 280/382] 1.12.0rc1 --- CHANGES.md | 77 ++++++++++++++++++++++++++++++++++++++++ changelog.d/6309.misc | 1 - changelog.d/6315.feature | 1 - changelog.d/6572.bugfix | 1 - changelog.d/6615.misc | 1 - changelog.d/6874.misc | 1 - changelog.d/6875.misc | 1 - changelog.d/6925.doc | 1 - changelog.d/6941.removal | 1 - changelog.d/6952.misc | 1 - changelog.d/6953.misc | 1 - changelog.d/6954.misc | 1 - changelog.d/6956.misc | 1 - changelog.d/6957.misc | 1 - changelog.d/6962.bugfix | 1 - changelog.d/6964.misc | 1 - changelog.d/6965.feature | 1 - changelog.d/6966.removal | 1 - changelog.d/6967.bugfix | 1 - changelog.d/6968.bugfix | 1 - changelog.d/6970.removal | 1 - changelog.d/6971.feature | 1 - changelog.d/6979.misc | 1 - changelog.d/6982.feature | 1 - changelog.d/6983.misc | 1 - changelog.d/6984.docker | 1 - changelog.d/6985.misc | 1 - changelog.d/6986.feature | 1 - changelog.d/6987.misc | 1 - changelog.d/6990.bugfix | 1 - changelog.d/6991.misc | 1 - changelog.d/6995.misc | 1 - changelog.d/7002.misc | 1 - changelog.d/7003.misc | 1 - changelog.d/7015.misc | 1 - changelog.d/7018.bugfix | 1 - changelog.d/7019.misc | 1 - changelog.d/7020.misc | 1 - changelog.d/7026.removal | 1 - changelog.d/7030.feature | 1 - changelog.d/7034.removal | 1 - changelog.d/7035.bugfix | 1 - changelog.d/7037.feature | 1 - changelog.d/7044.bugfix | 1 - changelog.d/7045.misc | 1 - changelog.d/7048.doc | 1 - changelog.d/7055.misc | 1 - changelog.d/7058.feature | 1 - changelog.d/7063.misc | 1 - changelog.d/7066.bugfix | 1 - changelog.d/7067.feature | 1 - changelog.d/7070.bugfix | 1 - changelog.d/7074.bugfix | 1 - changelog.d/7085.feature | 1 - changelog.d/7094.misc | 1 - changelog.d/7095.misc | 1 - changelog.d/7103.feature | 1 - changelog.d/7104.misc | 1 - changelog.d/7106.feature | 1 - synapse/__init__.py | 2 +- 60 files changed, 78 insertions(+), 59 deletions(-) delete mode 100644 changelog.d/6309.misc delete mode 100644 changelog.d/6315.feature delete mode 100644 changelog.d/6572.bugfix delete mode 100644 changelog.d/6615.misc delete mode 100644 changelog.d/6874.misc delete mode 100644 changelog.d/6875.misc delete mode 100644 changelog.d/6925.doc delete mode 100644 changelog.d/6941.removal delete mode 100644 changelog.d/6952.misc delete mode 100644 changelog.d/6953.misc delete mode 100644 changelog.d/6954.misc delete mode 100644 changelog.d/6956.misc delete mode 100644 
changelog.d/6957.misc delete mode 100644 changelog.d/6962.bugfix delete mode 100644 changelog.d/6964.misc delete mode 100644 changelog.d/6965.feature delete mode 100644 changelog.d/6966.removal delete mode 100644 changelog.d/6967.bugfix delete mode 100644 changelog.d/6968.bugfix delete mode 100644 changelog.d/6970.removal delete mode 100644 changelog.d/6971.feature delete mode 100644 changelog.d/6979.misc delete mode 100644 changelog.d/6982.feature delete mode 100644 changelog.d/6983.misc delete mode 100644 changelog.d/6984.docker delete mode 100644 changelog.d/6985.misc delete mode 100644 changelog.d/6986.feature delete mode 100644 changelog.d/6987.misc delete mode 100644 changelog.d/6990.bugfix delete mode 100644 changelog.d/6991.misc delete mode 100644 changelog.d/6995.misc delete mode 100644 changelog.d/7002.misc delete mode 100644 changelog.d/7003.misc delete mode 100644 changelog.d/7015.misc delete mode 100644 changelog.d/7018.bugfix delete mode 100644 changelog.d/7019.misc delete mode 100644 changelog.d/7020.misc delete mode 100644 changelog.d/7026.removal delete mode 100644 changelog.d/7030.feature delete mode 100644 changelog.d/7034.removal delete mode 100644 changelog.d/7035.bugfix delete mode 100644 changelog.d/7037.feature delete mode 100644 changelog.d/7044.bugfix delete mode 100644 changelog.d/7045.misc delete mode 100644 changelog.d/7048.doc delete mode 100644 changelog.d/7055.misc delete mode 100644 changelog.d/7058.feature delete mode 100644 changelog.d/7063.misc delete mode 100644 changelog.d/7066.bugfix delete mode 100644 changelog.d/7067.feature delete mode 100644 changelog.d/7070.bugfix delete mode 100644 changelog.d/7074.bugfix delete mode 100644 changelog.d/7085.feature delete mode 100644 changelog.d/7094.misc delete mode 100644 changelog.d/7095.misc delete mode 100644 changelog.d/7103.feature delete mode 100644 changelog.d/7104.misc delete mode 100644 changelog.d/7106.feature diff --git a/CHANGES.md b/CHANGES.md index dc9ca05ad1..18ffcea4cd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,80 @@ +Synapse 1.12.0rc1 (2020-03-19) +============================== + +Features +-------- + +- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432): + - Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. ([\#6965](https://github.com/matrix-org/synapse/issues/6965)) + - Validate the alt_aliases property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971)) + - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986)) + - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037)) + - Stop sending m.room.aliases events during room creation and upgrade. ([\#6941](https://github.com/matrix-org/synapse/issues/6941)) + - Synapse no longer uses room alias events to calculate room names for email notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966)) + - The room list endpoint no longer returns a list of aliases. 
([\#6970](https://github.com/matrix-org/synapse/issues/6970)) + - Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. ([\#7034](https://github.com/matrix-org/synapse/issues/7034)) +- Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. ([\#6315](https://github.com/matrix-org/synapse/issues/6315)) +- Check that server_name is correctly set before running database updates. ([\#6982](https://github.com/matrix-org/synapse/issues/6982)) +- Break down monthly active users by `appservice_id` and emit via Prometheus. ([\#7030](https://github.com/matrix-org/synapse/issues/7030)) +- Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. ([\#7058](https://github.com/matrix-org/synapse/issues/7058), [\#7067](https://github.com/matrix-org/synapse/issues/7067)) +- Add an optional parameter to control whether other sessions are logged out when a user's password is modified. ([\#7085](https://github.com/matrix-org/synapse/issues/7085)) +- Add prometheus metrics for the number of active pushers. ([\#7103](https://github.com/matrix-org/synapse/issues/7103), [\#7106](https://github.com/matrix-org/synapse/issues/7106)) +- Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. ([\#7094](https://github.com/matrix-org/synapse/issues/7094)) + + +Bugfixes +-------- + +- When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. ([\#6572](https://github.com/matrix-org/synapse/issues/6572)) +- Fix a couple of bugs in email configuration handling. ([\#6962](https://github.com/matrix-org/synapse/issues/6962)) +- Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. ([\#6967](https://github.com/matrix-org/synapse/issues/6967)) +- Fix `duplicate key` error which was logged when rejoining a room over federation. ([\#6968](https://github.com/matrix-org/synapse/issues/6968)) +- Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. ([\#6990](https://github.com/matrix-org/synapse/issues/6990)) +- Fix py35-old CI by using native tox package. ([\#7018](https://github.com/matrix-org/synapse/issues/7018)) +- Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. ([\#7035](https://github.com/matrix-org/synapse/issues/7035)) +- Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. ([\#7044](https://github.com/matrix-org/synapse/issues/7044)) +- Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. ([\#7066](https://github.com/matrix-org/synapse/issues/7066)) +- Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. ([\#7070](https://github.com/matrix-org/synapse/issues/7070)) +- Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. ([\#7074](https://github.com/matrix-org/synapse/issues/7074)) + + +Improved Documentation +---------------------- + +- Updated CentOS8 install instructions. 
Contributed by Richard Kellner. ([\#6925](https://github.com/matrix-org/synapse/issues/6925))
+- Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. ([\#6984](https://github.com/matrix-org/synapse/issues/6984))
+- Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. ([\#7048](https://github.com/matrix-org/synapse/issues/7048))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unused query_auth federation endpoint per MSC2451. ([\#7026](https://github.com/matrix-org/synapse/issues/7026))
+
+
+Internal Changes
+----------------
+
+- Add type hints to `logging/context.py`. ([\#6309](https://github.com/matrix-org/synapse/issues/6309))
+- Add some clarifications to `README.md` in the database schema directory. ([\#6615](https://github.com/matrix-org/synapse/issues/6615))
+- Refactoring work in preparation for changing the event redaction algorithm. ([\#6874](https://github.com/matrix-org/synapse/issues/6874), [\#6875](https://github.com/matrix-org/synapse/issues/6875), [\#6983](https://github.com/matrix-org/synapse/issues/6983), [\#7003](https://github.com/matrix-org/synapse/issues/7003))
+- Improve performance of v2 state resolution for large rooms. ([\#6952](https://github.com/matrix-org/synapse/issues/6952), [\#7095](https://github.com/matrix-org/synapse/issues/7095))
+- Reduce time spent doing GC, by freezing objects on startup. ([\#6953](https://github.com/matrix-org/synapse/issues/6953))
+- Minor performance fixes to `get_auth_chain_ids`. ([\#6954](https://github.com/matrix-org/synapse/issues/6954))
+- Don't record remote cross-signing keys in the `devices` table. ([\#6956](https://github.com/matrix-org/synapse/issues/6956))
+- Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. ([\#6957](https://github.com/matrix-org/synapse/issues/6957))
+- Merge worker apps together. ([\#6964](https://github.com/matrix-org/synapse/issues/6964), [\#7002](https://github.com/matrix-org/synapse/issues/7002), [\#7055](https://github.com/matrix-org/synapse/issues/7055), [\#7104](https://github.com/matrix-org/synapse/issues/7104))
+- Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. ([\#6979](https://github.com/matrix-org/synapse/issues/6979))
+- Update warning for incorrect database collation/ctype to include link to documentation. ([\#6985](https://github.com/matrix-org/synapse/issues/6985))
+- Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987))
+- Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019))
+- Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995))
+- Change date in [INSTALL.md#tls-certificates] for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015))
+- Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020))
+- Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045))
+- Add type annotations and comments to the auth handler.
([\#7063](https://github.com/matrix-org/synapse/issues/7063)) + + Synapse 1.11.1 (2020-03-03) =========================== diff --git a/changelog.d/6309.misc b/changelog.d/6309.misc deleted file mode 100644 index 1aa7294617..0000000000 --- a/changelog.d/6309.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `logging/context.py`. diff --git a/changelog.d/6315.feature b/changelog.d/6315.feature deleted file mode 100644 index c5377dd1e9..0000000000 --- a/changelog.d/6315.feature +++ /dev/null @@ -1 +0,0 @@ -Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. diff --git a/changelog.d/6572.bugfix b/changelog.d/6572.bugfix deleted file mode 100644 index 4f708f409f..0000000000 --- a/changelog.d/6572.bugfix +++ /dev/null @@ -1 +0,0 @@ -When a user's profile is updated via the admin API, also generate a displayname/avatar update for that user in each room. diff --git a/changelog.d/6615.misc b/changelog.d/6615.misc deleted file mode 100644 index 9f93152565..0000000000 --- a/changelog.d/6615.misc +++ /dev/null @@ -1 +0,0 @@ -Add some clarifications to `README.md` in the database schema directory. diff --git a/changelog.d/6874.misc b/changelog.d/6874.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6874.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6875.misc b/changelog.d/6875.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6875.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6925.doc b/changelog.d/6925.doc deleted file mode 100644 index b8e6c73630..0000000000 --- a/changelog.d/6925.doc +++ /dev/null @@ -1 +0,0 @@ -Updated CentOS8 install instructions. Contributed by Richard Kellner. diff --git a/changelog.d/6941.removal b/changelog.d/6941.removal deleted file mode 100644 index 8573be84b3..0000000000 --- a/changelog.d/6941.removal +++ /dev/null @@ -1 +0,0 @@ -Stop sending m.room.aliases events during room creation and upgrade. diff --git a/changelog.d/6952.misc b/changelog.d/6952.misc deleted file mode 100644 index e26dc5cab8..0000000000 --- a/changelog.d/6952.misc +++ /dev/null @@ -1 +0,0 @@ -Improve perf of v2 state res for large rooms. diff --git a/changelog.d/6953.misc b/changelog.d/6953.misc deleted file mode 100644 index 0ab52041cf..0000000000 --- a/changelog.d/6953.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce time spent doing GC by freezing objects on startup. diff --git a/changelog.d/6954.misc b/changelog.d/6954.misc deleted file mode 100644 index 8b84ce2f19..0000000000 --- a/changelog.d/6954.misc +++ /dev/null @@ -1 +0,0 @@ -Minor perf fixes to `get_auth_chain_ids`. diff --git a/changelog.d/6956.misc b/changelog.d/6956.misc deleted file mode 100644 index 5cb0894182..0000000000 --- a/changelog.d/6956.misc +++ /dev/null @@ -1 +0,0 @@ -Don't record remote cross-signing keys in the `devices` table. diff --git a/changelog.d/6957.misc b/changelog.d/6957.misc deleted file mode 100644 index 4f98030110..0000000000 --- a/changelog.d/6957.misc +++ /dev/null @@ -1 +0,0 @@ -Use flake8-comprehensions to enforce good hygiene of list/set/dict comprehensions. diff --git a/changelog.d/6962.bugfix b/changelog.d/6962.bugfix deleted file mode 100644 index 9f5229d400..0000000000 --- a/changelog.d/6962.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a couple of bugs in email configuration handling. 
diff --git a/changelog.d/6964.misc b/changelog.d/6964.misc deleted file mode 100644 index ec5c004bbe..0000000000 --- a/changelog.d/6964.misc +++ /dev/null @@ -1 +0,0 @@ -Merge worker apps together. diff --git a/changelog.d/6965.feature b/changelog.d/6965.feature deleted file mode 100644 index 6ad9956e40..0000000000 --- a/changelog.d/6965.feature +++ /dev/null @@ -1 +0,0 @@ -Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. diff --git a/changelog.d/6966.removal b/changelog.d/6966.removal deleted file mode 100644 index 69673d9139..0000000000 --- a/changelog.d/6966.removal +++ /dev/null @@ -1 +0,0 @@ -Synapse no longer uses room alias events to calculate room names for email notifications. diff --git a/changelog.d/6967.bugfix b/changelog.d/6967.bugfix deleted file mode 100644 index b65f80cf1d..0000000000 --- a/changelog.d/6967.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an issue affecting worker-based deployments where replication would stop working, necessitating a full restart, after joining a large room. diff --git a/changelog.d/6968.bugfix b/changelog.d/6968.bugfix deleted file mode 100644 index 9965bfc0c3..0000000000 --- a/changelog.d/6968.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `duplicate key` error which was logged when rejoining a room over federation. diff --git a/changelog.d/6970.removal b/changelog.d/6970.removal deleted file mode 100644 index 89bd363b95..0000000000 --- a/changelog.d/6970.removal +++ /dev/null @@ -1 +0,0 @@ -The room list endpoint no longer returns a list of aliases. diff --git a/changelog.d/6971.feature b/changelog.d/6971.feature deleted file mode 100644 index ccf02a61df..0000000000 --- a/changelog.d/6971.feature +++ /dev/null @@ -1 +0,0 @@ -Validate the alt_aliases property of canonical alias events. diff --git a/changelog.d/6979.misc b/changelog.d/6979.misc deleted file mode 100644 index c57b398c2f..0000000000 --- a/changelog.d/6979.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `store_room` call from `FederationHandler._process_received_pdu`. diff --git a/changelog.d/6982.feature b/changelog.d/6982.feature deleted file mode 100644 index 934cc5141a..0000000000 --- a/changelog.d/6982.feature +++ /dev/null @@ -1 +0,0 @@ -Check that server_name is correctly set before running database updates. diff --git a/changelog.d/6983.misc b/changelog.d/6983.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6983.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6984.docker b/changelog.d/6984.docker deleted file mode 100644 index 84a55e1267..0000000000 --- a/changelog.d/6984.docker +++ /dev/null @@ -1 +0,0 @@ -Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. diff --git a/changelog.d/6985.misc b/changelog.d/6985.misc deleted file mode 100644 index ba367fa9af..0000000000 --- a/changelog.d/6985.misc +++ /dev/null @@ -1 +0,0 @@ -Update warning for incorrect database collation/ctype to include link to documentation. diff --git a/changelog.d/6986.feature b/changelog.d/6986.feature deleted file mode 100644 index 16dea8bd7f..0000000000 --- a/changelog.d/6986.feature +++ /dev/null @@ -1 +0,0 @@ -Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. 
diff --git a/changelog.d/6987.misc b/changelog.d/6987.misc deleted file mode 100644 index 7ff74cda55..0000000000 --- a/changelog.d/6987.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type annotations to the database storage classes. diff --git a/changelog.d/6990.bugfix b/changelog.d/6990.bugfix deleted file mode 100644 index 8c1c48f4d4..0000000000 --- a/changelog.d/6990.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent user from setting 'deactivated' to anything other than a bool on the v2 PUT /users Admin API. \ No newline at end of file diff --git a/changelog.d/6991.misc b/changelog.d/6991.misc deleted file mode 100644 index 5130f4e8af..0000000000 --- a/changelog.d/6991.misc +++ /dev/null @@ -1 +0,0 @@ -Port `synapse.handlers.presence` to async/await. diff --git a/changelog.d/6995.misc b/changelog.d/6995.misc deleted file mode 100644 index 884b4cf4ee..0000000000 --- a/changelog.d/6995.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type annotations to the federation base & client classes. diff --git a/changelog.d/7002.misc b/changelog.d/7002.misc deleted file mode 100644 index ec5c004bbe..0000000000 --- a/changelog.d/7002.misc +++ /dev/null @@ -1 +0,0 @@ -Merge worker apps together. diff --git a/changelog.d/7003.misc b/changelog.d/7003.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/7003.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/7015.misc b/changelog.d/7015.misc deleted file mode 100644 index 9709dc606e..0000000000 --- a/changelog.d/7015.misc +++ /dev/null @@ -1 +0,0 @@ -Change date in INSTALL.md#tls-certificates for last date of getting TLS certificates to November 2019. \ No newline at end of file diff --git a/changelog.d/7018.bugfix b/changelog.d/7018.bugfix deleted file mode 100644 index d1b6c1d464..0000000000 --- a/changelog.d/7018.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix py35-old CI by using native tox package. diff --git a/changelog.d/7019.misc b/changelog.d/7019.misc deleted file mode 100644 index 5130f4e8af..0000000000 --- a/changelog.d/7019.misc +++ /dev/null @@ -1 +0,0 @@ -Port `synapse.handlers.presence` to async/await. diff --git a/changelog.d/7020.misc b/changelog.d/7020.misc deleted file mode 100644 index 188b4378cb..0000000000 --- a/changelog.d/7020.misc +++ /dev/null @@ -1 +0,0 @@ -Port `synapse.rest.keys` to async/await. diff --git a/changelog.d/7026.removal b/changelog.d/7026.removal deleted file mode 100644 index 4c8c563bb0..0000000000 --- a/changelog.d/7026.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the unused query_auth federation endpoint per MSC2451. diff --git a/changelog.d/7030.feature b/changelog.d/7030.feature deleted file mode 100644 index fcfdb8d8a1..0000000000 --- a/changelog.d/7030.feature +++ /dev/null @@ -1 +0,0 @@ -Break down monthly active users by `appservice_id` and emit via Prometheus. diff --git a/changelog.d/7034.removal b/changelog.d/7034.removal deleted file mode 100644 index be8d20e14f..0000000000 --- a/changelog.d/7034.removal +++ /dev/null @@ -1 +0,0 @@ -Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. diff --git a/changelog.d/7035.bugfix b/changelog.d/7035.bugfix deleted file mode 100644 index 56292dc8ac..0000000000 --- a/changelog.d/7035.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing `org.matrix.dummy_event` to be included in responses from `/sync`. 
diff --git a/changelog.d/7037.feature b/changelog.d/7037.feature deleted file mode 100644 index 4bc1b3b19f..0000000000 --- a/changelog.d/7037.feature +++ /dev/null @@ -1 +0,0 @@ -Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). diff --git a/changelog.d/7044.bugfix b/changelog.d/7044.bugfix deleted file mode 100644 index 790088ddb4..0000000000 --- a/changelog.d/7044.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug that renders UTF-8 text files incorrectly when loaded from media. Contributed by @TheStranjer. diff --git a/changelog.d/7045.misc b/changelog.d/7045.misc deleted file mode 100644 index 74c1abea56..0000000000 --- a/changelog.d/7045.misc +++ /dev/null @@ -1 +0,0 @@ -Add a type check to `is_verified` when processing room keys. diff --git a/changelog.d/7048.doc b/changelog.d/7048.doc deleted file mode 100644 index c9666f333e..0000000000 --- a/changelog.d/7048.doc +++ /dev/null @@ -1 +0,0 @@ -Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. diff --git a/changelog.d/7055.misc b/changelog.d/7055.misc deleted file mode 100644 index ec5c004bbe..0000000000 --- a/changelog.d/7055.misc +++ /dev/null @@ -1 +0,0 @@ -Merge worker apps together. diff --git a/changelog.d/7058.feature b/changelog.d/7058.feature deleted file mode 100644 index 53ea485e03..0000000000 --- a/changelog.d/7058.feature +++ /dev/null @@ -1 +0,0 @@ -Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. diff --git a/changelog.d/7063.misc b/changelog.d/7063.misc deleted file mode 100644 index e7b1cd3cd8..0000000000 --- a/changelog.d/7063.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations and comments to the auth handler. diff --git a/changelog.d/7066.bugfix b/changelog.d/7066.bugfix deleted file mode 100644 index 94bb096287..0000000000 --- a/changelog.d/7066.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug that would cause Synapse to respond with an error about event visibility if a client tried to request the state of a room at a given token. diff --git a/changelog.d/7067.feature b/changelog.d/7067.feature deleted file mode 100644 index 53ea485e03..0000000000 --- a/changelog.d/7067.feature +++ /dev/null @@ -1 +0,0 @@ -Render a configurable and comprehensible error page if something goes wrong during the SAML2 authentication process. diff --git a/changelog.d/7070.bugfix b/changelog.d/7070.bugfix deleted file mode 100644 index 9031927546..0000000000 --- a/changelog.d/7070.bugfix +++ /dev/null @@ -1 +0,0 @@ -Repair a data-corruption issue which was introduced in Synapse 1.10, and fixed in Synapse 1.11, and which could cause `/sync` to return with 404 errors about missing events and unknown rooms. diff --git a/changelog.d/7074.bugfix b/changelog.d/7074.bugfix deleted file mode 100644 index 38d7455971..0000000000 --- a/changelog.d/7074.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. diff --git a/changelog.d/7085.feature b/changelog.d/7085.feature deleted file mode 100644 index df6d0f990d..0000000000 --- a/changelog.d/7085.feature +++ /dev/null @@ -1 +0,0 @@ -Add an optional parameter to control whether other sessions are logged out when a user's password is modified. 
diff --git a/changelog.d/7094.misc b/changelog.d/7094.misc deleted file mode 100644 index aa093ee3c0..0000000000 --- a/changelog.d/7094.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance when making HTTPS requests to sygnal, sydent, etc, by sharing the SSL context object between connections. diff --git a/changelog.d/7095.misc b/changelog.d/7095.misc deleted file mode 100644 index 44fc9f616f..0000000000 --- a/changelog.d/7095.misc +++ /dev/null @@ -1 +0,0 @@ -Attempt to improve performance of state res v2 algorithm. diff --git a/changelog.d/7103.feature b/changelog.d/7103.feature deleted file mode 100644 index 413e7f29d7..0000000000 --- a/changelog.d/7103.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics for the number of active pushers. diff --git a/changelog.d/7104.misc b/changelog.d/7104.misc deleted file mode 100644 index ec5c004bbe..0000000000 --- a/changelog.d/7104.misc +++ /dev/null @@ -1 +0,0 @@ -Merge worker apps together. diff --git a/changelog.d/7106.feature b/changelog.d/7106.feature deleted file mode 100644 index 413e7f29d7..0000000000 --- a/changelog.d/7106.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics for the number of active pushers. diff --git a/synapse/__init__.py b/synapse/__init__.py index e56ba89ff4..020e0536be 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.11.1" +__version__ = "1.12.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 5aa6dff99ea7307e3c38d52a9f3dff1f1e2a9630 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Mar 2020 11:15:48 +0000 Subject: [PATCH 281/382] fix typo --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 18ffcea4cd..79f24fc643 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,9 +4,9 @@ Synapse 1.12.0rc1 (2020-03-19) Features -------- -- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432): +- Changes related to room alias management ([MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)): - Publishing/removing a room from the room directory now requires the user to have a power level capable of modifying the canonical alias, instead of the room aliases. ([\#6965](https://github.com/matrix-org/synapse/issues/6965)) - - Validate the alt_aliases property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971)) + - Validate the `alt_aliases` property of canonical alias events. ([\#6971](https://github.com/matrix-org/synapse/issues/6971)) - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986)) - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037)) - Stop sending m.room.aliases events during room creation and upgrade. 
([\#6941](https://github.com/matrix-org/synapse/issues/6941)) From 163f23785adb7d531d8ef2ca9bca161a2dcf76c9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Mar 2020 11:25:32 +0000 Subject: [PATCH 282/382] changelog fixes --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 79f24fc643..153ace20b8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,7 +10,7 @@ Features - Users with a power level sufficient to modify the canonical alias of a room can now delete room aliases. ([\#6986](https://github.com/matrix-org/synapse/issues/6986)) - Implement updated authorization rules and redaction rules for aliases events, from [MSC2261](https://github.com/matrix-org/matrix-doc/pull/2261) and [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432). ([\#7037](https://github.com/matrix-org/synapse/issues/7037)) - Stop sending m.room.aliases events during room creation and upgrade. ([\#6941](https://github.com/matrix-org/synapse/issues/6941)) - - Synapse no longer uses room alias events to calculate room names for email notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966)) + - Synapse no longer uses room alias events to calculate room names for push notifications. ([\#6966](https://github.com/matrix-org/synapse/issues/6966)) - The room list endpoint no longer returns a list of aliases. ([\#6970](https://github.com/matrix-org/synapse/issues/6970)) - Remove special handling of aliases events from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260) added in v1.10.0rc1. ([\#7034](https://github.com/matrix-org/synapse/issues/7034)) - Expose the `synctl`, `hash_password` and `generate_config` commands in the snapcraft package. Contributed by @devec0. ([\#6315](https://github.com/matrix-org/synapse/issues/6315)) @@ -49,7 +49,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove the unused query_auth federation endpoint per MSC2451. ([\#7026](https://github.com/matrix-org/synapse/issues/7026)) +- Remove the unused query_auth federation endpoint per [MSC2451](https://github.com/matrix-org/matrix-doc/pull/2451). ([\#7026](https://github.com/matrix-org/synapse/issues/7026)) Internal Changes @@ -69,7 +69,7 @@ Internal Changes - Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987)) - Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019)) - Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995)) -- Change date in [INSTALL.md#tls-certificates] for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015)) +- Change date in [INSTALL.md](./INSTALL.md#tls-certificates) for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015)) - Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020)) - Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045)) - Add type annotations and comments to the auth handler. 
([\#7063](https://github.com/matrix-org/synapse/issues/7063)) From c8c926f9c9d54306a7cc344978de339837070cb5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 19 Mar 2020 11:26:51 +0000 Subject: [PATCH 283/382] more changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 153ace20b8..e3550497a4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -43,6 +43,7 @@ Improved Documentation - Updated CentOS8 install instructions. Contributed by Richard Kellner. ([\#6925](https://github.com/matrix-org/synapse/issues/6925)) - Fix `POSTGRES_INITDB_ARGS` in the `contrib/docker/docker-compose.yml` example docker-compose configuration. ([\#6984](https://github.com/matrix-org/synapse/issues/6984)) +- Change date in [INSTALL.md](./INSTALL.md#tls-certificates) for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015)) - Document that the fallback auth endpoints must be routed to the same worker node as the register endpoints. ([\#7048](https://github.com/matrix-org/synapse/issues/7048)) @@ -69,7 +70,6 @@ Internal Changes - Add some type annotations to the database storage classes. ([\#6987](https://github.com/matrix-org/synapse/issues/6987)) - Port `synapse.handlers.presence` to async/await. ([\#6991](https://github.com/matrix-org/synapse/issues/6991), [\#7019](https://github.com/matrix-org/synapse/issues/7019)) - Add some type annotations to the federation base & client classes. ([\#6995](https://github.com/matrix-org/synapse/issues/6995)) -- Change date in [INSTALL.md](./INSTALL.md#tls-certificates) for last date of getting TLS certificates to November 2019. ([\#7015](https://github.com/matrix-org/synapse/issues/7015)) - Port `synapse.rest.keys` to async/await. ([\#7020](https://github.com/matrix-org/synapse/issues/7020)) - Add a type check to `is_verified` when processing room keys. ([\#7045](https://github.com/matrix-org/synapse/issues/7045)) - Add type annotations and comments to the auth handler. ([\#7063](https://github.com/matrix-org/synapse/issues/7063)) From c2db6599c820d97e3c8a02d782e90af80121c903 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 19 Mar 2020 08:22:56 -0400 Subject: [PATCH 284/382] Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors (#7089). --- changelog.d/7089.bugfix | 1 + synapse/federation/federation_base.py | 24 +++++++++--------------- synapse/federation/federation_client.py | 19 ++++++++----------- synapse/federation/federation_server.py | 8 ++++---- 4 files changed, 22 insertions(+), 30 deletions(-) create mode 100644 changelog.d/7089.bugfix diff --git a/changelog.d/7089.bugfix b/changelog.d/7089.bugfix new file mode 100644 index 0000000000..f1f440f23a --- /dev/null +++ b/changelog.d/7089.bugfix @@ -0,0 +1 @@ +Fix a bug in the federation API which could cause occasional "Failed to get PDU" errors. 
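The hunks that follow thread the parsed `RoomVersion` object through the signature-checking helpers in place of its string identifier. That removes the `KNOWN_ROOM_VERSIONS` re-lookup inside `_check_sigs_on_pdus` (which raised `RuntimeError` for an unrecognised identifier) and lets each callee read fields such as `enforce_key_validity` directly. A minimal stand-in sketch of the shape of the change; the types below are illustrative, not Synapse's real classes:

```python
# Stand-in sketch of "pass the parsed object, not its name".
from dataclasses import dataclass
from typing import Dict

@dataclass(frozen=True)
class RoomVersion:
    identifier: str
    enforce_key_validity: bool
    event_format: int  # the real class also drives event-ID checks with this

KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
    "5": RoomVersion("5", enforce_key_validity=True, event_format=2),
}

def check_sigs_old(room_version_id: str) -> bool:
    # every callee repeated this lookup, and it could fail at runtime
    v = KNOWN_ROOM_VERSIONS.get(room_version_id)
    if not v:
        raise RuntimeError("Unrecognized room version %s" % (room_version_id,))
    return v.enforce_key_validity

def check_sigs_new(room_version: RoomVersion) -> bool:
    # the caller resolved the version once; no second lookup to go wrong
    return room_version.enforce_key_validity
```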
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 5c991e5412..b0b0eba41e 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -25,11 +25,7 @@ from twisted.python.failure import Failure from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError -from synapse.api.room_versions import ( - KNOWN_ROOM_VERSIONS, - EventFormatVersions, - RoomVersion, -) +from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.crypto.event_signing import check_event_content_hash from synapse.crypto.keyring import Keyring from synapse.events import EventBase, make_event_from_dict @@ -55,13 +51,15 @@ class FederationBase(object): self.store = hs.get_datastore() self._clock = hs.get_clock() - def _check_sigs_and_hash(self, room_version: str, pdu: EventBase) -> Deferred: + def _check_sigs_and_hash( + self, room_version: RoomVersion, pdu: EventBase + ) -> Deferred: return make_deferred_yieldable( self._check_sigs_and_hashes(room_version, [pdu])[0] ) def _check_sigs_and_hashes( - self, room_version: str, pdus: List[EventBase] + self, room_version: RoomVersion, pdus: List[EventBase] ) -> List[Deferred]: """Checks that each of the received events is correctly signed by the sending server. @@ -146,7 +144,7 @@ class PduToCheckSig( def _check_sigs_on_pdus( - keyring: Keyring, room_version: str, pdus: Iterable[EventBase] + keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase] ) -> List[Deferred]: """Check that the given events are correctly signed @@ -191,10 +189,6 @@ def _check_sigs_on_pdus( for p in pdus ] - v = KNOWN_ROOM_VERSIONS.get(room_version) - if not v: - raise RuntimeError("Unrecognized room version %s" % (room_version,)) - # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)] @@ -204,7 +198,7 @@ def _check_sigs_on_pdus( ( p.sender_domain, p.redacted_pdu_json, - p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_sender @@ -227,7 +221,7 @@ def _check_sigs_on_pdus( # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). - if v.event_format == EventFormatVersions.V1: + if room_version.event_format == EventFormatVersions.V1: pdus_to_check_event_id = [ p for p in pdus_to_check @@ -239,7 +233,7 @@ def _check_sigs_on_pdus( ( get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, - p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, p.pdu.event_id, ) for p in pdus_to_check_event_id diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8c6b839478..a0071fec94 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -220,8 +220,7 @@ class FederationClient(FederationBase): # FIXME: We should handle signature failures more gracefully. 
pdus[:] = await make_deferred_yieldable( defer.gatherResults( - self._check_sigs_and_hashes(room_version.identifier, pdus), - consumeErrors=True, + self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True, ).addErrback(unwrapFirstError) ) @@ -291,9 +290,7 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. - signed_pdu = await self._check_sigs_and_hash( - room_version.identifier, pdu - ) + signed_pdu = await self._check_sigs_and_hash(room_version, pdu) break @@ -350,7 +347,7 @@ class FederationClient(FederationBase): self, origin: str, pdus: List[EventBase], - room_version: str, + room_version: RoomVersion, outlier: bool = False, include_none: bool = False, ) -> List[EventBase]: @@ -396,7 +393,7 @@ class FederationClient(FederationBase): self.get_pdu( destinations=[pdu.origin], event_id=pdu.event_id, - room_version=room_version, # type: ignore + room_version=room_version, outlier=outlier, timeout=10000, ) @@ -434,7 +431,7 @@ class FederationClient(FederationBase): ] signed_auth = await self._check_sigs_and_hash_and_fetch( - destination, auth_chain, outlier=True, room_version=room_version.identifier + destination, auth_chain, outlier=True, room_version=room_version ) signed_auth.sort(key=lambda e: e.depth) @@ -661,7 +658,7 @@ class FederationClient(FederationBase): destination, list(pdus.values()), outlier=True, - room_version=room_version.identifier, + room_version=room_version, ) valid_pdus_map = {p.event_id: p for p in valid_pdus} @@ -756,7 +753,7 @@ class FederationClient(FederationBase): pdu = event_from_pdu_json(pdu_dict, room_version) # Check signatures are correct. - pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) # FIXME: We should handle signature failures more gracefully. 
@@ -948,7 +945,7 @@ class FederationClient(FederationBase): ] signed_events = await self._check_sigs_and_hash_and_fetch( - destination, events, outlier=False, room_version=room_version.identifier + destination, events, outlier=False, room_version=room_version ) except HttpResponseException as e: if not e.code == 400: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 275b9c99d7..89d521bc31 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -409,7 +409,7 @@ class FederationServer(FederationBase): pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) - pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version) time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} @@ -425,7 +425,7 @@ class FederationServer(FederationBase): logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) res_pdus = await self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() @@ -455,7 +455,7 @@ class FederationServer(FederationBase): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) await self.handler.on_send_leave_request(origin, pdu) return {} @@ -611,7 +611,7 @@ class FederationServer(FederationBase): logger.info("Accepting join PDU %s from %s", pdu.event_id, origin) # We've already checked that we know the room version by this point - room_version = await self.store.get_room_version_id(pdu.room_id) + room_version = await self.store.get_room_version(pdu.room_id) # Check signature. try: From caec7d4fa0041697b7714e638477772f0a827ff6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 20 Mar 2020 07:20:02 -0400 Subject: [PATCH 285/382] Convert some of the media REST code to async/await (#7110) --- changelog.d/7110.misc | 1 + synapse/rest/media/v1/media_repository.py | 110 ++++++++---------- synapse/rest/media/v1/preview_url_resource.py | 37 +++--- synapse/rest/media/v1/thumbnail_resource.py | 54 ++++----- 4 files changed, 91 insertions(+), 111 deletions(-) create mode 100644 changelog.d/7110.misc diff --git a/changelog.d/7110.misc b/changelog.d/7110.misc new file mode 100644 index 0000000000..fac5bc0403 --- /dev/null +++ b/changelog.d/7110.misc @@ -0,0 +1 @@ +Convert some of synapse.rest.media to async/await. 
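The media patches below all follow one mechanical recipe: drop `@defer.inlineCallbacks`, mark the function `async def`, and replace each `yield deferred` with `await deferred`. Twisted `Deferred`s implement `__await__`, so native coroutines can await them directly, and `with (await ...)` keeps the shape of the old `with (yield ...)` linearizer idiom. A minimal before/after sketch with hypothetical names, not Synapse's actual classes:

```python
# Before/after sketch of the inlineCallbacks -> async/await port.
# `Store` and `fetch` are hypothetical stand-ins.
from twisted.internet import defer

class Store:
    def fetch(self, media_id):
        return defer.succeed({"media_id": media_id})

store = Store()

@defer.inlineCallbacks
def get_media_old(media_id):
    info = yield store.fetch(media_id)  # inlineCallbacks resolves the Deferred
    return info

async def get_media_new(media_id):
    info = await store.fetch(media_id)  # Deferreds are directly awaitable
    return info

# Call sites that still expect a Deferred can wrap the coroutine:
d = defer.ensureDeferred(get_media_new("abc123"))
```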
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 490b1b45a8..fd10d42f2f 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -24,7 +24,6 @@ from six import iteritems import twisted.internet.error import twisted.web.http -from twisted.internet import defer from twisted.web.resource import Resource from synapse.api.errors import ( @@ -114,15 +113,14 @@ class MediaRepository(object): "update_recently_accessed_media", self._update_recently_accessed ) - @defer.inlineCallbacks - def _update_recently_accessed(self): + async def _update_recently_accessed(self): remote_media = self.recently_accessed_remotes self.recently_accessed_remotes = set() local_media = self.recently_accessed_locals self.recently_accessed_locals = set() - yield self.store.update_cached_last_access_time( + await self.store.update_cached_last_access_time( local_media, remote_media, self.clock.time_msec() ) @@ -138,8 +136,7 @@ class MediaRepository(object): else: self.recently_accessed_locals.add(media_id) - @defer.inlineCallbacks - def create_content( + async def create_content( self, media_type, upload_name, content, content_length, auth_user ): """Store uploaded content for a local user and return the mxc URL @@ -158,11 +155,11 @@ class MediaRepository(object): file_info = FileInfo(server_name=None, file_id=media_id) - fname = yield self.media_storage.store_file(content, file_info) + fname = await self.media_storage.store_file(content, file_info) logger.info("Stored local media in file %r", fname) - yield self.store.store_local_media( + await self.store.store_local_media( media_id=media_id, media_type=media_type, time_now_ms=self.clock.time_msec(), @@ -171,12 +168,11 @@ class MediaRepository(object): user_id=auth_user, ) - yield self._generate_thumbnails(None, media_id, media_id, media_type) + await self._generate_thumbnails(None, media_id, media_id, media_type) return "mxc://%s/%s" % (self.server_name, media_id) - @defer.inlineCallbacks - def get_local_media(self, request, media_id, name): + async def get_local_media(self, request, media_id, name): """Responds to requests for local media, if it exists, or returns 404. Args: @@ -190,7 +186,7 @@ class MediaRepository(object): Deferred: Resolves once a response has successfully been written to request """ - media_info = yield self.store.get_local_media(media_id) + media_info = await self.store.get_local_media(media_id) if not media_info or media_info["quarantined_by"]: respond_404(request) return @@ -204,13 +200,12 @@ class MediaRepository(object): file_info = FileInfo(None, media_id, url_cache=url_cache) - responder = yield self.media_storage.fetch_media(file_info) - yield respond_with_responder( + responder = await self.media_storage.fetch_media(file_info) + await respond_with_responder( request, responder, media_type, media_length, upload_name ) - @defer.inlineCallbacks - def get_remote_media(self, request, server_name, media_id, name): + async def get_remote_media(self, request, server_name, media_id, name): """Respond to requests for remote media.
Args: @@ -236,8 +231,8 @@ class MediaRepository(object): # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) - with (yield self.remote_media_linearizer.queue(key)): - responder, media_info = yield self._get_remote_media_impl( + with (await self.remote_media_linearizer.queue(key)): + responder, media_info = await self._get_remote_media_impl( server_name, media_id ) @@ -246,14 +241,13 @@ class MediaRepository(object): media_type = media_info["media_type"] media_length = media_info["media_length"] upload_name = name if name else media_info["upload_name"] - yield respond_with_responder( + await respond_with_responder( request, responder, media_type, media_length, upload_name ) else: respond_404(request) - @defer.inlineCallbacks - def get_remote_media_info(self, server_name, media_id): + async def get_remote_media_info(self, server_name, media_id): """Gets the media info associated with the remote file, downloading if necessary. @@ -274,8 +268,8 @@ class MediaRepository(object): # We linearize here to ensure that we don't try and download remote # media multiple times concurrently key = (server_name, media_id) - with (yield self.remote_media_linearizer.queue(key)): - responder, media_info = yield self._get_remote_media_impl( + with (await self.remote_media_linearizer.queue(key)): + responder, media_info = await self._get_remote_media_impl( server_name, media_id ) @@ -286,8 +280,7 @@ class MediaRepository(object): return media_info - @defer.inlineCallbacks - def _get_remote_media_impl(self, server_name, media_id): + async def _get_remote_media_impl(self, server_name, media_id): """Looks for media in local cache, if not there then attempt to download from remote server. @@ -299,7 +292,7 @@ class MediaRepository(object): Returns: Deferred[(Responder, media_info)] """ - media_info = yield self.store.get_cached_remote_media(server_name, media_id) + media_info = await self.store.get_cached_remote_media(server_name, media_id) # file_id is the ID we use to track the file locally. If we've already # seen the file then reuse the existing ID, otherwise generate a new @@ -317,19 +310,18 @@ class MediaRepository(object): logger.info("Media is quarantined") raise NotFoundError() - responder = yield self.media_storage.fetch_media(file_info) + responder = await self.media_storage.fetch_media(file_info) if responder: return responder, media_info # Failed to find the file anywhere, let's download it. - media_info = yield self._download_remote_file(server_name, media_id, file_id) + media_info = await self._download_remote_file(server_name, media_id, file_id) - responder = yield self.media_storage.fetch_media(file_info) + responder = await self.media_storage.fetch_media(file_info) return responder, media_info - @defer.inlineCallbacks - def _download_remote_file(self, server_name, media_id, file_id): + async def _download_remote_file(self, server_name, media_id, file_id): """Attempt to download the remote file from the given server name, using the given file_id as the local id.
@@ -351,7 +343,7 @@ class MediaRepository(object): ("/_matrix/media/v1/download", server_name, media_id) ) try: - length, headers = yield self.client.get_file( + length, headers = await self.client.get_file( server_name, request_path, output_stream=f, @@ -397,7 +389,7 @@ class MediaRepository(object): ) raise SynapseError(502, "Failed to fetch remote media") - yield finish() + await finish() media_type = headers[b"Content-Type"][0].decode("ascii") upload_name = get_filename_from_headers(headers) @@ -405,7 +397,7 @@ class MediaRepository(object): logger.info("Stored remote media in file %r", fname) - yield self.store.store_cached_remote_media( + await self.store.store_cached_remote_media( origin=server_name, media_id=media_id, media_type=media_type, @@ -423,7 +415,7 @@ class MediaRepository(object): "filesystem_id": file_id, } - yield self._generate_thumbnails(server_name, media_id, file_id, media_type) + await self._generate_thumbnails(server_name, media_id, file_id, media_type) return media_info @@ -458,16 +450,15 @@ class MediaRepository(object): return t_byte_source - @defer.inlineCallbacks - def generate_local_exact_thumbnail( + async def generate_local_exact_thumbnail( self, media_id, t_width, t_height, t_method, t_type, url_cache ): - input_path = yield self.media_storage.ensure_media_is_in_local_cache( + input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield defer_to_thread( + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -490,7 +481,7 @@ class MediaRepository(object): thumbnail_type=t_type, ) - output_path = yield self.media_storage.store_file( + output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: @@ -500,22 +491,21 @@ class MediaRepository(object): t_len = os.path.getsize(output_path) - yield self.store.store_local_thumbnail( + await self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) return output_path - @defer.inlineCallbacks - def generate_remote_exact_thumbnail( + async def generate_remote_exact_thumbnail( self, server_name, file_id, media_id, t_width, t_height, t_method, t_type ): - input_path = yield self.media_storage.ensure_media_is_in_local_cache( + input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=False) ) thumbnailer = Thumbnailer(input_path) - t_byte_source = yield defer_to_thread( + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, thumbnailer, @@ -537,7 +527,7 @@ class MediaRepository(object): thumbnail_type=t_type, ) - output_path = yield self.media_storage.store_file( + output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: @@ -547,7 +537,7 @@ class MediaRepository(object): t_len = os.path.getsize(output_path) - yield self.store.store_remote_media_thumbnail( + await self.store.store_remote_media_thumbnail( server_name, media_id, file_id, @@ -560,8 +550,7 @@ class MediaRepository(object): return output_path - @defer.inlineCallbacks - def _generate_thumbnails( + async def _generate_thumbnails( self, server_name, media_id, file_id, media_type, url_cache=False ): """Generate and store thumbnails for an image. 
@@ -582,7 +571,7 @@ class MediaRepository(object): if not requirements: return - input_path = yield self.media_storage.ensure_media_is_in_local_cache( + input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=url_cache) ) @@ -600,7 +589,7 @@ class MediaRepository(object): return if thumbnailer.transpose_method is not None: - m_width, m_height = yield defer_to_thread( + m_width, m_height = await defer_to_thread( self.hs.get_reactor(), thumbnailer.transpose ) @@ -620,11 +609,11 @@ class MediaRepository(object): for (t_width, t_height, t_type), t_method in iteritems(thumbnails): # Generate the thumbnail if t_method == "crop": - t_byte_source = yield defer_to_thread( + t_byte_source = await defer_to_thread( self.hs.get_reactor(), thumbnailer.crop, t_width, t_height, t_type ) elif t_method == "scale": - t_byte_source = yield defer_to_thread( + t_byte_source = await defer_to_thread( self.hs.get_reactor(), thumbnailer.scale, t_width, t_height, t_type ) else: @@ -646,7 +635,7 @@ class MediaRepository(object): url_cache=url_cache, ) - output_path = yield self.media_storage.store_file( + output_path = await self.media_storage.store_file( t_byte_source, file_info ) finally: @@ -656,7 +645,7 @@ class MediaRepository(object): # Write to database if server_name: - yield self.store.store_remote_media_thumbnail( + await self.store.store_remote_media_thumbnail( server_name, media_id, file_id, @@ -667,15 +656,14 @@ class MediaRepository(object): t_len, ) else: - yield self.store.store_local_thumbnail( + await self.store.store_local_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) return {"width": m_width, "height": m_height} - @defer.inlineCallbacks - def delete_old_remote_media(self, before_ts): - old_media = yield self.store.get_remote_media_before(before_ts) + async def delete_old_remote_media(self, before_ts): + old_media = await self.store.get_remote_media_before(before_ts) deleted = 0 @@ -689,7 +677,7 @@ class MediaRepository(object): # TODO: Should we delete from the backup store - with (yield self.remote_media_linearizer.queue(key)): + with (await self.remote_media_linearizer.queue(key)): full_path = self.filepaths.remote_media_filepath(origin, file_id) try: os.remove(full_path) @@ -705,7 +693,7 @@ class MediaRepository(object): ) shutil.rmtree(thumbnail_dir, ignore_errors=True) - yield self.store.delete_remote_media(origin, media_id) + await self.store.delete_remote_media(origin, media_id) deleted += 1 return {"deleted": deleted} diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 07e395cfd1..c46676f8fc 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -165,8 +165,7 @@ class PreviewUrlResource(DirectServeResource): og = await make_deferred_yieldable(defer.maybeDeferred(observable.observe)) respond_with_json_bytes(request, 200, og, send_cors=True) - @defer.inlineCallbacks - def _do_preview(self, url, user, ts): + async def _do_preview(self, url, user, ts): """Check the db, and download the URL and build a preview Args: @@ -179,7 +178,7 @@ class PreviewUrlResource(DirectServeResource): """ # check the URL cache in the DB (which will also provide us with # historical previews, if we have any) - cache_result = yield self.store.get_url_cache(url, ts) + cache_result = await self.store.get_url_cache(url, ts) if ( cache_result and cache_result["expires_ts"] > ts @@ -192,13 +191,13 @@ class 
PreviewUrlResource(DirectServeResource): og = og.encode("utf8") return og - media_info = yield self._download_url(url, user) + media_info = await self._download_url(url, user) logger.debug("got media_info of '%s'", media_info) if _is_media(media_info["media_type"]): file_id = media_info["filesystem_id"] - dims = yield self.media_repo._generate_thumbnails( + dims = await self.media_repo._generate_thumbnails( None, file_id, file_id, media_info["media_type"], url_cache=True ) @@ -248,14 +247,14 @@ class PreviewUrlResource(DirectServeResource): # request itself and benefit from the same caching etc. But for now we # just rely on the caching on the master request to speed things up. if "og:image" in og and og["og:image"]: - image_info = yield self._download_url( + image_info = await self._download_url( _rebase_url(og["og:image"], media_info["uri"]), user ) if _is_media(image_info["media_type"]): # TODO: make sure we don't choke on white-on-transparent images file_id = image_info["filesystem_id"] - dims = yield self.media_repo._generate_thumbnails( + dims = await self.media_repo._generate_thumbnails( None, file_id, file_id, image_info["media_type"], url_cache=True ) if dims: @@ -293,7 +292,7 @@ class PreviewUrlResource(DirectServeResource): jsonog = json.dumps(og) # store OG in history-aware DB cache - yield self.store.store_url_cache( + await self.store.store_url_cache( url, media_info["response_code"], media_info["etag"], @@ -305,8 +304,7 @@ class PreviewUrlResource(DirectServeResource): return jsonog.encode("utf8") - @defer.inlineCallbacks - def _download_url(self, url, user): + async def _download_url(self, url, user): # TODO: we should probably honour robots.txt... except in practice # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? @@ -318,7 +316,7 @@ class PreviewUrlResource(DirectServeResource): with self.media_storage.store_into_file(file_info) as (f, fname, finish): try: logger.debug("Trying to get url '%s'", url) - length, headers, uri, code = yield self.client.get_file( + length, headers, uri, code = await self.client.get_file( url, output_stream=f, max_size=self.max_spider_size ) except SynapseError: @@ -345,7 +343,7 @@ class PreviewUrlResource(DirectServeResource): % (traceback.format_exception_only(sys.exc_info()[0], e),), Codes.UNKNOWN, ) - yield finish() + await finish() try: if b"Content-Type" in headers: @@ -356,7 +354,7 @@ class PreviewUrlResource(DirectServeResource): download_name = get_filename_from_headers(headers) - yield self.store.store_local_media( + await self.store.store_local_media( media_id=file_id, media_type=media_type, time_now_ms=self.clock.time_msec(), @@ -393,8 +391,7 @@ class PreviewUrlResource(DirectServeResource): "expire_url_cache_data", self._expire_url_cache_data ) - @defer.inlineCallbacks - def _expire_url_cache_data(self): + async def _expire_url_cache_data(self): """Clean up expired url cache content, media and thumbnails. 
""" # TODO: Delete from backup media store @@ -403,12 +400,12 @@ class PreviewUrlResource(DirectServeResource): logger.info("Running url preview cache expiry") - if not (yield self.store.db.updates.has_completed_background_updates()): + if not (await self.store.db.updates.has_completed_background_updates()): logger.info("Still running DB updates; skipping expiry") return # First we delete expired url cache entries - media_ids = yield self.store.get_expired_url_cache(now) + media_ids = await self.store.get_expired_url_cache(now) removed_media = [] for media_id in media_ids: @@ -430,7 +427,7 @@ class PreviewUrlResource(DirectServeResource): except Exception: pass - yield self.store.delete_url_cache(removed_media) + await self.store.delete_url_cache(removed_media) if removed_media: logger.info("Deleted %d entries from url cache", len(removed_media)) @@ -440,7 +437,7 @@ class PreviewUrlResource(DirectServeResource): # may have a room open with a preview url thing open). # So we wait a couple of days before deleting, just in case. expire_before = now - 2 * 24 * 60 * 60 * 1000 - media_ids = yield self.store.get_url_cache_media_before(expire_before) + media_ids = await self.store.get_url_cache_media_before(expire_before) removed_media = [] for media_id in media_ids: @@ -478,7 +475,7 @@ class PreviewUrlResource(DirectServeResource): except Exception: pass - yield self.store.delete_url_cache_media(removed_media) + await self.store.delete_url_cache_media(removed_media) logger.info("Deleted %d media from url cache", len(removed_media)) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index d57480f761..0b87220234 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -16,8 +16,6 @@ import logging -from twisted.internet import defer - from synapse.http.server import ( DirectServeResource, set_cors_headers, @@ -79,11 +77,10 @@ class ThumbnailResource(DirectServeResource): ) self.media_repo.mark_recently_accessed(server_name, media_id) - @defer.inlineCallbacks - def _respond_local_thumbnail( + async def _respond_local_thumbnail( self, request, media_id, width, height, method, m_type ): - media_info = yield self.store.get_local_media(media_id) + media_info = await self.store.get_local_media(media_id) if not media_info: respond_404(request) @@ -93,7 +90,7 @@ class ThumbnailResource(DirectServeResource): respond_404(request) return - thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id) + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) if thumbnail_infos: thumbnail_info = self._select_thumbnail( @@ -114,14 +111,13 @@ class ThumbnailResource(DirectServeResource): t_type = file_info.thumbnail_type t_length = thumbnail_info["thumbnail_length"] - responder = yield self.media_storage.fetch_media(file_info) - yield respond_with_responder(request, responder, t_type, t_length) + responder = await self.media_storage.fetch_media(file_info) + await respond_with_responder(request, responder, t_type, t_length) else: logger.info("Couldn't find any generated thumbnails") respond_404(request) - @defer.inlineCallbacks - def _select_or_generate_local_thumbnail( + async def _select_or_generate_local_thumbnail( self, request, media_id, @@ -130,7 +126,7 @@ class ThumbnailResource(DirectServeResource): desired_method, desired_type, ): - media_info = yield self.store.get_local_media(media_id) + media_info = await self.store.get_local_media(media_id) if not media_info: 
respond_404(request) @@ -140,7 +136,7 @@ class ThumbnailResource(DirectServeResource): respond_404(request) return - thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id) + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: t_w = info["thumbnail_width"] == desired_width t_h = info["thumbnail_height"] == desired_height @@ -162,15 +158,15 @@ class ThumbnailResource(DirectServeResource): t_type = file_info.thumbnail_type t_length = info["thumbnail_length"] - responder = yield self.media_storage.fetch_media(file_info) + responder = await self.media_storage.fetch_media(file_info) if responder: - yield respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder(request, responder, t_type, t_length) return logger.debug("We don't have a thumbnail of that size. Generating") # Okay, so we generate one. - file_path = yield self.media_repo.generate_local_exact_thumbnail( + file_path = await self.media_repo.generate_local_exact_thumbnail( media_id, desired_width, desired_height, @@ -180,13 +176,12 @@ class ThumbnailResource(DirectServeResource): ) if file_path: - yield respond_with_file(request, desired_type, file_path) + await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") respond_404(request) - @defer.inlineCallbacks - def _select_or_generate_remote_thumbnail( + async def _select_or_generate_remote_thumbnail( self, request, server_name, @@ -196,9 +191,9 @@ class ThumbnailResource(DirectServeResource): desired_method, desired_type, ): - media_info = yield self.media_repo.get_remote_media_info(server_name, media_id) + media_info = await self.media_repo.get_remote_media_info(server_name, media_id) - thumbnail_infos = yield self.store.get_remote_media_thumbnails( + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -224,15 +219,15 @@ class ThumbnailResource(DirectServeResource): t_type = file_info.thumbnail_type t_length = info["thumbnail_length"] - responder = yield self.media_storage.fetch_media(file_info) + responder = await self.media_storage.fetch_media(file_info) if responder: - yield respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder(request, responder, t_type, t_length) return logger.debug("We don't have a thumbnail of that size. Generating") # Okay, so we generate one. - file_path = yield self.media_repo.generate_remote_exact_thumbnail( + file_path = await self.media_repo.generate_remote_exact_thumbnail( server_name, file_id, media_id, @@ -243,21 +238,20 @@ class ThumbnailResource(DirectServeResource): ) if file_path: - yield respond_with_file(request, desired_type, file_path) + await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") respond_404(request) - @defer.inlineCallbacks - def _respond_remote_thumbnail( + async def _respond_remote_thumbnail( self, request, server_name, media_id, width, height, method, m_type ): # TODO: Don't download the whole remote file # We should proxy the thumbnail from the remote server instead of # downloading the remote file and generating our own thumbnails. 
- media_info = yield self.media_repo.get_remote_media_info(server_name, media_id) + media_info = await self.media_repo.get_remote_media_info(server_name, media_id) - thumbnail_infos = yield self.store.get_remote_media_thumbnails( + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -278,8 +272,8 @@ class ThumbnailResource(DirectServeResource): t_type = file_info.thumbnail_type t_length = thumbnail_info["thumbnail_length"] - responder = yield self.media_storage.fetch_media(file_info) - yield respond_with_responder(request, responder, t_type, t_length) + responder = await self.media_storage.fetch_media(file_info) + await respond_with_responder(request, responder, t_type, t_length) else: logger.info("Failed to find any generated thumbnails") respond_404(request) From fdb13447167da0670dd6ad95fdf4a99cde450eb9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 20 Mar 2020 14:40:47 +0000 Subject: [PATCH 286/382] Remove concept of a non-limited stream. (#7011) --- changelog.d/7011.misc | 1 + synapse/handlers/presence.py | 4 +- synapse/handlers/typing.py | 11 ++- synapse/replication/tcp/resource.py | 9 +-- synapse/replication/tcp/streams/_base.py | 68 ++++++++----------- synapse/storage/data_stores/main/devices.py | 10 ++- .../data_stores/main/end_to_end_keys.py | 14 ++-- synapse/storage/data_stores/main/presence.py | 23 ++++--- 8 files changed, 72 insertions(+), 68 deletions(-) create mode 100644 changelog.d/7011.misc diff --git a/changelog.d/7011.misc b/changelog.d/7011.misc new file mode 100644 index 0000000000..41c3b37574 --- /dev/null +++ b/changelog.d/7011.misc @@ -0,0 +1 @@ +Remove concept of a non-limited stream. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 5526015ddb..6912165622 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -747,7 +747,7 @@ class PresenceHandler(object): return False - async def get_all_presence_updates(self, last_id, current_id): + async def get_all_presence_updates(self, last_id, current_id, limit): """ Gets a list of presence update rows from between the given stream ids. Each row has: @@ -762,7 +762,7 @@ class PresenceHandler(object): """ # TODO(markjh): replicate the unpersisted changes. # This could use the in-memory stores for recent changes. - rows = await self.store.get_all_presence_updates(last_id, current_id) + rows = await self.store.get_all_presence_updates(last_id, current_id, limit) return rows def notify_new_event(self): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 391bceb0c4..c7bc14c623 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -15,6 +15,7 @@ import logging from collections import namedtuple +from typing import List from twisted.internet import defer @@ -257,7 +258,13 @@ class TypingHandler(object): "typing_key", self._latest_room_serial, rooms=[member.room_id] ) - async def get_all_typing_updates(self, last_id, current_id): + async def get_all_typing_updates( + self, last_id: int, current_id: int, limit: int + ) -> List[dict]: + """Get up to `limit` typing updates between the given tokens, earliest + updates first. 
+ """ + if last_id == current_id: return [] @@ -275,7 +282,7 @@ class TypingHandler(object): typing = self._room_typing[room_id] rows.append((serial, room_id, list(typing))) rows.sort() - return rows + return rows[:limit] def get_current_token(self): return self._latest_room_serial diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ce9d1fae12..6e2ebaf614 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -166,11 +166,6 @@ class ReplicationStreamer(object): self.pending_updates = False with Measure(self.clock, "repl.stream.get_updates"): - # First we tell the streams that they should update their - # current tokens. - for stream in self.streams: - stream.advance_current_token() - all_streams = self.streams if self._replication_torture_level is not None: @@ -180,7 +175,7 @@ class ReplicationStreamer(object): random.shuffle(all_streams) for stream in all_streams: - if stream.last_token == stream.upto_token: + if stream.last_token == stream.current_token(): continue if self._replication_torture_level: @@ -192,7 +187,7 @@ class ReplicationStreamer(object): "Getting stream: %s: %s -> %s", stream.NAME, stream.last_token, - stream.upto_token, + stream.current_token(), ) try: updates, current_token = await stream.get_updates() diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 7a8b6e9df1..abf5c6c6a8 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -17,10 +17,12 @@ import itertools import logging from collections import namedtuple -from typing import Any, List, Optional +from typing import Any, List, Optional, Tuple import attr +from synapse.types import JsonDict + logger = logging.getLogger(__name__) @@ -119,13 +121,12 @@ class Stream(object): """Base class for the streams. Provides a `get_updates()` function that returns new updates since the last - time it was called up until the point `advance_current_token` was called. + time it was called. """ NAME = None # type: str # The name of the stream # The type of the row. Used by the default impl of parse_row. ROW_TYPE = None # type: Any - _LIMITED = True # Whether the update function takes a limit @classmethod def parse_row(cls, row): @@ -146,26 +147,15 @@ class Stream(object): # The token from which we last asked for updates self.last_token = self.current_token() - # The token that we will get updates up to - self.upto_token = self.current_token() - - def advance_current_token(self): - """Updates `upto_token` to "now", which updates up until which point - get_updates[_since] will fetch rows till. - """ - self.upto_token = self.current_token() - def discard_updates_and_advance(self): """Called when the stream should advance but the updates would be discarded, e.g. when there are no currently connected workers. """ - self.upto_token = self.current_token() - self.last_token = self.upto_token + self.last_token = self.current_token() async def get_updates(self): """Gets all updates since the last time this function was called (or - since the stream was constructed if it hadn't been called before), - until the `upto_token` + since the stream was constructed if it hadn't been called before). 
Returns: Deferred[Tuple[List[Tuple[int, Any]], int]: @@ -178,44 +168,45 @@ class Stream(object): return updates, current_token - async def get_updates_since(self, from_token): + async def get_updates_since( + self, from_token: int + ) -> Tuple[List[Tuple[int, JsonDict]], int]: """Like get_updates except allows specifying from when we should stream updates Returns: - Deferred[Tuple[List[Tuple[int, Any]], int]: - Resolves to a pair ``(updates, current_token)``, where ``updates`` is a - list of ``(token, row)`` entries. ``row`` will be json-serialised and - sent over the replication steam. + Resolves to a pair `(updates, new_last_token)`, where `updates` is + a list of `(token, row)` entries and `new_last_token` is the new + position in stream. """ - if from_token in ("NOW", "now"): - return [], self.upto_token - current_token = self.upto_token + if from_token in ("NOW", "now"): + return [], self.current_token() + + current_token = self.current_token() from_token = int(from_token) if from_token == current_token: return [], current_token - logger.info("get_updates_since: %s", self.__class__) - if self._LIMITED: - rows = await self.update_function( - from_token, current_token, limit=MAX_EVENTS_BEHIND + 1 - ) + rows = await self.update_function( + from_token, current_token, limit=MAX_EVENTS_BEHIND + 1 + ) - # never turn more than MAX_EVENTS_BEHIND + 1 into updates. - rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1) - else: - rows = await self.update_function(from_token, current_token) + # never turn more than MAX_EVENTS_BEHIND + 1 into updates. + rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1) updates = [(row[0], row[1:]) for row in rows] # check we didn't get more rows than the limit. # doing it like this allows the update_function to be a generator. - if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: + if len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) + # The update function didn't hit the limit, so we must have got all + # the updates to `current_token`, and can return that as our new + # stream position. return updates, current_token def current_token(self): @@ -227,9 +218,8 @@ class Stream(object): """ raise NotImplementedError() - def update_function(self, from_token, current_token, limit=None): - """Get updates between from_token and to_token. If Stream._LIMITED is - True then limit is provided, otherwise it's not. + def update_function(self, from_token, current_token, limit): + """Get updates between from_token and to_token. 
Returns: Deferred(list(tuple)): the first entry in the tuple is the token for @@ -257,7 +247,6 @@ class BackfillStream(Stream): class PresenceStream(Stream): NAME = "presence" - _LIMITED = False ROW_TYPE = PresenceStreamRow def __init__(self, hs): @@ -272,7 +261,6 @@ class PresenceStream(Stream): class TypingStream(Stream): NAME = "typing" - _LIMITED = False ROW_TYPE = TypingStreamRow def __init__(self, hs): @@ -372,7 +360,6 @@ class DeviceListsStream(Stream): """ NAME = "device_lists" - _LIMITED = False ROW_TYPE = DeviceListsStreamRow def __init__(self, hs): @@ -462,7 +449,6 @@ class UserSignatureStream(Stream): """ NAME = "user_signature" - _LIMITED = False ROW_TYPE = UserSignatureStreamRow def __init__(self, hs): diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 4c19c02bbc..2d47cfd131 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -576,7 +576,7 @@ class DeviceWorkerStore(SQLBaseStore): return set() async def get_all_device_list_changes_for_remotes( - self, from_key: int, to_key: int + self, from_key: int, to_key: int, limit: int, ) -> List[Tuple[int, str]]: """Return a list of `(stream_id, entity)` which is the combined list of changes to devices and which destinations need to be poked. Entity is @@ -592,10 +592,16 @@ class DeviceWorkerStore(SQLBaseStore): SELECT stream_id, destination AS entity FROM device_lists_outbound_pokes ) AS e WHERE ? < stream_id AND stream_id <= ? + LIMIT ? """ return await self.db.execute( - "get_all_device_list_changes_for_remotes", None, sql, from_key, to_key + "get_all_device_list_changes_for_remotes", + None, + sql, + from_key, + to_key, + limit, ) @cached(max_entries=10000) diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py index 001a53f9b4..bcf746b7ef 100644 --- a/synapse/storage/data_stores/main/end_to_end_keys.py +++ b/synapse/storage/data_stores/main/end_to_end_keys.py @@ -537,7 +537,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): return result - def get_all_user_signature_changes_for_remotes(self, from_key, to_key): + def get_all_user_signature_changes_for_remotes(self, from_key, to_key, limit): """Return a list of changes from the user signature stream to notify remotes. Note that the user signature stream represents when a user signs their device with their user-signing key, which is not published to other @@ -552,13 +552,19 @@ class EndToEndKeyWorkerStore(SQLBaseStore): Deferred[list[(int,str)]] a list of `(stream_id, user_id)` """ sql = """ - SELECT MAX(stream_id) AS stream_id, from_user_id AS user_id + SELECT stream_id, from_user_id AS user_id FROM user_signature_stream WHERE ? < stream_id AND stream_id <= ? - GROUP BY user_id + ORDER BY stream_id ASC + LIMIT ? 
""" return self.db.execute( - "get_all_user_signature_changes_for_remotes", None, sql, from_key, to_key + "get_all_user_signature_changes_for_remotes", + None, + sql, + from_key, + to_key, + limit, ) diff --git a/synapse/storage/data_stores/main/presence.py b/synapse/storage/data_stores/main/presence.py index 604c8b7ddd..dab31e0c2d 100644 --- a/synapse/storage/data_stores/main/presence.py +++ b/synapse/storage/data_stores/main/presence.py @@ -60,7 +60,7 @@ class PresenceStore(SQLBaseStore): "status_msg": state.status_msg, "currently_active": state.currently_active, } - for state in presence_states + for stream_id, state in zip(stream_orderings, presence_states) ], ) @@ -73,19 +73,22 @@ class PresenceStore(SQLBaseStore): ) txn.execute(sql + clause, [stream_id] + list(args)) - def get_all_presence_updates(self, last_id, current_id): + def get_all_presence_updates(self, last_id, current_id, limit): if last_id == current_id: return defer.succeed([]) def get_all_presence_updates_txn(txn): - sql = ( - "SELECT stream_id, user_id, state, last_active_ts," - " last_federation_update_ts, last_user_sync_ts, status_msg," - " currently_active" - " FROM presence_stream" - " WHERE ? < stream_id AND stream_id <= ?" - ) - txn.execute(sql, (last_id, current_id)) + sql = """ + SELECT stream_id, user_id, state, last_active_ts, + last_federation_update_ts, last_user_sync_ts, + status_msg, + currently_active + FROM presence_stream + WHERE ? < stream_id AND stream_id <= ? + ORDER BY stream_id ASC + LIMIT ? + """ + txn.execute(sql, (last_id, current_id, limit)) return txn.fetchall() return self.db.runInteraction( From c165c1233b8ef244fadca97c7d465fdcf473d077 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 20 Mar 2020 16:24:22 +0100 Subject: [PATCH 287/382] Improve database configuration docs (#6988) Attempts to clarify the sample config for databases, and add some stuff about tcp keepalives to `postgres.md`. --- changelog.d/6988.doc | 1 + docs/postgres.md | 42 ++++++++++++---- docs/sample_config.yaml | 43 ++++++++++++++-- synapse/config/_base.py | 2 - synapse/config/database.py | 93 ++++++++++++++++++++++------------- tests/config/test_database.py | 22 +-------- 6 files changed, 132 insertions(+), 71 deletions(-) create mode 100644 changelog.d/6988.doc diff --git a/changelog.d/6988.doc b/changelog.d/6988.doc new file mode 100644 index 0000000000..b6f71bb966 --- /dev/null +++ b/changelog.d/6988.doc @@ -0,0 +1 @@ +Improve the documentation for database configuration. diff --git a/docs/postgres.md b/docs/postgres.md index e0793ecee8..16a630c3d1 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -105,19 +105,41 @@ of free memory the database host has available. When you are ready to start using PostgreSQL, edit the `database` section in your config file to match the following lines: - database: - name: psycopg2 - args: - user: - password: - database: - host: - cp_min: 5 - cp_max: 10 +```yaml +database: + name: psycopg2 + args: + user: + password: + database: + host: + cp_min: 5 + cp_max: 10 +``` All key, values in `args` are passed to the `psycopg2.connect(..)` function, except keys beginning with `cp_`, which are consumed by the -twisted adbapi connection pool. +twisted adbapi connection pool. See the [libpq +documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS) +for a list of options which can be passed. 
+ +You should consider tuning the `args.keepalives_*` options if there is any danger of +the connection between your homeserver and database dropping, otherwise Synapse +may block for an extended period while it waits for a response from the +database server. Example values might be: + +```yaml +# seconds of inactivity after which TCP should send a keepalive message to the server +keepalives_idle: 10 + +# the number of seconds after which a TCP keepalive message that is not +# acknowledged by the server should be retransmitted +keepalives_interval: 10 + +# the number of TCP keepalives that can be lost before the client's connection +# to the server is considered dead +keepalives_count: 3 +``` ## Porting from SQLite diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2ff0dd05a2..276e43b732 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -578,13 +578,46 @@ acme: ## Database ## +# The 'database' setting defines the database that synapse uses to store all of +# its data. +# +# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or +# 'psycopg2' (for PostgreSQL). +# +# 'args' gives options which are passed through to the database engine, +# except for options starting 'cp_', which are used to configure the Twisted +# connection pool. For a reference to valid arguments, see: +# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect +# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS +# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ +# +# +# Example SQLite configuration: +# +#database: +# name: sqlite3 +# args: +# database: /path/to/homeserver.db +# +# +# Example Postgres configuration: +# +#database: +# name: psycopg2 +# args: +# user: synapse +# password: secretpassword +# database: synapse +# host: localhost +# cp_min: 5 +# cp_max: 10 +# +# For more information on using Synapse with Postgres, see `docs/postgres.md`. +# database: - # The database engine name - name: "sqlite3" - # Arguments to pass to the engine + name: sqlite3 args: - # Path to the database - database: "DATADIR/homeserver.db" + database: DATADIR/homeserver.db # Number of events to cache in memory. # diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ba846042c4..efe2af5504 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -294,7 +294,6 @@ class RootConfig(object): report_stats=None, open_private_ports=False, listeners=None, - database_conf=None, tls_certificate_path=None, tls_private_key_path=None, acme_domain=None, @@ -367,7 +366,6 @@ class RootConfig(object): report_stats=report_stats, open_private_ports=open_private_ports, listeners=listeners, - database_conf=database_conf, tls_certificate_path=tls_certificate_path, tls_private_key_path=tls_private_key_path, acme_domain=acme_domain, diff --git a/synapse/config/database.py b/synapse/config/database.py index 219b32f670..b8ab2f86ac 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,14 +15,60 @@ # limitations under the License. 
import logging import os -from textwrap import indent - -import yaml from synapse.config._base import Config, ConfigError logger = logging.getLogger(__name__) +DEFAULT_CONFIG = """\ +## Database ## + +# The 'database' setting defines the database that synapse uses to store all of +# its data. +# +# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or +# 'psycopg2' (for PostgreSQL). +# +# 'args' gives options which are passed through to the database engine, +# except for options starting 'cp_', which are used to configure the Twisted +# connection pool. For a reference to valid arguments, see: +# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect +# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS +# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ +# +# +# Example SQLite configuration: +# +#database: +# name: sqlite3 +# args: +# database: /path/to/homeserver.db +# +# +# Example Postgres configuration: +# +#database: +# name: psycopg2 +# args: +# user: synapse +# password: secretpassword +# database: synapse +# host: localhost +# cp_min: 5 +# cp_max: 10 +# +# For more information on using Synapse with Postgres, see `docs/postgres.md`. +# +database: + name: sqlite3 + args: + database: %(database_path)s + +# Number of events to cache in memory. +# +#event_cache_size: 10K +""" + class DatabaseConnectionConfig: """Contains the connection config for a particular database. @@ -36,10 +83,12 @@ class DatabaseConnectionConfig: """ def __init__(self, name: str, db_config: dict): - if db_config["name"] not in ("sqlite3", "psycopg2"): - raise ConfigError("Unsupported database type %r" % (db_config["name"],)) + db_engine = db_config.get("name", "sqlite3") - if db_config["name"] == "sqlite3": + if db_engine not in ("sqlite3", "psycopg2"): + raise ConfigError("Unsupported database type %r" % (db_engine,)) + + if db_engine == "sqlite3": db_config.setdefault("args", {}).update( {"cp_min": 1, "cp_max": 1, "check_same_thread": False} ) @@ -97,34 +146,10 @@ class DatabaseConfig(Config): self.set_databasepath(config.get("database_path")) - def generate_config_section(self, data_dir_path, database_conf, **kwargs): - if not database_conf: - database_path = os.path.join(data_dir_path, "homeserver.db") - database_conf = ( - """# The database engine name - name: "sqlite3" - # Arguments to pass to the engine - args: - # Path to the database - database: "%(database_path)s" - """ - % locals() - ) - else: - database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip() - - return ( - """\ - ## Database ## - - database: - %(database_conf)s - # Number of events to cache in memory. 
- # - #event_cache_size: 10K - """ - % locals() - ) + def generate_config_section(self, data_dir_path, **kwargs): + return DEFAULT_CONFIG % { + "database_path": os.path.join(data_dir_path, "homeserver.db") + } def read_arguments(self, args): self.set_databasepath(args.database_path) diff --git a/tests/config/test_database.py b/tests/config/test_database.py index 151d3006ac..f675bde68e 100644 --- a/tests/config/test_database.py +++ b/tests/config/test_database.py @@ -21,9 +21,9 @@ from tests import unittest class DatabaseConfigTestCase(unittest.TestCase): - def test_database_configured_correctly_no_database_conf_param(self): + def test_database_configured_correctly(self): conf = yaml.safe_load( - DatabaseConfig().generate_config_section("/data_dir_path", None) + DatabaseConfig().generate_config_section(data_dir_path="/data_dir_path") ) expected_database_conf = { @@ -32,21 +32,3 @@ class DatabaseConfigTestCase(unittest.TestCase): } self.assertEqual(conf["database"], expected_database_conf) - - def test_database_configured_correctly_database_conf_param(self): - - database_conf = { - "name": "my super fast datastore", - "args": { - "user": "matrix", - "password": "synapse_database_password", - "host": "synapse_database_host", - "database": "matrix", - }, - } - - conf = yaml.safe_load( - DatabaseConfig().generate_config_section("/data_dir_path", database_conf) - ) - - self.assertEqual(conf["database"], database_conf) From 477c4f5b1c2c7733d4b2cf578dc9aa8e048011b0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 20 Mar 2020 16:22:47 -0400 Subject: [PATCH 288/382] Clean-up some auth/login REST code (#7115) --- changelog.d/7115.misc | 1 + synapse/rest/client/v1/login.py | 8 ----- synapse/rest/client/v2_alpha/auth.py | 53 +++++++++++----------------- 3 files changed, 21 insertions(+), 41 deletions(-) create mode 100644 changelog.d/7115.misc diff --git a/changelog.d/7115.misc b/changelog.d/7115.misc new file mode 100644 index 0000000000..7d4a011e3e --- /dev/null +++ b/changelog.d/7115.misc @@ -0,0 +1 @@ +De-duplicate / remove unused REST code for login and auth. 
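To make the reworked default-config generation above (PATCH 287) concrete, here is a minimal, self-contained sketch of the `%`-interpolation and YAML round-trip that `tests/config/test_database.py` exercises. `DEFAULT_CONFIG` is trimmed to just the interpolated part rather than the full template from the patch, and PyYAML is assumed to be available:

```python
import os

import yaml

# Trimmed copy of the DEFAULT_CONFIG template from the patch: only the part
# that gets %-interpolated. The real template also carries the commented-out
# sqlite/postgres examples.
DEFAULT_CONFIG = """\
database:
  name: sqlite3
  args:
    database: %(database_path)s
"""


def generate_config_section(data_dir_path: str) -> str:
    # Same interpolation as DatabaseConfig.generate_config_section above.
    return DEFAULT_CONFIG % {
        "database_path": os.path.join(data_dir_path, "homeserver.db")
    }


conf = yaml.safe_load(generate_config_section("/data_dir_path"))

# Mirrors the assertion in tests/config/test_database.py.
assert conf["database"] == {
    "name": "sqlite3",
    "args": {"database": "/data_dir_path/homeserver.db"},
}
```

Keeping the template as a module-level constant with a single `%(database_path)s` placeholder is what lets the patch drop the `database_conf` parameter and the `textwrap.indent`/`yaml.dump` plumbing removed above.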
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index d0d4999795..31551524f8 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -28,7 +28,6 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) -from synapse.push.mailer import load_jinja2_templates from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import UserID, map_username_to_mxid_localpart @@ -548,13 +547,6 @@ class SSOAuthHandler(object): self._registration_handler = hs.get_registration_handler() self._macaroon_gen = hs.get_macaroon_generator() - # Load the redirect page HTML template - self._template = load_jinja2_templates( - hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"], - )[0] - - self._server_name = hs.config.server_name - # cast to tuple for use with str.startswith self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 50e080673b..85cf5a14c6 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -142,14 +142,6 @@ class AuthRestServlet(RestServlet): % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), "sitekey": self.hs.config.recaptcha_public_key, } - html_bytes = html.encode("utf8") - request.setResponseCode(200) - request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) - - request.write(html_bytes) - finish_request(request) - return None elif stagetype == LoginType.TERMS: html = TERMS_TEMPLATE % { "session": session, @@ -158,17 +150,19 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } - html_bytes = html.encode("utf8") - request.setResponseCode(200) - request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) - - request.write(html_bytes) - finish_request(request) - return None else: raise SynapseError(404, "Unknown auth stage type") + # Render the HTML and return. 
+ html_bytes = html.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + + request.write(html_bytes) + finish_request(request) + return None + async def on_POST(self, request, stagetype): session = parse_string(request, "session") @@ -196,15 +190,6 @@ class AuthRestServlet(RestServlet): % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), "sitekey": self.hs.config.recaptcha_public_key, } - html_bytes = html.encode("utf8") - request.setResponseCode(200) - request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) - - request.write(html_bytes) - finish_request(request) - - return None elif stagetype == LoginType.TERMS: authdict = {"session": session} @@ -225,17 +210,19 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } - html_bytes = html.encode("utf8") - request.setResponseCode(200) - request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) - - request.write(html_bytes) - finish_request(request) - return None else: raise SynapseError(404, "Unknown auth stage type") + # Render the HTML and return. + html_bytes = html.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + + request.write(html_bytes) + finish_request(request) + return None + def on_OPTIONS(self, _): return 200, {} From 96071eea8f5e18282c07da3a61e4b3431f694cc5 Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Mon, 23 Mar 2020 11:48:28 +0200 Subject: [PATCH 289/382] Set Referrer-Policy to no-referrer for media (#7009) --- changelog.d/7009.feature | 1 + synapse/rest/media/v1/download_resource.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/7009.feature diff --git a/changelog.d/7009.feature b/changelog.d/7009.feature new file mode 100644 index 0000000000..cd2705d5ba --- /dev/null +++ b/changelog.d/7009.feature @@ -0,0 +1 @@ +Set `Referrer-Policy` header to `no-referrer` on media downloads. 
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 66a01559e1..24d3ae5bbc 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -50,6 +50,9 @@ class DownloadResource(DirectServeResource): b" media-src 'self';" b" object-src 'self';", ) + request.setHeader( + b"Referrer-Policy", b"no-referrer", + ) server_name, media_id, name = parse_media_id(request) if server_name == self.server_name: await self.media_repo.get_local_media(request, media_id, name) From b3cee0ce670ada582b2a4b36c377f160c7ee1d09 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 23 Mar 2020 11:39:36 +0000 Subject: [PATCH 290/382] Fix processing of `groups` stream, and use symbolic names for streams (#7117) `groups` != `receipts` Introduced in #6964 --- changelog.d/7117.bugfix | 1 + synapse/app/generic_worker.py | 35 +++++++---- synapse/replication/tcp/streams/__init__.py | 70 +++++++++++++++------ 3 files changed, 76 insertions(+), 30 deletions(-) create mode 100644 changelog.d/7117.bugfix diff --git a/changelog.d/7117.bugfix b/changelog.d/7117.bugfix new file mode 100644 index 0000000000..1896d7ad49 --- /dev/null +++ b/changelog.d/7117.bugfix @@ -0,0 +1 @@ +Fix a bug which meant that groups updates were not correctly replicated between workers. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index cdc078cf11..136babe6ce 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -65,12 +65,23 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.replication.tcp.streams._base import ( +from synapse.replication.tcp.streams import ( + AccountDataStream, DeviceListsStream, + GroupServerStream, + PresenceStream, + PushersStream, + PushRulesStream, ReceiptsStream, + TagAccountDataStream, ToDeviceStream, + TypingStream, +) +from synapse.replication.tcp.streams.events import ( + EventsStream, + EventsStreamEventRow, + EventsStreamRow, ) -from synapse.replication.tcp.streams.events import EventsStreamEventRow, EventsStreamRow from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client.v1 import events from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet @@ -626,7 +637,7 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): if self.send_handler: self.send_handler.process_replication_rows(stream_name, token, rows) - if stream_name == "events": + if stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so # we don't need to optimise this for multiple rows. 
for row in rows: @@ -649,44 +660,44 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): ) await self.pusher_pool.on_new_notifications(token, token) - elif stream_name == "push_rules": + elif stream_name == PushRulesStream.NAME: self.notifier.on_new_event( "push_rules_key", token, users=[row.user_id for row in rows] ) - elif stream_name in ("account_data", "tag_account_data"): + elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): self.notifier.on_new_event( "account_data_key", token, users=[row.user_id for row in rows] ) - elif stream_name == "receipts": + elif stream_name == ReceiptsStream.NAME: self.notifier.on_new_event( "receipt_key", token, rooms=[row.room_id for row in rows] ) await self.pusher_pool.on_new_receipts( token, token, {row.room_id for row in rows} ) - elif stream_name == "typing": + elif stream_name == TypingStream.NAME: self.typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( "typing_key", token, rooms=[row.room_id for row in rows] ) - elif stream_name == "to_device": + elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if entities: self.notifier.on_new_event("to_device_key", token, users=entities) - elif stream_name == "device_lists": + elif stream_name == DeviceListsStream.NAME: all_room_ids = set() for row in rows: if row.entity.startswith("@"): room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) - elif stream_name == "presence": + elif stream_name == PresenceStream.NAME: await self.presence_handler.process_replication_rows(token, rows) - elif stream_name == "receipts": + elif stream_name == GroupServerStream.NAME: self.notifier.on_new_event( "groups_key", token, users=[row.user_id for row in rows] ) - elif stream_name == "pushers": + elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index 5f52264e84..29199f5b46 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -24,27 +24,61 @@ Each stream is defined by the following information: current_token: The function that returns the current token for the stream update_function: The function that returns a list of updates between two tokens """ - -from . 
import _base, events, federation +from synapse.replication.tcp.streams._base import ( + AccountDataStream, + BackfillStream, + CachesStream, + DeviceListsStream, + GroupServerStream, + PresenceStream, + PublicRoomsStream, + PushersStream, + PushRulesStream, + ReceiptsStream, + TagAccountDataStream, + ToDeviceStream, + TypingStream, + UserSignatureStream, +) +from synapse.replication.tcp.streams.events import EventsStream +from synapse.replication.tcp.streams.federation import FederationStream STREAMS_MAP = { stream.NAME: stream for stream in ( - events.EventsStream, - _base.BackfillStream, - _base.PresenceStream, - _base.TypingStream, - _base.ReceiptsStream, - _base.PushRulesStream, - _base.PushersStream, - _base.CachesStream, - _base.PublicRoomsStream, - _base.DeviceListsStream, - _base.ToDeviceStream, - federation.FederationStream, - _base.TagAccountDataStream, - _base.AccountDataStream, - _base.GroupServerStream, - _base.UserSignatureStream, + EventsStream, + BackfillStream, + PresenceStream, + TypingStream, + ReceiptsStream, + PushRulesStream, + PushersStream, + CachesStream, + PublicRoomsStream, + DeviceListsStream, + ToDeviceStream, + FederationStream, + TagAccountDataStream, + AccountDataStream, + GroupServerStream, + UserSignatureStream, ) } + +__all__ = [ + "STREAMS_MAP", + "BackfillStream", + "PresenceStream", + "TypingStream", + "ReceiptsStream", + "PushRulesStream", + "PushersStream", + "CachesStream", + "PublicRoomsStream", + "DeviceListsStream", + "ToDeviceStream", + "TagAccountDataStream", + "AccountDataStream", + "GroupServerStream", + "UserSignatureStream", +] From 2fa55c0cc6396cab4ed74b450eb1a73b0a595ec6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Mar 2020 12:13:09 +0000 Subject: [PATCH 291/382] 1.12.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e3550497a4..9ba930e729 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.12.0 (2020-03-23) +=========================== + +No significant changes. + + Synapse 1.12.0rc1 (2020-03-19) ============================== diff --git a/debian/changelog b/debian/changelog index c39ea8f47f..39ec9da7ab 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.12.0) stable; urgency=medium + + * New synapse release 1.12.0. + + -- Synapse Packaging team Mon, 23 Mar 2020 12:13:03 +0000 + matrix-synapse-py3 (1.11.1) stable; urgency=medium * New synapse release 1.11.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 020e0536be..5b86008945 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.12.0rc1" +__version__ = "1.12.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From a438950a00170fb371054bc2d37373426bcbbc18 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Mar 2020 13:00:40 +0000 Subject: [PATCH 292/382] 1.12.0 changelog --- CHANGES.md | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9ba930e729..d94a802fa7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,59 @@ Synapse 1.12.0 (2020-03-23) =========================== -No significant changes. +No significant changes since 1.12.0rc1. 
+
+Debian packages and Docker images are rebuilt using the letest versions of
+dependency libraries, including Twisted 20.3.0. **Please see security advisory
+below**.
+
+Security advisory
+-----------------
+
+Synapse may be vulnerable to request-smuggling attacks when it is used with a
+reverse-proxy. The vulnerabilities are fixed in Twisted 20.3.0, and are
+described in
+[CVE-2020-10108](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10108)
+and
+[CVE-2020-10109](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-10109).
+For a good introduction to this class of request-smuggling attacks, see
+https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
+
+We are not aware of these vulnerabilities being exploited in the world, and
+do not believe that they are exploitable with current versions of any reverse
+proxies. Nevertheless, we recommend that all Synapse administrators ensure that
+they have the latest versions of the Twisted library to ensure that their
+installation remains secore.
+
+* Administrators using the [`matrix.org` Docker
+  image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
+  packages from
+  `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
+  should ensure that they have version 1.12.0 installed: these images include
+  Twisted 20.3.0.
+* Administrators who have [installed Synapse from
+  source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
+  should upgrade Twisted within their virtualenv by running:
+  ```sh
+  <path_to_virtualenv>/bin/pip install 'Twisted>=20.3.0'
+  ```
+* Administrators who have installed Synapse from distribution packages should
+  consult the information from their distributions.
+
+Advance notice of change to the default `git` branch for Synapse
+----------------------------------------------------------------
+
+Currently, the default `git` branch for Synapse is `master`, which tracks the
+latest release.
+
+After the release of Synapse 1.13.0, we intend to change this default to
+`develop`, which is the development tip. This is more consistent with common
+practice and modern `git` usage.
+
+Although we try to keep `develop` in a stable state, there may be occasions
+where regressions keep in. Developers and distributors who have scripts which
+run builds using the default branch of `Synapse` should therefore consider
+pinning their scripts to `master`.
 
 
 Synapse 1.12.0rc1 (2020-03-19)

From 56b5f1d0eebb0e414badf36deed83542bbf296d1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 23 Mar 2020 13:23:21 +0000
Subject: [PATCH 293/382] changelog typos

---
 CHANGES.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index d94a802fa7..3b66006072 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,7 +3,7 @@ Synapse 1.12.0 (2020-03-23)
 
 No significant changes since 1.12.0rc1.
 
-Debian packages and Docker images are rebuilt using the letest versions of
+Debian packages and Docker images are rebuilt using the latest versions of
 dependency libraries, including Twisted 20.3.0. **Please see security advisory
 below**.
 
@@ -23,7 +23,7 @@ We are not aware of these vulnerabilities being exploited in the world, and
 do not believe that they are exploitable with current versions of any reverse
 proxies. Nevertheless, we recommend that all Synapse administrators ensure that
 they have the latest versions of the Twisted library to ensure that their
-installation remains secore.
+installation remains secure. * Administrators using the [`matrix.org` Docker image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu @@ -51,7 +51,7 @@ After the release of Synapse 1.13.0, we intend to change this default to practice and modern `git` usage. Although we try to keep `develop` in a stable state, there may be occasions -where regressions keep in. Developers and distributors who have scripts which +where regressions creep in. Developers and distributors who have scripts which run builds using the default branch of `Synapse` should therefore consider pinning their scripts to `master`. From 066804f5916289d6d62cf94dfb1eb09438ce7a2a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 23 Mar 2020 13:36:16 +0000 Subject: [PATCH 294/382] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 3b66006072..076b046d23 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,7 +19,7 @@ and For a good introduction to this class of request-smuggling attacks, see https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn. -We are not aware of these vulnerabilities being exploited in the world, and +We are not aware of these vulnerabilities being exploited in the wild, and do not believe that they are exploitable with current versions of any reverse proxies. Nevertheless, we recommend that all Synapse administrators ensure that they have the latest versions of the Twisted library to ensure that their From 88bb6c27e1ddf67ba8620eb1d856b113214e3507 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 Mar 2020 13:37:52 +0000 Subject: [PATCH 295/382] matrix.org was fine --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 076b046d23..f794c585b7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -40,6 +40,8 @@ installation remains secure. * Administrators who have installed Synapse from distribution packages should consult the information from their distributions. +The `matrix.org` Synapse instance was not vulnerable to these vulnerabilities. + Advance notice of change to the default `git` branch for Synapse ---------------------------------------------------------------- From a564b92d37625855940fe599c730a9958c33f973 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 23 Mar 2020 13:59:11 +0000 Subject: [PATCH 296/382] Convert `*StreamRow` classes to inner classes (#7116) This just helps keep the rows closer to their streams, so that it's easier to see what the format of each stream is. --- changelog.d/7116.misc | 1 + synapse/app/generic_worker.py | 2 +- synapse/federation/send_queue.py | 2 +- synapse/replication/tcp/streams/_base.py | 181 +++++++++--------- synapse/replication/tcp/streams/federation.py | 16 +- .../replication/tcp/streams/test_receipts.py | 4 +- 6 files changed, 106 insertions(+), 100 deletions(-) create mode 100644 changelog.d/7116.misc diff --git a/changelog.d/7116.misc b/changelog.d/7116.misc new file mode 100644 index 0000000000..89d90bd49e --- /dev/null +++ b/changelog.d/7116.misc @@ -0,0 +1 @@ +Convert `*StreamRow` classes to inner classes. 
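The refactor in the diffs below is mechanical, but the resulting shape is easier to see in miniature. This sketch (a stand-in, not Synapse's real classes) shows a row type defined inside its stream and rehydrated the way the base class's `parse_row` helper does:

```python
from collections import namedtuple


class Stream:
    """Trimmed-down stand-in for synapse.replication.tcp.streams._base.Stream."""

    NAME = "abstract"
    ROW_TYPE = None

    @classmethod
    def parse_row(cls, row):
        # Wire rows arrive as plain sequences and are rehydrated into the
        # stream's ROW_TYPE, mirroring the real base class.
        return cls.ROW_TYPE(*row)


class ReceiptsStream(Stream):
    # After PATCH 296 the row type lives on the stream itself ...
    ReceiptsStreamRow = namedtuple(
        "ReceiptsStreamRow",
        ("room_id", "receipt_type", "user_id", "event_id", "data"),
    )

    NAME = "receipts"
    ROW_TYPE = ReceiptsStreamRow


# ... so consumers spell the type as ReceiptsStream.ReceiptsStreamRow:
row = ReceiptsStream.parse_row(("!room:hs", "m.read", "@user:hs", "$event", {}))
assert isinstance(row, ReceiptsStream.ReceiptsStreamRow)
assert row.room_id == "!room:hs"
```

After the change, call sites refer to `ReceiptsStream.ReceiptsStreamRow` and friends, which is exactly what the `generic_worker.py`, `send_queue.py` and test updates below do.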
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 136babe6ce..c8fd8909a4 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -804,7 +804,7 @@ class FederationSenderHandler(object): async def _on_new_receipts(self, rows): """ Args: - rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]): + rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): new receipts to be processed """ for receipt in rows: diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 876fb0e245..e1700ca8aa 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -477,7 +477,7 @@ def process_rows_for_federation(transaction_queue, rows): Args: transaction_queue (FederationSender) - rows (list(synapse.replication.tcp.streams.FederationStreamRow)) + rows (list(synapse.replication.tcp.streams.federation.FederationStream.FederationStreamRow)) """ # The federation stream contains a bunch of different types of diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index abf5c6c6a8..32d9514883 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -28,94 +28,6 @@ logger = logging.getLogger(__name__) MAX_EVENTS_BEHIND = 500000 -BackfillStreamRow = namedtuple( - "BackfillStreamRow", - ( - "event_id", # str - "room_id", # str - "type", # str - "state_key", # str, optional - "redacts", # str, optional - "relates_to", # str, optional - ), -) -PresenceStreamRow = namedtuple( - "PresenceStreamRow", - ( - "user_id", # str - "state", # str - "last_active_ts", # int - "last_federation_update_ts", # int - "last_user_sync_ts", # int - "status_msg", # str - "currently_active", # bool - ), -) -TypingStreamRow = namedtuple( - "TypingStreamRow", ("room_id", "user_ids") # str # list(str) -) -ReceiptsStreamRow = namedtuple( - "ReceiptsStreamRow", - ( - "room_id", # str - "receipt_type", # str - "user_id", # str - "event_id", # str - "data", # dict - ), -) -PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",)) # str -PushersStreamRow = namedtuple( - "PushersStreamRow", - ("user_id", "app_id", "pushkey", "deleted"), # str # str # str # bool -) - - -@attr.s -class CachesStreamRow: - """Stream to inform workers they should invalidate their cache. - - Attributes: - cache_func: Name of the cached function. - keys: The entry in the cache to invalidate. If None then will - invalidate all. - invalidation_ts: Timestamp of when the invalidation took place. - """ - - cache_func = attr.ib(type=str) - keys = attr.ib(type=Optional[List[Any]]) - invalidation_ts = attr.ib(type=int) - - -PublicRoomsStreamRow = namedtuple( - "PublicRoomsStreamRow", - ( - "room_id", # str - "visibility", # str - "appservice_id", # str, optional - "network_id", # str, optional - ), -) - - -@attr.s -class DeviceListsStreamRow: - entity = attr.ib(type=str) - - -ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",)) # str -TagAccountDataStreamRow = namedtuple( - "TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict -) -AccountDataStreamRow = namedtuple( - "AccountDataStream", ("user_id", "room_id", "data_type") # str # str # str -) -GroupsStreamRow = namedtuple( - "GroupsStreamRow", - ("group_id", "user_id", "type", "content"), # str # str # str # dict -) -UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id")) # str - class Stream(object): """Base class for the streams. 
@@ -234,6 +146,18 @@ class BackfillStream(Stream): or it went from being an outlier to not. """ + BackfillStreamRow = namedtuple( + "BackfillStreamRow", + ( + "event_id", # str + "room_id", # str + "type", # str + "state_key", # str, optional + "redacts", # str, optional + "relates_to", # str, optional + ), + ) + NAME = "backfill" ROW_TYPE = BackfillStreamRow @@ -246,6 +170,19 @@ class BackfillStream(Stream): class PresenceStream(Stream): + PresenceStreamRow = namedtuple( + "PresenceStreamRow", + ( + "user_id", # str + "state", # str + "last_active_ts", # int + "last_federation_update_ts", # int + "last_user_sync_ts", # int + "status_msg", # str + "currently_active", # bool + ), + ) + NAME = "presence" ROW_TYPE = PresenceStreamRow @@ -260,6 +197,10 @@ class PresenceStream(Stream): class TypingStream(Stream): + TypingStreamRow = namedtuple( + "TypingStreamRow", ("room_id", "user_ids") # str # list(str) + ) + NAME = "typing" ROW_TYPE = TypingStreamRow @@ -273,6 +214,17 @@ class TypingStream(Stream): class ReceiptsStream(Stream): + ReceiptsStreamRow = namedtuple( + "ReceiptsStreamRow", + ( + "room_id", # str + "receipt_type", # str + "user_id", # str + "event_id", # str + "data", # dict + ), + ) + NAME = "receipts" ROW_TYPE = ReceiptsStreamRow @@ -289,6 +241,8 @@ class PushRulesStream(Stream): """A user has changed their push rules """ + PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",)) # str + NAME = "push_rules" ROW_TYPE = PushRulesStreamRow @@ -309,6 +263,11 @@ class PushersStream(Stream): """A user has added/changed/removed a pusher """ + PushersStreamRow = namedtuple( + "PushersStreamRow", + ("user_id", "app_id", "pushkey", "deleted"), # str # str # str # bool + ) + NAME = "pushers" ROW_TYPE = PushersStreamRow @@ -326,6 +285,21 @@ class CachesStream(Stream): the cache on the workers """ + @attr.s + class CachesStreamRow: + """Stream to inform workers they should invalidate their cache. + + Attributes: + cache_func: Name of the cached function. + keys: The entry in the cache to invalidate. If None then will + invalidate all. + invalidation_ts: Timestamp of when the invalidation took place. + """ + + cache_func = attr.ib(type=str) + keys = attr.ib(type=Optional[List[Any]]) + invalidation_ts = attr.ib(type=int) + NAME = "caches" ROW_TYPE = CachesStreamRow @@ -342,6 +316,16 @@ class PublicRoomsStream(Stream): """The public rooms list changed """ + PublicRoomsStreamRow = namedtuple( + "PublicRoomsStreamRow", + ( + "room_id", # str + "visibility", # str + "appservice_id", # str, optional + "network_id", # str, optional + ), + ) + NAME = "public_rooms" ROW_TYPE = PublicRoomsStreamRow @@ -359,6 +343,10 @@ class DeviceListsStream(Stream): told about a device update. 
""" + @attr.s + class DeviceListsStreamRow: + entity = attr.ib(type=str) + NAME = "device_lists" ROW_TYPE = DeviceListsStreamRow @@ -375,6 +363,8 @@ class ToDeviceStream(Stream): """New to_device messages for a client """ + ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",)) # str + NAME = "to_device" ROW_TYPE = ToDeviceStreamRow @@ -391,6 +381,10 @@ class TagAccountDataStream(Stream): """Someone added/removed a tag for a room """ + TagAccountDataStreamRow = namedtuple( + "TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict + ) + NAME = "tag_account_data" ROW_TYPE = TagAccountDataStreamRow @@ -407,6 +401,10 @@ class AccountDataStream(Stream): """Global or per room account data was changed """ + AccountDataStreamRow = namedtuple( + "AccountDataStream", ("user_id", "room_id", "data_type") # str # str # str + ) + NAME = "account_data" ROW_TYPE = AccountDataStreamRow @@ -432,6 +430,11 @@ class AccountDataStream(Stream): class GroupServerStream(Stream): + GroupsStreamRow = namedtuple( + "GroupsStreamRow", + ("group_id", "user_id", "type", "content"), # str # str # str # dict + ) + NAME = "groups" ROW_TYPE = GroupsStreamRow @@ -448,6 +451,8 @@ class UserSignatureStream(Stream): """A user has signed their own device with their user-signing key """ + UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id")) # str + NAME = "user_signature" ROW_TYPE = UserSignatureStreamRow diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 615f3dc9ac..f5f9336430 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -17,20 +17,20 @@ from collections import namedtuple from ._base import Stream -FederationStreamRow = namedtuple( - "FederationStreamRow", - ( - "type", # str, the type of data as defined in the BaseFederationRows - "data", # dict, serialization of a federation.send_queue.BaseFederationRow - ), -) - class FederationStream(Stream): """Data to be sent over federation. Only available when master has federation sending disabled. """ + FederationStreamRow = namedtuple( + "FederationStreamRow", + ( + "type", # str, the type of data as defined in the BaseFederationRows + "data", # dict, serialization of a federation.send_queue.BaseFederationRow + ), + ) + NAME = "federation" ROW_TYPE = FederationStreamRow diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index d5a99f6caa..fa2493cad6 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.replication.tcp.streams._base import ReceiptsStream
 
 from tests.replication.tcp.streams._base import BaseStreamTestCase
 
@@ -38,7 +38,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
         rdata_rows = self.test_handler.received_rdata_rows
         self.assertEqual(1, len(rdata_rows))
         self.assertEqual(rdata_rows[0][0], "receipts")
-        row = rdata_rows[0][2]  # type: ReceiptsStreamRow
+        row = rdata_rows[0][2]  # type: ReceiptsStream.ReceiptsStreamRow
         self.assertEqual(ROOM_ID, row.room_id)
         self.assertEqual("m.read", row.receipt_type)
         self.assertEqual(USER_ID, row.user_id)

From e341518f92132ad0b71a826857146b0bd2e56d6b Mon Sep 17 00:00:00 2001
From: "Kartikaya Gupta (kats)"
Date: Mon, 23 Mar 2020 11:31:02 -0400
Subject: [PATCH 297/382] Update pre-built package name for FreeBSD (#7107).
 (#7107)

Signed-off-by: Kartikaya Gupta

---
 INSTALL.md           | 2 +-
 changelog.d/7107.doc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7107.doc

diff --git a/INSTALL.md b/INSTALL.md
index c0926ba590..f9e13b4cf6 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -383,7 +383,7 @@ Synapse can be found in the void repositories as 'synapse':
 Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:
 
 - Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
-- Packages: `pkg install py27-matrix-synapse`
+- Packages: `pkg install py37-matrix-synapse`
 
 ### NixOS
 
diff --git a/changelog.d/7107.doc b/changelog.d/7107.doc
new file mode 100644
index 0000000000..f6da32d406
--- /dev/null
+++ b/changelog.d/7107.doc
@@ -0,0 +1 @@
+Update pre-built package name for FreeBSD.

From 190ab593b7a2c0d79569758c0faa4d2442bc2c5f Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 23 Mar 2020 15:21:54 -0400
Subject: [PATCH 298/382] Use the proper error code when a canonical alias
 that does not exist is used. (#7109)

---
 changelog.d/7109.bugfix     |  1 +
 synapse/handlers/message.py | 57 ++++++++++++++++++++++++-------------
 2 files changed, 39 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/7109.bugfix

diff --git a/changelog.d/7109.bugfix b/changelog.d/7109.bugfix
new file mode 100644
index 0000000000..268de9978e
--- /dev/null
+++ b/changelog.d/7109.bugfix
@@ -0,0 +1 @@
+Return the proper error (M_BAD_ALIAS) when a non-existent canonical alias is provided.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index b743fc2dcc..522271eed1 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -851,6 +851,38 @@ class EventCreationHandler(object):
             self.store.remove_push_actions_from_staging, event.event_id
         )
 
+    @defer.inlineCallbacks
+    def _validate_canonical_alias(
+        self, directory_handler, room_alias_str, expected_room_id
+    ):
+        """
+        Ensure that the given room alias points to the expected room ID.
+
+        Args:
+            directory_handler: The directory handler object.
+            room_alias_str: The room alias to check.
+            expected_room_id: The room ID that the alias should point to.
+        """
+        room_alias = RoomAlias.from_string(room_alias_str)
+        try:
+            mapping = yield directory_handler.get_association(room_alias)
+        except SynapseError as e:
+            # Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
+ if e.errcode == Codes.NOT_FOUND: + raise SynapseError( + 400, + "Room alias %s does not point to the room" % (room_alias_str,), + Codes.BAD_ALIAS, + ) + raise + + if mapping["room_id"] != expected_room_id: + raise SynapseError( + 400, + "Room alias %s does not point to the room" % (room_alias_str,), + Codes.BAD_ALIAS, + ) + @defer.inlineCallbacks def persist_and_notify_client_event( self, requester, event, context, ratelimit=True, extra_users=[] @@ -905,15 +937,9 @@ class EventCreationHandler(object): room_alias_str = event.content.get("alias", None) directory_handler = self.hs.get_handlers().directory_handler if room_alias_str and room_alias_str != original_alias: - room_alias = RoomAlias.from_string(room_alias_str) - mapping = yield directory_handler.get_association(room_alias) - - if mapping["room_id"] != event.room_id: - raise SynapseError( - 400, - "Room alias %s does not point to the room" % (room_alias_str,), - Codes.BAD_ALIAS, - ) + yield self._validate_canonical_alias( + directory_handler, room_alias_str, event.room_id + ) # Check that alt_aliases is the proper form. alt_aliases = event.content.get("alt_aliases", []) @@ -931,16 +957,9 @@ class EventCreationHandler(object): new_alt_aliases = set(alt_aliases) - set(original_alt_aliases) if new_alt_aliases: for alias_str in new_alt_aliases: - room_alias = RoomAlias.from_string(alias_str) - mapping = yield directory_handler.get_association(room_alias) - - if mapping["room_id"] != event.room_id: - raise SynapseError( - 400, - "Room alias %s does not point to the room" - % (room_alias_str,), - Codes.BAD_ALIAS, - ) + yield self._validate_canonical_alias( + directory_handler, alias_str, event.room_id + ) federation_handler = self.hs.get_handlers().federation_handler From c816072d47c16d2840116418698d95a855f0f24c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Mar 2020 10:35:00 +0000 Subject: [PATCH 299/382] Fix starting workers when federation sending not split out. --- synapse/app/generic_worker.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index b2c764bfe8..5363642d64 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -860,6 +860,9 @@ def start(config_options): # Force the appservice to start since they will be disabled in the main config config.notify_appservices = True + else: + # For other worker types we force this to off. + config.notify_appservices = False if config.worker_app == "synapse.app.pusher": if config.start_pushers: @@ -873,6 +876,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.start_pushers = True + else: + # For other worker types we force this to off. + config.start_pushers = False if config.worker_app == "synapse.app.user_dir": if config.update_user_directory: @@ -886,6 +892,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.update_user_directory = True + else: + # For other worker types we force this to off. + config.update_user_directory = False if config.worker_app == "synapse.app.federation_sender": if config.send_federation: @@ -899,6 +908,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.send_federation = True + else: + # For other worker types we force this to off. 
+ config.send_federation = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts From d6828c129ffa5bbdd8bd0ed620772f77be45c006 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Mar 2020 10:36:44 +0000 Subject: [PATCH 300/382] Newsfile --- changelog.d/7133.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7133.bugfix diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix new file mode 100644 index 0000000000..61a86fd34e --- /dev/null +++ b/changelog.d/7133.bugfix @@ -0,0 +1 @@ +Fix starting workers when federation sending not split out. From 1fcf9c6f95fcfcacab95bb78849d79b8c7fa22e9 Mon Sep 17 00:00:00 2001 From: Naugrimm Date: Tue, 24 Mar 2020 12:59:04 +0100 Subject: [PATCH 301/382] Fix CAS redirect url (#6634) Build the same service URL when requesting the CAS ticket and when calling the proxyValidate URL. --- changelog.d/6634.bugfix | 1 + synapse/rest/client/v1/login.py | 27 ++++++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) create mode 100644 changelog.d/6634.bugfix diff --git a/changelog.d/6634.bugfix b/changelog.d/6634.bugfix new file mode 100644 index 0000000000..ec48fdc0a0 --- /dev/null +++ b/changelog.d/6634.bugfix @@ -0,0 +1 @@ +Fix single-sign on with CAS systems: pass the same service URL when requesting the CAS ticket and when calling the `proxyValidate` URL. Contributed by @Naugrimm. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 31551524f8..56d713462a 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -72,6 +72,14 @@ def login_id_thirdparty_from_phone(identifier): return {"type": "m.id.thirdparty", "medium": "msisdn", "address": msisdn} +def build_service_param(cas_service_url, client_redirect_url): + return "%s%s?redirectUrl=%s" % ( + cas_service_url, + "/_matrix/client/r0/login/cas/ticket", + urllib.parse.quote(client_redirect_url, safe=""), + ) + + class LoginRestServlet(RestServlet): PATTERNS = client_patterns("/login$", v1=True) CAS_TYPE = "m.login.cas" @@ -427,18 +435,15 @@ class BaseSSORedirectServlet(RestServlet): class CasRedirectServlet(BaseSSORedirectServlet): def __init__(self, hs): super(CasRedirectServlet, self).__init__() - self.cas_server_url = hs.config.cas_server_url.encode("ascii") - self.cas_service_url = hs.config.cas_service_url.encode("ascii") + self.cas_server_url = hs.config.cas_server_url + self.cas_service_url = hs.config.cas_service_url def get_sso_url(self, client_redirect_url): - client_redirect_url_param = urllib.parse.urlencode( - {b"redirectUrl": client_redirect_url} - ).encode("ascii") - hs_redirect_url = self.cas_service_url + b"/_matrix/client/r0/login/cas/ticket" - service_param = urllib.parse.urlencode( - {b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param)} - ).encode("ascii") - return b"%s/login?%s" % (self.cas_server_url, service_param) + args = urllib.parse.urlencode( + {"service": build_service_param(self.cas_service_url, client_redirect_url)} + ) + + return "%s/login?%s" % (self.cas_server_url, args) class CasTicketServlet(RestServlet): @@ -458,7 +463,7 @@ class CasTicketServlet(RestServlet): uri = self.cas_server_url + "/proxyValidate" args = { "ticket": parse_string(request, "ticket", required=True), - "service": self.cas_service_url, + "service": build_service_param(self.cas_service_url, client_redirect_url), } try: body = await self._http_client.get_raw(uri, args) From 39230d217104f3cd7aba9065dc478f935ce1e614 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff 
<1389908+richvdh@users.noreply.github.com>
Date: Tue, 24 Mar 2020 14:45:33 +0000
Subject: [PATCH 302/382] Clean up some LoggingContext stuff (#7120)

* Pull Sentinel out of LoggingContext

... and drop a few unnecessary references to it

* Factor out LoggingContext.current_context

move `current_context` and `set_context` out to top-level functions.

Mostly this means that I can more easily trace what's actually referring to
LoggingContext, but I think it's generally neater.

* move copy-to-parent into `stop`

this really just makes `start` and `stop` more symmetric. It also means that it
behaves correctly if you manually `set_log_context` rather than using the
context manager.

* Replace `LoggingContext.alive` with `finished`

Turn `alive` into `finished` and make it a bit better defined.
---
 changelog.d/7120.misc                         |   1 +
 docs/log_contexts.md                          |   5 +-
 synapse/crypto/keyring.py                     |   4 +-
 synapse/federation/federation_base.py         |   4 +-
 synapse/handlers/sync.py                      |   4 +-
 synapse/http/request_metrics.py               |   6 +-
 synapse/logging/_structured.py                |   4 +-
 synapse/logging/context.py                    | 234 +++++++++---------
 synapse/logging/scopecontextmanager.py        |  13 +-
 .../storage/data_stores/main/events_worker.py |   4 +-
 synapse/storage/database.py                   |  11 +-
 synapse/util/metrics.py                       |   4 +-
 synapse/util/patch_inline_callbacks.py        |  36 +--
 tests/crypto/test_keyring.py                  |   7 +-
 .../test_matrix_federation_agent.py           |   6 +-
 tests/http/federation/test_srv_resolver.py    |   6 +-
 tests/http/test_fedclient.py                  |   6 +-
 tests/rest/client/test_transactions.py        |  16 +-
 tests/unittest.py                             |  12 +-
 tests/util/caches/test_descriptors.py         |  22 +-
 tests/util/test_async_utils.py                |  15 +-
 tests/util/test_linearizer.py                 |   6 +-
 tests/util/test_logcontext.py                 |  22 +-
 tests/utils.py                                |   6 +-
 24 files changed, 232 insertions(+), 222 deletions(-)
 create mode 100644 changelog.d/7120.misc

diff --git a/changelog.d/7120.misc b/changelog.d/7120.misc
new file mode 100644
index 0000000000..731f4dcb52
--- /dev/null
+++ b/changelog.d/7120.misc
@@ -0,0 +1 @@
+Clean up some LoggingContext code.
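The heart of this refactor shows up in the `context.py` diff below: `current_context` and `set_current_context` become module-level functions over a `threading.local`. A stripped-down sketch of that pattern, using plain strings in place of real `LoggingContext` objects and omitting the `start`/`stop` resource-accounting calls that the real `set_current_context` performs:

```python
import threading

SENTINEL_CONTEXT = "sentinel"  # stand-in for the _Sentinel instance

_thread_local = threading.local()
_thread_local.current_context = SENTINEL_CONTEXT


def current_context():
    # threading.local attributes are per-thread, so the getattr default
    # covers threads on which no context was ever set.
    return getattr(_thread_local, "current_context", SENTINEL_CONTEXT)


def set_current_context(context):
    """Activate `context`, returning whatever was active before."""
    current = current_context()
    _thread_local.current_context = context
    return current


# Usage, mirroring the updated docs/log_contexts.md snippet:
calling_context = set_current_context("request-1")
try:
    assert current_context() == "request-1"
finally:
    set_current_context(calling_context)
assert current_context() is SENTINEL_CONTEXT
```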
diff --git a/docs/log_contexts.md b/docs/log_contexts.md index 5331e8c88b..fe30ca2791 100644 --- a/docs/log_contexts.md +++ b/docs/log_contexts.md @@ -29,14 +29,13 @@ from synapse.logging import context # omitted from future snippets def handle_request(request_id): request_context = context.LoggingContext() - calling_context = context.LoggingContext.current_context() - context.LoggingContext.set_current_context(request_context) + calling_context = context.set_current_context(request_context) try: request_context.request = request_id do_request_handling() logger.debug("finished") finally: - context.LoggingContext.set_current_context(calling_context) + context.set_current_context(calling_context) def do_request_handling(): logger.debug("phew") # this will be logged against request_id diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 983f0ead8c..a9f4025bfe 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -43,8 +43,8 @@ from synapse.api.errors import ( SynapseError, ) from synapse.logging.context import ( - LoggingContext, PreserveLoggingContext, + current_context, make_deferred_yieldable, preserve_fn, run_in_background, @@ -236,7 +236,7 @@ class Keyring(object): """ try: - ctx = LoggingContext.current_context() + ctx = current_context() # map from server name to a set of outstanding request ids server_to_request_ids = {} diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index b0b0eba41e..4b115aac04 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -32,8 +32,8 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( - LoggingContext, PreserveLoggingContext, + current_context, make_deferred_yieldable, ) from synapse.types import JsonDict, get_domain_from_id @@ -78,7 +78,7 @@ class FederationBase(object): """ deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus) - ctx = LoggingContext.current_context() + ctx = current_context() def callback(_, pdu: EventBase): with PreserveLoggingContext(ctx): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 669dbc8a48..5746fdea14 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -26,7 +26,7 @@ from prometheus_client import Counter from synapse.api.constants import EventTypes, Membership from synapse.api.filtering import FilterCollection from synapse.events import EventBase -from synapse.logging.context import LoggingContext +from synapse.logging.context import current_context from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter @@ -301,7 +301,7 @@ class SyncHandler(object): else: sync_type = "incremental_sync" - context = LoggingContext.current_context() + context = current_context() if context: context.tag = sync_type diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 58f9cc61c8..b58ae3d9db 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -19,7 +19,7 @@ import threading from prometheus_client.core import Counter, Histogram -from synapse.logging.context import LoggingContext +from synapse.logging.context import current_context from synapse.metrics import LaterGauge logger = logging.getLogger(__name__) @@ -148,7 +148,7 @@ LaterGauge( class 
RequestMetrics(object): def start(self, time_sec, name, method): self.start = time_sec - self.start_context = LoggingContext.current_context() + self.start_context = current_context() self.name = name self.method = method @@ -163,7 +163,7 @@ class RequestMetrics(object): with _in_flight_requests_lock: _in_flight_requests.discard(self) - context = LoggingContext.current_context() + context = current_context() tag = "" if context: diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index ffa7b20ca8..7372450b45 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -42,7 +42,7 @@ from synapse.logging._terse_json import ( TerseJSONToConsoleLogObserver, TerseJSONToTCPLogObserver, ) -from synapse.logging.context import LoggingContext +from synapse.logging.context import current_context def stdlib_log_level_to_twisted(level: str) -> LogLevel: @@ -86,7 +86,7 @@ class LogContextObserver(object): ].startswith("Timing out client"): return - context = LoggingContext.current_context() + context = current_context() # Copy the context information to the log event. if context is not None: diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 860b99a4c6..a8eafb1c7c 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -175,7 +175,54 @@ class ContextResourceUsage(object): return res -LoggingContextOrSentinel = Union["LoggingContext", "LoggingContext.Sentinel"] +LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"] + + +class _Sentinel(object): + """Sentinel to represent the root context""" + + __slots__ = ["previous_context", "finished", "request", "scope", "tag"] + + def __init__(self) -> None: + # Minimal set for compatibility with LoggingContext + self.previous_context = None + self.finished = False + self.request = None + self.scope = None + self.tag = None + + def __str__(self): + return "sentinel" + + def copy_to(self, record): + pass + + def copy_to_twisted_log_entry(self, record): + record["request"] = None + record["scope"] = None + + def start(self): + pass + + def stop(self): + pass + + def add_database_transaction(self, duration_sec): + pass + + def add_database_scheduled(self, sched_sec): + pass + + def record_event_fetch(self, event_count): + pass + + def __nonzero__(self): + return False + + __bool__ = __nonzero__ # python3 + + +SENTINEL_CONTEXT = _Sentinel() class LoggingContext(object): @@ -199,76 +246,33 @@ class LoggingContext(object): "_resource_usage", "usage_start", "main_thread", - "alive", + "finished", "request", "tag", "scope", ] - thread_local = threading.local() - - class Sentinel(object): - """Sentinel to represent the root context""" - - __slots__ = ["previous_context", "alive", "request", "scope", "tag"] - - def __init__(self) -> None: - # Minimal set for compatibility with LoggingContext - self.previous_context = None - self.alive = None - self.request = None - self.scope = None - self.tag = None - - def __str__(self): - return "sentinel" - - def copy_to(self, record): - pass - - def copy_to_twisted_log_entry(self, record): - record["request"] = None - record["scope"] = None - - def start(self): - pass - - def stop(self): - pass - - def add_database_transaction(self, duration_sec): - pass - - def add_database_scheduled(self, sched_sec): - pass - - def record_event_fetch(self, event_count): - pass - - def __nonzero__(self): - return False - - __bool__ = __nonzero__ # python3 - - sentinel = Sentinel() - def __init__(self, name=None, parent_context=None, request=None) -> 
None:
-        self.previous_context = LoggingContext.current_context()
+        self.previous_context = current_context()
         self.name = name

         # track the resources used by this context so far
         self._resource_usage = ContextResourceUsage()

-        # If alive has the thread resource usage when the logcontext last
-        # became active.
+        # The thread resource usage when the logcontext became active. None
+        # if the context is not currently active.
         self.usage_start = None

         self.main_thread = get_thread_id()
         self.request = None
         self.tag = ""
-        self.alive = True
         self.scope = None  # type: Optional[_LogContextScope]

+        # keep track of whether we have hit the __exit__ block for this context
+        # (suggesting that the thing that created the context thinks it should
+        # be finished, and that re-activating it would suggest an error).
+        self.finished = False
+
         self.parent_context = parent_context

         if self.parent_context is not None:
@@ -283,44 +287,15 @@ class LoggingContext(object):
             return str(self.request)
         return "%s@%x" % (self.name, id(self))

-    @classmethod
-    def current_context(cls) -> LoggingContextOrSentinel:
-        """Get the current logging context from thread local storage
-
-        Returns:
-            LoggingContext: the current logging context
-        """
-        return getattr(cls.thread_local, "current_context", cls.sentinel)
-
-    @classmethod
-    def set_current_context(
-        cls, context: LoggingContextOrSentinel
-    ) -> LoggingContextOrSentinel:
-        """Set the current logging context in thread local storage
-        Args:
-            context(LoggingContext): The context to activate.
-        Returns:
-            The context that was previously active
-        """
-        current = cls.current_context()
-
-        if current is not context:
-            current.stop()
-            cls.thread_local.current_context = context
-            context.start()
-        return current
-
     def __enter__(self) -> "LoggingContext":
         """Enters this logging context into thread local storage"""
-        old_context = self.set_current_context(self)
+        old_context = set_current_context(self)
         if self.previous_context != old_context:
             logger.warning(
                 "Expected previous context %r, found %r",
                 self.previous_context,
                 old_context,
             )
-        self.alive = True
-
         return self

     def __exit__(self, type, value, traceback) -> None:
         """Restore the logging context in thread local storage to the state it
         was before this context was entered.
         Returns:
             None to avoid suppressing any exceptions that were thrown.
         """
-        current = self.set_current_context(self.previous_context)
+        current = set_current_context(self.previous_context)
         if current is not self:
-            if current is self.sentinel:
+            if current is SENTINEL_CONTEXT:
                 logger.warning("Expected logging context %s was lost", self)
             else:
                 logger.warning(
                     "Expected logging context %s but found %s", self, current
                 )
-        self.alive = False
-
-        # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None and hasattr(
-            self.parent_context, "_resource_usage"
-        ):
-            self.parent_context._resource_usage += self._resource_usage
-
-        # reset them in case we get entered again
-        self._resource_usage.reset()
+        # the fact that we are here suggests that the caller thinks that everything
+        # is done and dusted for this logcontext, and further activity will not get
+        # recorded against the correct metrics. 
+ self.finished = True def copy_to(self, record) -> None: """Copy logging fields from this context to a log record or @@ -371,9 +341,14 @@ class LoggingContext(object): logger.warning("Started logcontext %s on different thread", self) return + if self.finished: + logger.warning("Re-starting finished log context %s", self) + # If we haven't already started record the thread resource usage so # far - if not self.usage_start: + if self.usage_start: + logger.warning("Re-starting already-active log context %s", self) + else: self.usage_start = get_thread_resource_usage() def stop(self) -> None: @@ -396,6 +371,15 @@ class LoggingContext(object): self.usage_start = None + # if we have a parent, pass our CPU usage stats on + if self.parent_context is not None and hasattr( + self.parent_context, "_resource_usage" + ): + self.parent_context._resource_usage += self._resource_usage + + # reset them in case we get entered again + self._resource_usage.reset() + def get_resource_usage(self) -> ContextResourceUsage: """Get resources used by this logcontext so far. @@ -409,7 +393,7 @@ class LoggingContext(object): # If we are on the correct thread and we're currently running then we # can include resource usage so far. is_main_thread = get_thread_id() == self.main_thread - if self.alive and self.usage_start and is_main_thread: + if self.usage_start and is_main_thread: utime_delta, stime_delta = self._get_cputime() res.ru_utime += utime_delta res.ru_stime += stime_delta @@ -492,7 +476,7 @@ class LoggingContextFilter(logging.Filter): Returns: True to include the record in the log output. """ - context = LoggingContext.current_context() + context = current_context() for key, value in self.defaults.items(): setattr(record, key, value) @@ -512,27 +496,24 @@ class PreserveLoggingContext(object): __slots__ = ["current_context", "new_context", "has_parent"] - def __init__(self, new_context: Optional[LoggingContextOrSentinel] = None) -> None: - if new_context is None: - self.new_context = LoggingContext.sentinel # type: LoggingContextOrSentinel - else: - self.new_context = new_context + def __init__( + self, new_context: LoggingContextOrSentinel = SENTINEL_CONTEXT + ) -> None: + self.new_context = new_context def __enter__(self) -> None: """Captures the current logging context""" - self.current_context = LoggingContext.set_current_context(self.new_context) + self.current_context = set_current_context(self.new_context) if self.current_context: self.has_parent = self.current_context.previous_context is not None - if not self.current_context.alive: - logger.debug("Entering dead context: %s", self.current_context) def __exit__(self, type, value, traceback) -> None: """Restores the current logging context""" - context = LoggingContext.set_current_context(self.current_context) + context = set_current_context(self.current_context) if context != self.new_context: - if context is LoggingContext.sentinel: + if not context: logger.warning("Expected logging context %s was lost", self.new_context) else: logger.warning( @@ -541,9 +522,30 @@ class PreserveLoggingContext(object): context, ) - if self.current_context is not LoggingContext.sentinel: - if not self.current_context.alive: - logger.debug("Restoring dead context: %s", self.current_context) + +_thread_local = threading.local() +_thread_local.current_context = SENTINEL_CONTEXT + + +def current_context() -> LoggingContextOrSentinel: + """Get the current logging context from thread local storage""" + return getattr(_thread_local, "current_context", SENTINEL_CONTEXT) + + +def 
set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel: + """Set the current logging context in thread local storage + Args: + context(LoggingContext): The context to activate. + Returns: + The context that was previously active + """ + current = current_context() + + if current is not context: + current.stop() + _thread_local.current_context = context + context.start() + return current def nested_logging_context( @@ -572,7 +574,7 @@ def nested_logging_context( if parent_context is not None: context = parent_context # type: LoggingContextOrSentinel else: - context = LoggingContext.current_context() + context = current_context() return LoggingContext( parent_context=context, request=str(context.request) + "-" + suffix ) @@ -604,7 +606,7 @@ def run_in_background(f, *args, **kwargs): CRITICAL error about an unhandled error will be logged without much indication about where it came from. """ - current = LoggingContext.current_context() + current = current_context() try: res = f(*args, **kwargs) except: # noqa: E722 @@ -625,7 +627,7 @@ def run_in_background(f, *args, **kwargs): # The function may have reset the context before returning, so # we need to restore it now. - ctx = LoggingContext.set_current_context(current) + ctx = set_current_context(current) # The original context will be restored when the deferred # completes, but there is nothing waiting for it, so it will @@ -674,7 +676,7 @@ def make_deferred_yieldable(deferred): # ok, we can't be sure that a yield won't block, so let's reset the # logcontext, and add a callback to the deferred to restore it. - prev_context = LoggingContext.set_current_context(LoggingContext.sentinel) + prev_context = set_current_context(SENTINEL_CONTEXT) deferred.addBoth(_set_context_cb, prev_context) return deferred @@ -684,7 +686,7 @@ ResultT = TypeVar("ResultT") def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT: """A callback function which just sets the logging context""" - LoggingContext.set_current_context(context) + set_current_context(context) return result @@ -752,7 +754,7 @@ def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): Deferred: A Deferred which fires a callback with the result of `f`, or an errback if `f` throws an exception. """ - logcontext = LoggingContext.current_context() + logcontext = current_context() def g(): with LoggingContext(parent_context=logcontext): diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 4eed4f2338..dc3ab00cbb 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -19,7 +19,7 @@ from opentracing import Scope, ScopeManager import twisted -from synapse.logging.context import LoggingContext, nested_logging_context +from synapse.logging.context import current_context, nested_logging_context logger = logging.getLogger(__name__) @@ -49,11 +49,8 @@ class LogContextScopeManager(ScopeManager): (Scope) : the Scope that is active, or None if not available. """ - ctx = LoggingContext.current_context() - if ctx is LoggingContext.sentinel: - return None - else: - return ctx.scope + ctx = current_context() + return ctx.scope def activate(self, span, finish_on_close): """ @@ -70,9 +67,9 @@ class LogContextScopeManager(ScopeManager): """ enter_logcontext = False - ctx = LoggingContext.current_context() + ctx = current_context() - if ctx is LoggingContext.sentinel: + if not ctx: # We don't want this scope to affect. 
logger.error("Tried to activate scope outside of loggingcontext") return Scope(None, span) diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index ca237c6f12..3013f49d32 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -35,7 +35,7 @@ from synapse.api.room_versions import ( ) from synapse.events import make_event_from_dict from synapse.events.utils import prune_event -from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.context import PreserveLoggingContext, current_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause from synapse.storage.database import Database @@ -409,7 +409,7 @@ class EventsWorkerStore(SQLBaseStore): missing_events_ids = [e for e in event_ids if e not in event_entry_map] if missing_events_ids: - log_ctx = LoggingContext.current_context() + log_ctx = current_context() log_ctx.record_event_fetch(len(missing_events_ids)) # Note that _get_events_from_db is also responsible for turning db rows diff --git a/synapse/storage/database.py b/synapse/storage/database.py index e61595336c..715c0346dd 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -32,6 +32,7 @@ from synapse.config.database import DatabaseConnectionConfig from synapse.logging.context import ( LoggingContext, LoggingContextOrSentinel, + current_context, make_deferred_yieldable, ) from synapse.metrics.background_process_metrics import run_as_background_process @@ -483,7 +484,7 @@ class Database(object): end = monotonic_time() duration = end - start - LoggingContext.current_context().add_database_transaction(duration) + current_context().add_database_transaction(duration) transaction_logger.debug("[TXN END] {%s} %f sec", name, duration) @@ -510,7 +511,7 @@ class Database(object): after_callbacks = [] # type: List[_CallbackListEntry] exception_callbacks = [] # type: List[_CallbackListEntry] - if LoggingContext.current_context() == LoggingContext.sentinel: + if not current_context(): logger.warning("Starting db txn '%s' from sentinel context", desc) try: @@ -547,10 +548,8 @@ class Database(object): Returns: Deferred: The result of func """ - parent_context = ( - LoggingContext.current_context() - ) # type: Optional[LoggingContextOrSentinel] - if parent_context == LoggingContext.sentinel: + parent_context = current_context() # type: Optional[LoggingContextOrSentinel] + if not parent_context: logger.warning( "Starting db connection from sentinel context: metrics will be lost" ) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 7b18455469..ec61e14423 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -21,7 +21,7 @@ from prometheus_client import Counter from twisted.internet import defer -from synapse.logging.context import LoggingContext +from synapse.logging.context import LoggingContext, current_context from synapse.metrics import InFlightGauge logger = logging.getLogger(__name__) @@ -106,7 +106,7 @@ class Measure(object): raise RuntimeError("Measure() objects cannot be re-used") self.start = self.clock.time() - parent_context = LoggingContext.current_context() + parent_context = current_context() self._logging_context = LoggingContext( "Measure[%s]" % (self.name,), parent_context ) diff --git a/synapse/util/patch_inline_callbacks.py 
b/synapse/util/patch_inline_callbacks.py index 3925927f9f..fdff195771 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -32,7 +32,7 @@ def do_patch(): Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit """ - from synapse.logging.context import LoggingContext + from synapse.logging.context import current_context global _already_patched @@ -43,35 +43,35 @@ def do_patch(): def new_inline_callbacks(f): @functools.wraps(f) def wrapped(*args, **kwargs): - start_context = LoggingContext.current_context() + start_context = current_context() changes = [] # type: List[str] orig = orig_inline_callbacks(_check_yield_points(f, changes)) try: res = orig(*args, **kwargs) except Exception: - if LoggingContext.current_context() != start_context: + if current_context() != start_context: for err in changes: print(err, file=sys.stderr) err = "%s changed context from %s to %s on exception" % ( f, start_context, - LoggingContext.current_context(), + current_context(), ) print(err, file=sys.stderr) raise Exception(err) raise if not isinstance(res, Deferred) or res.called: - if LoggingContext.current_context() != start_context: + if current_context() != start_context: for err in changes: print(err, file=sys.stderr) err = "Completed %s changed context from %s to %s" % ( f, start_context, - LoggingContext.current_context(), + current_context(), ) # print the error to stderr because otherwise all we # see in travis-ci is the 500 error @@ -79,23 +79,23 @@ def do_patch(): raise Exception(err) return res - if LoggingContext.current_context() != LoggingContext.sentinel: + if current_context(): err = ( "%s returned incomplete deferred in non-sentinel context " "%s (start was %s)" - ) % (f, LoggingContext.current_context(), start_context) + ) % (f, current_context(), start_context) print(err, file=sys.stderr) raise Exception(err) def check_ctx(r): - if LoggingContext.current_context() != start_context: + if current_context() != start_context: for err in changes: print(err, file=sys.stderr) err = "%s completion of %s changed context from %s to %s" % ( "Failure" if isinstance(r, Failure) else "Success", f, start_context, - LoggingContext.current_context(), + current_context(), ) print(err, file=sys.stderr) raise Exception(err) @@ -127,7 +127,7 @@ def _check_yield_points(f: Callable, changes: List[str]): function """ - from synapse.logging.context import LoggingContext + from synapse.logging.context import current_context @functools.wraps(f) def check_yield_points_inner(*args, **kwargs): @@ -136,7 +136,7 @@ def _check_yield_points(f: Callable, changes: List[str]): last_yield_line_no = gen.gi_frame.f_lineno result = None # type: Any while True: - expected_context = LoggingContext.current_context() + expected_context = current_context() try: isFailure = isinstance(result, Failure) @@ -145,7 +145,7 @@ def _check_yield_points(f: Callable, changes: List[str]): else: d = gen.send(result) except (StopIteration, defer._DefGen_Return) as e: - if LoggingContext.current_context() != expected_context: + if current_context() != expected_context: # This happens when the context is lost sometime *after* the # final yield and returning. E.g. we forgot to yield on a # function that returns a deferred. 
@@ -159,7 +159,7 @@ def _check_yield_points(f: Callable, changes: List[str]): % ( f.__qualname__, expected_context, - LoggingContext.current_context(), + current_context(), f.__code__.co_filename, last_yield_line_no, ) @@ -173,13 +173,13 @@ def _check_yield_points(f: Callable, changes: List[str]): # This happens if we yield on a deferred that doesn't follow # the log context rules without wrapping in a `make_deferred_yieldable`. # We raise here as this should never happen. - if LoggingContext.current_context() is not LoggingContext.sentinel: + if current_context(): err = ( "%s yielded with context %s rather than sentinel," " yielded on line %d in %s" % ( frame.f_code.co_name, - LoggingContext.current_context(), + current_context(), frame.f_lineno, frame.f_code.co_filename, ) @@ -191,7 +191,7 @@ def _check_yield_points(f: Callable, changes: List[str]): except Exception as e: result = Failure(e) - if LoggingContext.current_context() != expected_context: + if current_context() != expected_context: # This happens because the context is lost sometime *after* the # previous yield and *after* the current yield. E.g. the @@ -206,7 +206,7 @@ def _check_yield_points(f: Callable, changes: List[str]): % ( frame.f_code.co_name, expected_context, - LoggingContext.current_context(), + current_context(), last_yield_line_no, frame.f_lineno, frame.f_code.co_filename, diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 34d5895f18..70c8e72303 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -34,6 +34,7 @@ from synapse.crypto.keyring import ( from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, + current_context, make_deferred_yieldable, ) from synapse.storage.keys import FetchKeyResult @@ -83,9 +84,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): ) def check_context(self, _, expected): - self.assertEquals( - getattr(LoggingContext.current_context(), "request", None), expected - ) + self.assertEquals(getattr(current_context(), "request", None), expected) def test_verify_json_objects_for_server_awaits_previous_requests(self): key1 = signedjson.key.generate_signing_key(1) @@ -105,7 +104,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def get_perspectives(**kwargs): - self.assertEquals(LoggingContext.current_context().request, "11") + self.assertEquals(current_context().request, "11") with PreserveLoggingContext(): yield persp_deferred return persp_resp diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index fdc1d918ff..562397cdda 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -38,7 +38,7 @@ from synapse.http.federation.well_known_resolver import ( WellKnownResolver, _cache_period_from_headers, ) -from synapse.logging.context import LoggingContext +from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context from synapse.util.caches.ttlcache import TTLCache from tests import unittest @@ -155,7 +155,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.assertNoResult(fetch_d) # should have reset logcontext to the sentinel - _check_logcontext(LoggingContext.sentinel) + _check_logcontext(SENTINEL_CONTEXT) try: fetch_res = yield fetch_d @@ -1197,7 +1197,7 @@ class TestCachePeriodFromHeaders(unittest.TestCase): def _check_logcontext(context): - current = LoggingContext.current_context() + current = 
current_context() if current is not context: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index df034ab237..babc201643 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -22,7 +22,7 @@ from twisted.internet.error import ConnectError from twisted.names import dns, error from synapse.http.federation.srv_resolver import SrvResolver -from synapse.logging.context import LoggingContext +from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context from tests import unittest from tests.utils import MockClock @@ -54,12 +54,12 @@ class SrvResolverTestCase(unittest.TestCase): self.assertNoResult(resolve_d) # should have reset to the sentinel context - self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertIs(current_context(), SENTINEL_CONTEXT) result = yield resolve_d # should have restored our context - self.assertIs(LoggingContext.current_context(), ctx) + self.assertIs(current_context(), ctx) return result diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 2b01f40a42..fff4f0cbf4 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -29,14 +29,14 @@ from synapse.http.matrixfederationclient import ( MatrixFederationHttpClient, MatrixFederationRequest, ) -from synapse.logging.context import LoggingContext +from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context from tests.server import FakeTransport from tests.unittest import HomeserverTestCase def check_logcontext(context): - current = LoggingContext.current_context() + current = current_context() if current is not context: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) @@ -64,7 +64,7 @@ class FederationClientTests(HomeserverTestCase): self.assertNoResult(fetch_d) # should have reset logcontext to the sentinel - check_logcontext(LoggingContext.sentinel) + check_logcontext(SENTINEL_CONTEXT) try: fetch_res = yield fetch_d diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index a3d7e3c046..171632e195 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -2,7 +2,7 @@ from mock import Mock, call from twisted.internet import defer, reactor -from synapse.logging.context import LoggingContext +from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache from synapse.util import Clock @@ -52,14 +52,14 @@ class HttpTransactionCacheTestCase(unittest.TestCase): def test(): with LoggingContext("c") as c1: res = yield self.cache.fetch_or_execute(self.mock_key, cb) - self.assertIs(LoggingContext.current_context(), c1) + self.assertIs(current_context(), c1) self.assertEqual(res, "yay") # run the test twice in parallel d = defer.gatherResults([test(), test()]) - self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertIs(current_context(), SENTINEL_CONTEXT) yield d - self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertIs(current_context(), SENTINEL_CONTEXT) @defer.inlineCallbacks def test_does_not_cache_exceptions(self): @@ -81,11 +81,11 @@ class HttpTransactionCacheTestCase(unittest.TestCase): yield self.cache.fetch_or_execute(self.mock_key, cb) 
except Exception as e: self.assertEqual(e.args[0], "boo") - self.assertIs(LoggingContext.current_context(), test_context) + self.assertIs(current_context(), test_context) res = yield self.cache.fetch_or_execute(self.mock_key, cb) self.assertEqual(res, self.mock_http_response) - self.assertIs(LoggingContext.current_context(), test_context) + self.assertIs(current_context(), test_context) @defer.inlineCallbacks def test_does_not_cache_failures(self): @@ -107,11 +107,11 @@ class HttpTransactionCacheTestCase(unittest.TestCase): yield self.cache.fetch_or_execute(self.mock_key, cb) except Exception as e: self.assertEqual(e.args[0], "boo") - self.assertIs(LoggingContext.current_context(), test_context) + self.assertIs(current_context(), test_context) res = yield self.cache.fetch_or_execute(self.mock_key, cb) self.assertEqual(res, self.mock_http_response) - self.assertIs(LoggingContext.current_context(), test_context) + self.assertIs(current_context(), test_context) @defer.inlineCallbacks def test_cleans_up(self): diff --git a/tests/unittest.py b/tests/unittest.py index 8816a4d152..439174dbfc 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -38,7 +38,11 @@ from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.federation.transport import server as federation_server from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest, SynapseSite -from synapse.logging.context import LoggingContext +from synapse.logging.context import ( + SENTINEL_CONTEXT, + current_context, + set_current_context, +) from synapse.server import HomeServer from synapse.types import Requester, UserID, create_requester from synapse.util.ratelimitutils import FederationRateLimiter @@ -97,10 +101,10 @@ class TestCase(unittest.TestCase): def setUp(orig): # if we're not starting in the sentinel logcontext, then to be honest # all future bets are off. 
- if LoggingContext.current_context() is not LoggingContext.sentinel: + if current_context(): self.fail( "Test starting with non-sentinel logging context %s" - % (LoggingContext.current_context(),) + % (current_context(),) ) old_level = logging.getLogger().level @@ -122,7 +126,7 @@ class TestCase(unittest.TestCase): # force a GC to workaround problems with deferreds leaking logcontexts when # they are GCed (see the logcontext docs) gc.collect() - LoggingContext.set_current_context(LoggingContext.sentinel) + set_current_context(SENTINEL_CONTEXT) return ret diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 39e360fe24..4d2b9e0d64 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -22,8 +22,10 @@ from twisted.internet import defer, reactor from synapse.api.errors import SynapseError from synapse.logging.context import ( + SENTINEL_CONTEXT, LoggingContext, PreserveLoggingContext, + current_context, make_deferred_yieldable, ) from synapse.util.caches import descriptors @@ -194,7 +196,7 @@ class DescriptorTestCase(unittest.TestCase): with LoggingContext() as c1: c1.name = "c1" r = yield obj.fn(1) - self.assertEqual(LoggingContext.current_context(), c1) + self.assertEqual(current_context(), c1) return r def check_result(r): @@ -204,12 +206,12 @@ class DescriptorTestCase(unittest.TestCase): # set off a deferred which will do a cache lookup d1 = do_lookup() - self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertEqual(current_context(), SENTINEL_CONTEXT) d1.addCallback(check_result) # and another d2 = do_lookup() - self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertEqual(current_context(), SENTINEL_CONTEXT) d2.addCallback(check_result) # let the lookup complete @@ -239,14 +241,14 @@ class DescriptorTestCase(unittest.TestCase): try: d = obj.fn(1) self.assertEqual( - LoggingContext.current_context(), LoggingContext.sentinel + current_context(), SENTINEL_CONTEXT, ) yield d self.fail("No exception thrown") except SynapseError: pass - self.assertEqual(LoggingContext.current_context(), c1) + self.assertEqual(current_context(), c1) # the cache should now be empty self.assertEqual(len(obj.fn.cache.cache), 0) @@ -255,7 +257,7 @@ class DescriptorTestCase(unittest.TestCase): # set off a deferred which will do a cache lookup d1 = do_lookup() - self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertEqual(current_context(), SENTINEL_CONTEXT) return d1 @@ -366,10 +368,10 @@ class CachedListDescriptorTestCase(unittest.TestCase): @descriptors.cachedList("fn", "args1", inlineCallbacks=True) def list_fn(self, args1, arg2): - assert LoggingContext.current_context().request == "c1" + assert current_context().request == "c1" # we want this to behave like an asynchronous function yield run_on_reactor() - assert LoggingContext.current_context().request == "c1" + assert current_context().request == "c1" return self.mock(args1, arg2) with LoggingContext() as c1: @@ -377,9 +379,9 @@ class CachedListDescriptorTestCase(unittest.TestCase): obj = Cls() obj.mock.return_value = {10: "fish", 20: "chips"} d1 = obj.list_fn([10, 20], 2) - self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertEqual(current_context(), SENTINEL_CONTEXT) r = yield d1 - self.assertEqual(LoggingContext.current_context(), c1) + self.assertEqual(current_context(), c1) obj.mock.assert_called_once_with([10, 20], 2) 
self.assertEqual(r, {10: "fish", 20: "chips"}) obj.mock.reset_mock() diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py index f60918069a..17fd86d02d 100644 --- a/tests/util/test_async_utils.py +++ b/tests/util/test_async_utils.py @@ -16,7 +16,12 @@ from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred from twisted.internet.task import Clock -from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.context import ( + SENTINEL_CONTEXT, + LoggingContext, + PreserveLoggingContext, + current_context, +) from synapse.util.async_helpers import timeout_deferred from tests.unittest import TestCase @@ -79,10 +84,10 @@ class TimeoutDeferredTest(TestCase): # the errbacks should be run in the test logcontext def errback(res, deferred_name): self.assertIs( - LoggingContext.current_context(), + current_context(), context_one, "errback %s run in unexpected logcontext %s" - % (deferred_name, LoggingContext.current_context()), + % (deferred_name, current_context()), ) return res @@ -90,7 +95,7 @@ class TimeoutDeferredTest(TestCase): original_deferred.addErrback(errback, "orig") timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock) self.assertNoResult(timing_out_d) - self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertIs(current_context(), SENTINEL_CONTEXT) timing_out_d.addErrback(errback, "timingout") self.clock.pump((1.0,)) @@ -99,4 +104,4 @@ class TimeoutDeferredTest(TestCase): blocking_was_cancelled[0], "non-completing deferred was not cancelled" ) self.failureResultOf(timing_out_d, defer.TimeoutError) - self.assertIs(LoggingContext.current_context(), context_one) + self.assertIs(current_context(), context_one) diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 0ec8ef90ce..852ef23185 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -19,7 +19,7 @@ from six.moves import range from twisted.internet import defer, reactor from twisted.internet.defer import CancelledError -from synapse.logging.context import LoggingContext +from synapse.logging.context import LoggingContext, current_context from synapse.util import Clock from synapse.util.async_helpers import Linearizer @@ -54,11 +54,11 @@ class LinearizerTestCase(unittest.TestCase): def func(i, sleep=False): with LoggingContext("func(%s)" % i) as lc: with (yield linearizer.queue("")): - self.assertEqual(LoggingContext.current_context(), lc) + self.assertEqual(current_context(), lc) if sleep: yield Clock(reactor).sleep(0) - self.assertEqual(LoggingContext.current_context(), lc) + self.assertEqual(current_context(), lc) func(0, sleep=True) for i in range(1, 100): diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 281b32c4b8..95301c013c 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -2,8 +2,10 @@ import twisted.python.failure from twisted.internet import defer, reactor from synapse.logging.context import ( + SENTINEL_CONTEXT, LoggingContext, PreserveLoggingContext, + current_context, make_deferred_yieldable, nested_logging_context, run_in_background, @@ -15,7 +17,7 @@ from .. 
import unittest class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): - self.assertEquals(LoggingContext.current_context().request, value) + self.assertEquals(current_context().request, value) def test_with_context(self): with LoggingContext() as context_one: @@ -41,7 +43,7 @@ class LoggingContextTestCase(unittest.TestCase): self._check_test_key("one") def _test_run_in_background(self, function): - sentinel_context = LoggingContext.current_context() + sentinel_context = current_context() callback_completed = [False] @@ -71,7 +73,7 @@ class LoggingContextTestCase(unittest.TestCase): # make sure that the context was reset before it got thrown back # into the reactor try: - self.assertIs(LoggingContext.current_context(), sentinel_context) + self.assertIs(current_context(), sentinel_context) d2.callback(None) except BaseException: d2.errback(twisted.python.failure.Failure()) @@ -108,7 +110,7 @@ class LoggingContextTestCase(unittest.TestCase): async def testfunc(): self._check_test_key("one") d = Clock(reactor).sleep(0) - self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) + self.assertIs(current_context(), SENTINEL_CONTEXT) await d self._check_test_key("one") @@ -129,14 +131,14 @@ class LoggingContextTestCase(unittest.TestCase): reactor.callLater(0, d.callback, None) return d - sentinel_context = LoggingContext.current_context() + sentinel_context = current_context() with LoggingContext() as context_one: context_one.request = "one" d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable - self.assertIs(LoggingContext.current_context(), sentinel_context) + self.assertIs(current_context(), sentinel_context) yield d1 @@ -145,14 +147,14 @@ class LoggingContextTestCase(unittest.TestCase): @defer.inlineCallbacks def test_make_deferred_yieldable_with_chained_deferreds(self): - sentinel_context = LoggingContext.current_context() + sentinel_context = current_context() with LoggingContext() as context_one: context_one.request = "one" d1 = make_deferred_yieldable(_chained_deferred_function()) # make sure that the context was reset by make_deferred_yieldable - self.assertIs(LoggingContext.current_context(), sentinel_context) + self.assertIs(current_context(), sentinel_context) yield d1 @@ -189,14 +191,14 @@ class LoggingContextTestCase(unittest.TestCase): reactor.callLater(0, d.callback, None) await d - sentinel_context = LoggingContext.current_context() + sentinel_context = current_context() with LoggingContext() as context_one: context_one.request = "one" d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable - self.assertIs(LoggingContext.current_context(), sentinel_context) + self.assertIs(current_context(), sentinel_context) yield d1 diff --git a/tests/utils.py b/tests/utils.py index 513f358f4f..968d109f77 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -35,7 +35,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.federation.transport import server as federation_server from synapse.http.server import HttpServer -from synapse.logging.context import LoggingContext +from synapse.logging.context import current_context, set_current_context from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import PostgresEngine, create_engine @@ -493,10 +493,10 @@ class MockClock(object): return self.time() * 1000 def 
call_later(self, delay, callback, *args, **kwargs): - current_context = LoggingContext.current_context() + ctx = current_context() def wrapped_callback(): - LoggingContext.thread_local.current_context = current_context + set_current_context(ctx) callback(*args, **kwargs) t = [self.now + delay, wrapped_callback, False] From 28d9d6e8a9d6a6d5162de41cada1b6d6d4b0f941 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 24 Mar 2020 18:33:49 +0000 Subject: [PATCH 303/382] Remove spurious "name" parameter to `default_config` this is never set to anything other than "test", and is a source of unnecessary boilerplate. --- tests/app/test_frontend_proxy.py | 4 ++-- tests/app/test_openid_listener.py | 4 ++-- tests/federation/test_complexity.py | 4 ++-- tests/handlers/test_register.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 4 ++-- tests/rest/key/v2/test_remote_key_resource.py | 4 ++-- .../server_notices/test_resource_limits_server_notices.py | 2 +- tests/test_terms_auth.py | 4 ++-- tests/unittest.py | 7 ++----- 9 files changed, 16 insertions(+), 19 deletions(-) diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index d3feafa1b7..be20a89682 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -27,8 +27,8 @@ class FrontendProxyTests(HomeserverTestCase): return hs - def default_config(self, name="test"): - c = super().default_config(name) + def default_config(self): + c = super().default_config() c["worker_app"] = "synapse.app.frontend_proxy" return c diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 89fcc3889a..7364f9f1ec 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -29,8 +29,8 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): ) return hs - def default_config(self, name="test"): - conf = super().default_config(name) + def default_config(self): + conf = super().default_config() # we're using FederationReaderServer, which uses a SlavedStore, so we # have to tell the FederationHandler not to try to access stuff that is only # in the primary store. diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 24fa8dbb45..94980733c4 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -33,8 +33,8 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): login.register_servlets, ] - def default_config(self, name="test"): - config = super().default_config(name=name) + def default_config(self): + config = super().default_config() config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index e2915eb7b1..e7b638dbfe 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -34,7 +34,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): """ Tests the RegistrationHandler. 
""" def make_homeserver(self, reactor, clock): - hs_config = self.default_config("test") + hs_config = self.default_config() # some of the tests rely on us having a user consent version hs_config["user_consent"] = { diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d0c997e385..b6ed06e02d 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -36,8 +36,8 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): servlets = [register.register_servlets] url = b"/_matrix/client/r0/register" - def default_config(self, name="test"): - config = super().default_config(name) + def default_config(self): + config = super().default_config() config["allow_guest_access"] = True return config diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 6776a56cad..99eb477149 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -143,8 +143,8 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase): endpoint, to check that the two implementations are compatible. """ - def default_config(self, *args, **kwargs): - config = super().default_config(*args, **kwargs) + def default_config(self): + config = super().default_config() # replace the signing key with our own self.hs_signing_key = signedjson.key.generate_signing_key("kssk") diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index eb540e34f6..0d27b92a86 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -28,7 +28,7 @@ from tests import unittest class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): - hs_config = self.default_config("test") + hs_config = self.default_config() hs_config["server_notices"] = { "system_mxid_localpart": "server", "system_mxid_display_name": "test display name", diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 5ec5d2b358..81d796f3f3 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -28,8 +28,8 @@ from tests import unittest class TermsTestCase(unittest.HomeserverTestCase): servlets = [register_servlets] - def default_config(self, name="test"): - config = super().default_config(name) + def default_config(self): + config = super().default_config() config.update( { "public_baseurl": "https://example.org/", diff --git a/tests/unittest.py b/tests/unittest.py index 8816a4d152..23b59bea22 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -311,14 +311,11 @@ class HomeserverTestCase(TestCase): return resource - def default_config(self, name="test"): + def default_config(self): """ Get a default HomeServer config dict. - - Args: - name (str): The homeserver name/domain. """ - config = default_config(name) + config = default_config("test") # apply any additional config which was specified via the override_config # decorator. 
From 7bab642707ecd985ebd736af890f4bfe2c3232fe Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 25 Mar 2020 13:56:40 +0000
Subject: [PATCH 304/382] Various cleanups to INSTALL.md (#7141)

---
 INSTALL.md           | 98 ++++++++++++++++++--------------------------
 changelog.d/7141.doc |  1 +
 2 files changed, 40 insertions(+), 59 deletions(-)
 create mode 100644 changelog.d/7141.doc

diff --git a/INSTALL.md b/INSTALL.md
index f9e13b4cf6..af9a5ef439 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -2,7 +2,6 @@
 - [Installing Synapse](#installing-synapse)
   - [Installing from source](#installing-from-source)
     - [Platform-Specific Instructions](#platform-specific-instructions)
-    - [Troubleshooting Installation](#troubleshooting-installation)
   - [Prebuilt packages](#prebuilt-packages)
 - [Setting up Synapse](#setting-up-synapse)
   - [TLS certificates](#tls-certificates)
@@ -10,6 +9,7 @@
   - [Registering a user](#registering-a-user)
   - [Setting up a TURN server](#setting-up-a-turn-server)
   - [URL previews](#url-previews)
+- [Troubleshooting Installation](#troubleshooting-installation)

 # Choosing your server name

@@ -70,7 +70,7 @@ pip install -U matrix-synapse
 ```

 Before you can start Synapse, you will need to generate a configuration
-file. To do this, run (in your virtualenv, as before)::
+file. To do this, run (in your virtualenv, as before):

 ```
 cd ~/synapse
@@ -84,22 +84,24 @@ python -m synapse.app.homeserver \
 ... substituting an appropriate value for `--server-name`.

 This command will generate you a config file that you can then customise, but it will
-also generate a set of keys for you. These keys will allow your Home Server to
-identify itself to other Home Servers, so don't lose or delete them. It would be
+also generate a set of keys for you. These keys will allow your homeserver to
+identify itself to other homeservers, so don't lose or delete them. It would be
 wise to back them up somewhere safe. (If, for whatever reason, you do need to
-change your Home Server's keys, you may find that other Home Servers have the
+change your homeserver's keys, you may find that other homeservers have the
 old key cached. If you update the signing key, you should change the name of the
 key in the `.signing.key` file (the second word) to something different. See the
 [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
-for more information on key management.)
+for more information on key management).

 To actually run your new homeserver, pick a working directory for Synapse to
-run (e.g. `~/synapse`), and::
+run (e.g. `~/synapse`), and:

-    cd ~/synapse
-    source env/bin/activate
-    synctl start
+```
+cd ~/synapse
+source env/bin/activate
+synctl start
+```

 ### Platform-Specific Instructions

@@ -188,7 +190,7 @@ doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \

 There is currently no port for OpenBSD. Additionally, OpenBSD's security
 settings require a slightly more difficult installation process.

-XXX: I suspect this is out of date.
+(XXX: I suspect this is out of date)

 1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
    new user called `_synapse` and set that directory as the new user's home.
    This is required because, by default, OpenBSD only allows binaries which need
    write and execute permissions on the same memory space to be run from
    `/usr/local`.
 2. `su` to the new `_synapse` user and change to their home directory.
-3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
+3. 
Create a new virtualenv: `virtualenv -p python3 ~/.synapse` 4. Source the virtualenv configuration located at `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by using the `.` command, rather than `bash`'s `source`. @@ -217,45 +219,6 @@ be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server for Windows Server. -### Troubleshooting Installation - -XXX a bunch of this is no longer relevant. - -Synapse requires pip 8 or later, so if your OS provides too old a version you -may need to manually upgrade it:: - - sudo pip install --upgrade pip - -Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`. -You can fix this by manually upgrading pip and virtualenv:: - - sudo pip install --upgrade virtualenv - -You can next rerun `virtualenv -p python3 synapse` to update the virtual env. - -Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.` -You can fix this by manually installing ndg-httpsclient:: - - pip install --upgrade ndg-httpsclient - -Installing may fail with `mock requires setuptools>=17.1. Aborting installation`. -You can fix this by upgrading setuptools:: - - pip install --upgrade setuptools - -If pip crashes mid-installation for reason (e.g. lost terminal), pip may -refuse to run until you remove the temporary installation directory it -created. To reset the installation:: - - rm -rf /tmp/pip_install_matrix - -pip seems to leak *lots* of memory during installation. For instance, a Linux -host with 512MB of RAM may run out of memory whilst installing Twisted. If this -happens, you will have to individually install the dependencies which are -failing, e.g.:: - - pip install twisted - ## Prebuilt packages As an alternative to installing from source, prebuilt packages are available @@ -314,7 +277,7 @@ For `buster` and `sid`, Synapse is available in the Debian repositories and it should be possible to install it with simply: ``` - sudo apt install matrix-synapse +sudo apt install matrix-synapse ``` There is also a version of `matrix-synapse` in `stretch-backports`. Please see @@ -375,8 +338,10 @@ sudo pip install py-bcrypt Synapse can be found in the void repositories as 'synapse': - xbps-install -Su - xbps-install -S synapse +``` +xbps-install -Su +xbps-install -S synapse +``` ### FreeBSD @@ -420,6 +385,7 @@ so, you will need to edit `homeserver.yaml`, as follows: resources: - names: [client, federation] ``` + * You will also need to uncomment the `tls_certificate_path` and `tls_private_key_path` lines under the `TLS` section. You can either point these settings at an existing certificate and key, or you can @@ -435,7 +401,7 @@ so, you will need to edit `homeserver.yaml`, as follows: `cert.pem`). For a more detailed guide to configuring your server for federation, see -[federate.md](docs/federate.md) +[federate.md](docs/federate.md). ## Email @@ -482,7 +448,7 @@ on your server even if `enable_registration` is `false`. ## Setting up a TURN server For reliable VoIP calls to be routed via this homeserver, you MUST configure -a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details. +a TURN server. 
See [docs/turn-howto.md](docs/turn-howto.md) for details. ## URL previews @@ -491,10 +457,24 @@ turn it on you must enable the `url_preview_enabled: True` config parameter and explicitly specify the IP ranges that Synapse is not allowed to spider for previewing in the `url_preview_ip_range_blacklist` configuration parameter. This is critical from a security perspective to stop arbitrary Matrix users -spidering 'internal' URLs on your network. At the very least we recommend that +spidering 'internal' URLs on your network. At the very least we recommend that your loopback and RFC1918 IP addresses are blacklisted. -This also requires the optional lxml and netaddr python dependencies to be -installed. This in turn requires the libxml2 library to be available - on +This also requires the optional `lxml` and `netaddr` python dependencies to be +installed. This in turn requires the `libxml2` library to be available - on Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for your OS. + +# Troubleshooting Installation + +`pip` seems to leak *lots* of memory during installation. For instance, a Linux +host with 512MB of RAM may run out of memory whilst installing Twisted. If this +happens, you will have to individually install the dependencies which are +failing, e.g.: + +``` +pip install twisted +``` + +If you have any other problems, feel free to ask in +[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org). diff --git a/changelog.d/7141.doc b/changelog.d/7141.doc new file mode 100644 index 0000000000..2fcbd666c2 --- /dev/null +++ b/changelog.d/7141.doc @@ -0,0 +1 @@ +Clean up INSTALL.md a bit. \ No newline at end of file From 4cff617df1ba6f241fee6957cc44859f57edcc0e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Mar 2020 14:54:01 +0000 Subject: [PATCH 305/382] Move catchup of replication streams to worker. (#7024) This changes the replication protocol so that the server does not send down `RDATA` for rows that happened before the client connected. Instead, the server will send a `POSITION` and clients then query the database (or master out of band) to get up to date. 
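A rough sketch of the resulting worker-side catchup, using the `get_updates_since(from_token, upto_token, limit)` helper introduced below, which returns `(updates, upto_token, limited)`. The driving loop and the `process_update` handler are illustrative, not the exact worker code:

```
async def catch_up(stream, current_token, new_token):
    # After a POSITION, fetch any missed rows out of band rather than
    # expecting the server to stream the backlog down the connection.
    while current_token < new_token:
        updates, current_token, limited = await stream.get_updates_since(
            current_token, new_token, limit=100
        )
        for token, row in updates:
            process_update(stream.NAME, token, row)
        if not limited:
            break
```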
---
 changelog.d/7024.misc                         |   1 +
 docs/tcp_replication.md                       |  46 ++--
 synapse/app/generic_worker.py                 |   3 +
 synapse/federation/sender/__init__.py         |   9 +
 synapse/replication/http/__init__.py          |   2 +
 synapse/replication/http/streams.py           |  78 +++++++
 synapse/replication/slave/storage/_base.py    |  14 +-
 synapse/replication/slave/storage/pushers.py  |   3 +
 synapse/replication/tcp/client.py             |   3 +-
 synapse/replication/tcp/commands.py           |  34 +--
 synapse/replication/tcp/protocol.py           | 206 ++++++------------
 synapse/replication/tcp/resource.py           |  19 +-
 synapse/replication/tcp/streams/__init__.py   |   8 +-
 synapse/replication/tcp/streams/_base.py      | 160 +++++++++-----
 synapse/replication/tcp/streams/events.py     |   5 +-
 synapse/replication/tcp/streams/federation.py |  19 +-
 synapse/server.py                             |   5 +
 synapse/storage/data_stores/main/cache.py     |  44 ++--
 .../storage/data_stores/main/deviceinbox.py   |  88 ++++----
 synapse/storage/data_stores/main/events.py    | 114 ----------
 .../storage/data_stores/main/events_worker.py | 114 ++++++++++
 synapse/storage/data_stores/main/room.py      |  40 ++--
 tests/replication/tcp/streams/_base.py        |  55 +++--
 .../replication/tcp/streams/test_receipts.py  |  52 ++++-
 24 files changed, 635 insertions(+), 487 deletions(-)
 create mode 100644 changelog.d/7024.misc
 create mode 100644 synapse/replication/http/streams.py

diff --git a/changelog.d/7024.misc b/changelog.d/7024.misc
new file mode 100644
index 0000000000..676f285377
--- /dev/null
+++ b/changelog.d/7024.misc
@@ -0,0 +1 @@
+Move catchup of replication streams logic to worker.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index e3a4634b14..d4f7d9ec18 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -14,16 +14,16 @@ example flow would be (where '>' indicates master to worker and '<' worker to
 master flows):

     > SERVER example.com
-    < REPLICATE events 53
+    < REPLICATE
+    > POSITION events 53
     > RDATA events 54 ["$foo1:bar.com", ...]
     > RDATA events 55 ["$foo4:bar.com", ...]

-The example shows the server accepting a new connection and sending its
-identity with the `SERVER` command, followed by the client asking to
-subscribe to the `events` stream from the token `53`. The server then
-periodically sends `RDATA` commands which have the format
-`RDATA <stream_name> <token> <row>`, where the format of `<row>` is
-defined by the individual streams.
+The example shows the server accepting a new connection and sending its identity
+with the `SERVER` command, followed by the client asking the server to respond
+with the position of all streams. The server then periodically sends `RDATA`
+commands which have the format `RDATA <stream_name> <token> <row>`, where the
+format of `<row>` is defined by the individual streams.

 Error reporting happens by either the client or server sending an ERROR
 command, and usually the connection will be closed.

 Since the protocol is a simple line-based one, it's possible to manually connect to
 the server using a tool like netcat. A few things should be noted when manually
 using the protocol:

-- When subscribing to a stream using `REPLICATE`, the special token
-  `NOW` can be used to get all future updates. The special stream name
-  `ALL` can be used with `NOW` to subscribe to all available streams.
 - The federation stream is only available if federation sending has been
   disabled on the main process.
 - The server will only time connections out that have sent a `PING`

@@ -91,9 +88,7 @@ The client:

 - Sends a `NAME` command, allowing the server to associate a human
   friendly name with the connection. This is optional. 
- Sends a `PING` as above
-- For each stream the client wishes to subscribe to it sends a
-  `REPLICATE` with the `stream_name` and token it wants to subscribe
-  from.
+- Sends a `REPLICATE` to get the current position of all streams.
 - On receipt of a `SERVER` command, checks that the server name matches
   the expected server name.

@@ -140,9 +135,7 @@ the wire:
     > PING 1490197665618
     < NAME synapse.app.appservice
     < PING 1490197665618
-    < REPLICATE events 1
-    < REPLICATE backfill 1
-    < REPLICATE caches 1
+    < REPLICATE
     > POSITION events 1
     > POSITION backfill 1
     > POSITION caches 1
@@ -181,9 +174,9 @@ client (C):

 #### POSITION (S)

-    The position of the stream has been updated. Sent to the client
-    after all missing updates for a stream have been sent to the client
-    and they're now up to date.
+    On receipt of a POSITION command clients should check if they have missed any
+    updates, and if so then fetch them out of band. Sent in response to a
+    REPLICATE command (but can happen at any time).

 #### ERROR (S, C)

@@ -199,20 +192,7 @@ client (C):

 #### REPLICATE (C)

-Asks the server to replicate a given stream. The syntax is:
-
-```
-    REPLICATE <stream_name> <token>
-```
-
-Where `<token>` may be either:
- * a numeric stream_id to stream updates since (exclusive)
- * `NOW` to stream all subsequent updates.
-
-The `<stream_name>` is the name of a replication stream to subscribe
-to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
-of streams). It can also be `ALL` to subscribe to all known streams,
-in which case the `<token>` must be set to `NOW`.
+Asks the server for the current position of all streams.

 #### USER_SYNC (C)

diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index bd1733573b..fba7ad9551 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -401,6 +401,9 @@ class GenericWorkerTyping(object):
             self._room_serials[row.room_id] = token
             self._room_typing[row.room_id] = row.user_ids

+    def get_current_token(self) -> int:
+        return self._latest_room_serial
+

 class GenericWorkerSlavedStore(
     # FIXME(#3714): We need to add UserDirectoryStore as we write directly
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 233cb33daf..a477578e44 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -499,4 +499,13 @@ class FederationSender(object):
             self._get_per_destination_queue(destination).attempt_new_transaction()

     def get_current_token(self) -> int:
+        # Dummy implementation for case where federation sender isn't offloaded
+        # to a worker.
         return 0
+
+    async def get_replication_rows(
+        self, from_token, to_token, limit, federation_ack=None
+    ):
+        # Dummy implementation for case where federation sender isn't offloaded
+        # to a worker. 
+        return []
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
index 28dbc6fcba..4613b2538c 100644
--- a/synapse/replication/http/__init__.py
+++ b/synapse/replication/http/__init__.py
@@ -21,6 +21,7 @@ from synapse.replication.http import (
     membership,
     register,
     send_event,
+    streams,
 )
 
 REPLICATION_PREFIX = "/_synapse/replication"
@@ -38,3 +39,4 @@ class ReplicationRestResource(JsonResource):
         login.register_servlets(hs, self)
         register.register_servlets(hs, self)
         devices.register_servlets(hs, self)
+        streams.register_servlets(hs, self)
diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py
new file mode 100644
index 0000000000..ffd4c61993
--- /dev/null
+++ b/synapse/replication/http/streams.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import parse_integer
+from synapse.replication.http._base import ReplicationEndpoint
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationGetStreamUpdates(ReplicationEndpoint):
+    """Fetches stream updates from a server. Used for streams not persisted to
+    the database, e.g. typing notifications.
+
+    The API looks like:
+
+        GET /_synapse/replication/get_repl_stream_updates/events?from_token=0&upto_token=10&limit=100
+
+        200 OK
+
+        {
+            updates: [ ... ],
+            upto_token: 10,
+            limited: False,
+        }
+
+    """
+
+    NAME = "get_repl_stream_updates"
+    PATH_ARGS = ("stream_name",)
+    METHOD = "GET"
+
+    def __init__(self, hs):
+        super().__init__(hs)
+
+        # We pull the streams from the replication streamer (if we try to make
+        # them ourselves we end up in an import loop).
+ self.streams = hs.get_replication_streamer().get_streams() + + @staticmethod + def _serialize_payload(stream_name, from_token, upto_token, limit): + return {"from_token": from_token, "upto_token": upto_token, "limit": limit} + + async def _handle_request(self, request, stream_name): + stream = self.streams.get(stream_name) + if stream is None: + raise SynapseError(400, "Unknown stream") + + from_token = parse_integer(request, "from_token", required=True) + upto_token = parse_integer(request, "upto_token", required=True) + limit = parse_integer(request, "limit", required=True) + + updates, upto_token, limited = await stream.get_updates_since( + from_token, upto_token, limit + ) + + return ( + 200, + {"updates": updates, "upto_token": upto_token, "limited": limited}, + ) + + +def register_servlets(hs, http_server): + ReplicationGetStreamUpdates(hs).register(http_server) diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index f45cbd37a0..751c799d94 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -18,8 +18,10 @@ from typing import Dict, Optional import six -from synapse.storage._base import SQLBaseStore -from synapse.storage.data_stores.main.cache import CURRENT_STATE_CACHE_NAME +from synapse.storage.data_stores.main.cache import ( + CURRENT_STATE_CACHE_NAME, + CacheInvalidationWorkerStore, +) from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine @@ -35,7 +37,7 @@ def __func__(inp): return inp.__func__ -class BaseSlavedStore(SQLBaseStore): +class BaseSlavedStore(CacheInvalidationWorkerStore): def __init__(self, database: Database, db_conn, hs): super(BaseSlavedStore, self).__init__(database, db_conn, hs) if isinstance(self.database_engine, PostgresEngine): @@ -60,6 +62,12 @@ class BaseSlavedStore(SQLBaseStore): pos["caches"] = self._cache_id_gen.get_current_token() return pos + def get_cache_stream_token(self): + if self._cache_id_gen: + return self._cache_id_gen.get_current_token() + else: + return 0 + def process_replication_rows(self, stream_name, token, rows): if stream_name == "caches": if self._cache_id_gen: diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index f22c2d44a3..bce8a3d115 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -33,6 +33,9 @@ class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): result["pushers"] = self._pushers_id_gen.get_current_token() return result + def get_pushers_stream_token(self): + return self._pushers_id_gen.get_current_token() + def process_replication_rows(self, stream_name, token, rows): if stream_name == "pushers": self._pushers_id_gen.advance(token) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 02ab5b66ea..7e7ad0f798 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -55,6 +55,7 @@ class ReplicationClientFactory(ReconnectingClientFactory): self.client_name = client_name self.handler = handler self.server_name = hs.config.server_name + self.hs = hs self._clock = hs.get_clock() # As self.clock is defined in super class hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying) @@ -65,7 +66,7 @@ class ReplicationClientFactory(ReconnectingClientFactory): def buildProtocol(self, addr): logger.info("Connected to replication: %r", addr) return ClientReplicationStreamProtocol( - 
self.client_name, self.server_name, self._clock, self.handler
+            self.hs, self.client_name, self.server_name, self._clock, self.handler,
         )
 
     def clientConnectionLost(self, connector, reason):
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 451671412d..5a6b734094 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -136,8 +136,8 @@ class PositionCommand(Command):
     """Sent by the server to tell the client the stream postition without
     needing to send an RDATA.
 
-    Sent to the client after all missing updates for a stream have been sent
-    to the client and they're now up to date.
+    On receipt of a POSITION command clients should check if they have missed
+    any updates, and if so then fetch them out of band.
     """
 
     NAME = "POSITION"
@@ -179,42 +179,24 @@ class NameCommand(Command):
 
 
 class ReplicateCommand(Command):
-    """Sent by the client to subscribe to the stream.
+    """Sent by the client to subscribe to streams.
 
     Format::
 
-        REPLICATE <stream_name> <token>
-
-    Where <token> may be either:
-     * a numeric stream_id to stream updates from
-     * "NOW" to stream all subsequent updates.
-
-    The <stream_name> can be "ALL" to subscribe to all known streams, in which
-    case the <token> must be set to "NOW", i.e.::
-
-        REPLICATE ALL NOW
+        REPLICATE
     """
 
     NAME = "REPLICATE"
 
-    def __init__(self, stream_name, token):
-        self.stream_name = stream_name
-        self.token = token
+    def __init__(self):
+        pass
 
     @classmethod
     def from_line(cls, line):
-        stream_name, token = line.split(" ", 1)
-        if token in ("NOW", "now"):
-            token = "NOW"
-        else:
-            token = int(token)
-        return cls(stream_name, token)
+        return cls()
 
     def to_line(self):
-        return " ".join((self.stream_name, str(self.token)))
-
-    def get_logcontext_id(self):
-        return "REPLICATE-" + self.stream_name
+        return ""
 
 
 class UserSyncCommand(Command):
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index bc1482a9bb..f81d2e2442 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -35,9 +35,7 @@ indicate which side is sending, these are *not* included on the wire::
     > PING 1490197665618
     < NAME synapse.app.appservice
     < PING 1490197665618
-    < REPLICATE events 1
-    < REPLICATE backfill 1
-    < REPLICATE caches 1
+    < REPLICATE
     > POSITION events 1
     > POSITION backfill 1
     > POSITION caches 1
@@ -53,17 +51,15 @@ import fcntl
 import logging
 import struct
 from collections import defaultdict
-from typing import Any, DefaultDict, Dict, List, Set, Tuple
+from typing import Any, DefaultDict, Dict, List, Set
 
-from six import iteritems, iterkeys
+from six import iteritems
 
 from prometheus_client import Counter
 
-from twisted.internet import defer
 from twisted.protocols.basic import LineOnlyReceiver
 from twisted.python.failure import Failure
 
-from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.tcp.commands import (
@@ -82,11 +78,16 @@ from synapse.replication.tcp.commands import (
     SyncCommand,
     UserSyncCommand,
 )
-from synapse.replication.tcp.streams import STREAMS_MAP
+from synapse.replication.tcp.streams import STREAMS_MAP, Stream
 from synapse.types import Collection
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
 
+MYPY = False
+if MYPY:
+    from synapse.server import HomeServer
+
+
 connection_close_counter = Counter(
     "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"]
 )
@@ -411,16
+412,6 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): self.server_name = server_name self.streamer = streamer - # The streams the client has subscribed to and is up to date with - self.replication_streams = set() # type: Set[str] - - # The streams the client is currently subscribing to. - self.connecting_streams = set() # type: Set[str] - - # Map from stream name to list of updates to send once we've finished - # subscribing the client to the stream. - self.pending_rdata = {} # type: Dict[str, List[Tuple[int, Any]]] - def connectionMade(self): self.send_command(ServerCommand(self.server_name)) BaseReplicationStreamProtocol.connectionMade(self) @@ -436,21 +427,10 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): ) async def on_REPLICATE(self, cmd): - stream_name = cmd.stream_name - token = cmd.token - - if stream_name == "ALL": - # Subscribe to all streams we're publishing to. - deferreds = [ - run_in_background(self.subscribe_to_stream, stream, token) - for stream in iterkeys(self.streamer.streams_by_name) - ] - - await make_deferred_yieldable( - defer.gatherResults(deferreds, consumeErrors=True) - ) - else: - await self.subscribe_to_stream(stream_name, token) + # Subscribe to all streams we're publishing to. + for stream_name in self.streamer.streams_by_name: + current_token = self.streamer.get_stream_token(stream_name) + self.send_command(PositionCommand(stream_name, current_token)) async def on_FEDERATION_ACK(self, cmd): self.streamer.federation_ack(cmd.token) @@ -474,87 +454,12 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): cmd.last_seen, ) - async def subscribe_to_stream(self, stream_name, token): - """Subscribe the remote to a stream. - - This invloves checking if they've missed anything and sending those - updates down if they have. During that time new updates for the stream - are queued and sent once we've sent down any missed updates. - """ - self.replication_streams.discard(stream_name) - self.connecting_streams.add(stream_name) - - try: - # Get missing updates - updates, current_token = await self.streamer.get_stream_updates( - stream_name, token - ) - - # Send all the missing updates - for update in updates: - token, row = update[0], update[1] - self.send_command(RdataCommand(stream_name, token, row)) - - # We send a POSITION command to ensure that they have an up to - # date token (especially useful if we didn't send any updates - # above) - self.send_command(PositionCommand(stream_name, current_token)) - - # Now we can send any updates that came in while we were subscribing - pending_rdata = self.pending_rdata.pop(stream_name, []) - updates = [] - for token, update in pending_rdata: - # If the token is null, it is part of a batch update. Batches - # are multiple updates that share a single token. To denote - # this, the token is set to None for all tokens in the batch - # except for the last. If we find a None token, we keep looking - # through tokens until we find one that is not None and then - # process all previous updates in the batch as if they had the - # final token. 
- if token is None: - # Store this update as part of a batch - updates.append(update) - continue - - if token <= current_token: - # This update or batch of updates is older than - # current_token, dismiss it - updates = [] - continue - - updates.append(update) - - # Send all updates that are part of this batch with the - # found token - for update in updates: - self.send_command(RdataCommand(stream_name, token, update)) - - # Clear stored updates - updates = [] - - # They're now fully subscribed - self.replication_streams.add(stream_name) - except Exception as e: - logger.exception("[%s] Failed to handle REPLICATE command", self.id()) - self.send_error("failed to handle replicate: %r", e) - finally: - self.connecting_streams.discard(stream_name) - def stream_update(self, stream_name, token, data): """Called when a new update is available to stream to clients. We need to check if the client is interested in the stream or not """ - if stream_name in self.replication_streams: - # The client is subscribed to the stream - self.send_command(RdataCommand(stream_name, token, data)) - elif stream_name in self.connecting_streams: - # The client is being subscribed to the stream - logger.debug("[%s] Queuing RDATA %r %r", self.id(), stream_name, token) - self.pending_rdata.setdefault(stream_name, []).append((token, data)) - else: - # The client isn't subscribed - logger.debug("[%s] Dropping RDATA %r %r", self.id(), stream_name, token) + self.send_command(RdataCommand(stream_name, token, data)) def send_sync(self, data): self.send_command(SyncCommand(data)) @@ -638,6 +543,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): def __init__( self, + hs: "HomeServer", client_name: str, server_name: str, clock: Clock, @@ -649,22 +555,25 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): self.server_name = server_name self.handler = handler + self.streams = { + stream.NAME: stream(hs) for stream in STREAMS_MAP.values() + } # type: Dict[str, Stream] + # Set of stream names that have been subscribe to, but haven't yet # caught up with. This is used to track when the client has been fully # connected to the remote. - self.streams_connecting = set() # type: Set[str] + self.streams_connecting = set(STREAMS_MAP) # type: Set[str] # Map of stream to batched updates. See RdataCommand for info on how # batching works. 
- self.pending_batches = {} # type: Dict[str, Any] + self.pending_batches = {} # type: Dict[str, List[Any]] def connectionMade(self): self.send_command(NameCommand(self.client_name)) BaseReplicationStreamProtocol.connectionMade(self) # Once we've connected subscribe to the necessary streams - for stream_name, token in iteritems(self.handler.get_streams_to_replicate()): - self.replicate(stream_name, token) + self.replicate() # Tell the server if we have any users currently syncing (should only # happen on synchrotrons) @@ -676,10 +585,6 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): # We've now finished connecting to so inform the client handler self.handler.update_connection(self) - # This will happen if we don't actually subscribe to any streams - if not self.streams_connecting: - self.handler.finished_connecting() - async def on_SERVER(self, cmd): if cmd.data != self.server_name: logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) @@ -697,7 +602,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): ) raise - if cmd.token is None: + if cmd.token is None or stream_name in self.streams_connecting: # I.e. this is part of a batch of updates for this stream. Batch # until we get an update for the stream with a non None token self.pending_batches.setdefault(stream_name, []).append(row) @@ -707,14 +612,55 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): rows.append(row) await self.handler.on_rdata(stream_name, cmd.token, rows) - async def on_POSITION(self, cmd): - # When we get a `POSITION` command it means we've finished getting - # missing updates for the given stream, and are now up to date. + async def on_POSITION(self, cmd: PositionCommand): + stream = self.streams.get(cmd.stream_name) + if not stream: + logger.error("Got POSITION for unknown stream: %s", cmd.stream_name) + return + + # Find where we previously streamed up to. + current_token = self.handler.get_streams_to_replicate().get(cmd.stream_name) + if current_token is None: + logger.warning( + "Got POSITION for stream we're not subscribed to: %s", cmd.stream_name + ) + return + + # Fetch all updates between then and now. + limited = True + while limited: + updates, current_token, limited = await stream.get_updates_since( + current_token, cmd.token + ) + + # Check if the connection was closed underneath us, if so we bail + # rather than risk having concurrent catch ups going on. + if self.state == ConnectionStates.CLOSED: + return + + if updates: + await self.handler.on_rdata( + cmd.stream_name, + current_token, + [stream.parse_row(update[1]) for update in updates], + ) + + # We've now caught up to position sent to us, notify handler. + await self.handler.on_position(cmd.stream_name, cmd.token) + self.streams_connecting.discard(cmd.stream_name) if not self.streams_connecting: self.handler.finished_connecting() - await self.handler.on_position(cmd.stream_name, cmd.token) + # Check if the connection was closed underneath us, if so we bail + # rather than risk having concurrent catch ups going on. + if self.state == ConnectionStates.CLOSED: + return + + # Handle any RDATA that came in while we were catching up. 
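+            # (on_RDATA queues incoming rows in pending_batches while this
+            # stream is still in streams_connecting, so anything that arrived
+            # during the catchup above is waiting for us here.)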
+            rows = self.pending_batches.pop(cmd.stream_name, [])
+            if rows:
+                await self.handler.on_rdata(cmd.stream_name, rows[-1].token, rows)
 
     async def on_SYNC(self, cmd):
         self.handler.on_sync(cmd.data)
@@ -722,22 +668,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
     async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand):
         self.handler.on_remote_server_up(cmd.data)
 
-    def replicate(self, stream_name, token):
+    def replicate(self):
         """Send the subscription request to the server
         """
-        if stream_name not in STREAMS_MAP:
-            raise Exception("Invalid stream name %r" % (stream_name,))
+        logger.info("[%s] Subscribing to replication streams", self.id())
 
-        logger.info(
-            "[%s] Subscribing to replication stream: %r from %r",
-            self.id(),
-            stream_name,
-            token,
-        )
-
-        self.streams_connecting.add(stream_name)
-
-        self.send_command(ReplicateCommand(stream_name, token))
+        self.send_command(ReplicateCommand())
 
     def on_connection_closed(self):
         BaseReplicationStreamProtocol.on_connection_closed(self)
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index 6e2ebaf614..4374e99e32 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -17,7 +17,7 @@
 
 import logging
 import random
-from typing import Any, List
+from typing import Any, Dict, List
 
 from six import itervalues
 
@@ -30,7 +30,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.util.metrics import Measure, measure_func
 
 from .protocol import ServerReplicationStreamProtocol
-from .streams import STREAMS_MAP
+from .streams import STREAMS_MAP, Stream
 from .streams.federation import FederationStream
 
 stream_updates_counter = Counter(
@@ -52,7 +52,7 @@ class ReplicationStreamProtocolFactory(Factory):
     """
 
     def __init__(self, hs):
-        self.streamer = ReplicationStreamer(hs)
+        self.streamer = hs.get_replication_streamer()
         self.clock = hs.get_clock()
         self.server_name = hs.config.server_name
 
@@ -133,6 +133,11 @@ class ReplicationStreamer(object):
             for conn in self.connections:
                 conn.send_error("server shutting down")
 
+    def get_streams(self) -> Dict[str, Stream]:
+        """Get a map from stream name to stream instance.
+        """
+        return self.streams_by_name
+
     def on_notifier_poke(self):
         """Checks if there is actually any new data and sends it to the
         connections if there are.
@@ -190,7 +195,8 @@ class ReplicationStreamer(object):
                             stream.current_token(),
                         )
                         try:
-                            updates, current_token = await stream.get_updates()
+                            updates, current_token, limited = await stream.get_updates()
+                            self.pending_updates |= limited
                         except Exception:
                             logger.info("Failed to handle stream %s", stream.NAME)
                             raise
@@ -226,8 +232,7 @@ class ReplicationStreamer(object):
         self.pending_updates = False
         self.is_looping = False
 
-    @measure_func("repl.get_stream_updates")
-    async def get_stream_updates(self, stream_name, token):
-        """For a given stream get all updates since token. This is called
-        when a client first subscribes to a stream.
+    def get_stream_token(self, stream_name):
+        """Returns the current token for the given stream. This is called
+        when a client first subscribes to a stream.
""" @@ -235,7 +240,7 @@ class ReplicationStreamer(object): if not stream: raise Exception("unknown stream %s", stream_name) - return await stream.get_updates_since(token) + return stream.current_token() @measure_func("repl.federation_ack") def federation_ack(self, token): diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index 29199f5b46..37bcd3de66 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -24,6 +24,9 @@ Each stream is defined by the following information: current_token: The function that returns the current token for the stream update_function: The function that returns a list of updates between two tokens """ + +from typing import Dict, Type + from synapse.replication.tcp.streams._base import ( AccountDataStream, BackfillStream, @@ -35,6 +38,7 @@ from synapse.replication.tcp.streams._base import ( PushersStream, PushRulesStream, ReceiptsStream, + Stream, TagAccountDataStream, ToDeviceStream, TypingStream, @@ -63,10 +67,12 @@ STREAMS_MAP = { GroupServerStream, UserSignatureStream, ) -} +} # type: Dict[str, Type[Stream]] + __all__ = [ "STREAMS_MAP", + "Stream", "BackfillStream", "PresenceStream", "TypingStream", diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 32d9514883..c14dff6c64 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -14,13 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import itertools import logging from collections import namedtuple -from typing import Any, List, Optional, Tuple +from typing import Any, Awaitable, Callable, List, Optional, Tuple import attr +from synapse.replication.http.streams import ReplicationGetStreamUpdates from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -29,6 +29,15 @@ logger = logging.getLogger(__name__) MAX_EVENTS_BEHIND = 500000 +# Some type aliases to make things a bit easier. + +# A stream position token +Token = int + +# A pair of position in stream and args used to create an instance of `ROW_TYPE`. +StreamRow = Tuple[Token, tuple] + + class Stream(object): """Base class for the streams. @@ -56,6 +65,7 @@ class Stream(object): return cls.ROW_TYPE(*row) def __init__(self, hs): + # The token from which we last asked for updates self.last_token = self.current_token() @@ -65,61 +75,46 @@ class Stream(object): """ self.last_token = self.current_token() - async def get_updates(self): + async def get_updates(self) -> Tuple[List[Tuple[Token, JsonDict]], Token, bool]: """Gets all updates since the last time this function was called (or since the stream was constructed if it hadn't been called before). Returns: - Deferred[Tuple[List[Tuple[int, Any]], int]: - Resolves to a pair ``(updates, current_token)``, where ``updates`` is a - list of ``(token, row)`` entries. ``row`` will be json-serialised and - sent over the replication steam. + A triplet `(updates, new_last_token, limited)`, where `updates` is + a list of `(token, row)` entries, `new_last_token` is the new + position in stream, and `limited` is whether there are more updates + to fetch. 
""" - updates, current_token = await self.get_updates_since(self.last_token) + current_token = self.current_token() + updates, current_token, limited = await self.get_updates_since( + self.last_token, current_token + ) self.last_token = current_token - return updates, current_token + return updates, current_token, limited async def get_updates_since( - self, from_token: int - ) -> Tuple[List[Tuple[int, JsonDict]], int]: + self, from_token: Token, upto_token: Token, limit: int = 100 + ) -> Tuple[List[Tuple[Token, JsonDict]], Token, bool]: """Like get_updates except allows specifying from when we should stream updates Returns: - Resolves to a pair `(updates, new_last_token)`, where `updates` is - a list of `(token, row)` entries and `new_last_token` is the new - position in stream. + A triplet `(updates, new_last_token, limited)`, where `updates` is + a list of `(token, row)` entries, `new_last_token` is the new + position in stream, and `limited` is whether there are more updates + to fetch. """ - if from_token in ("NOW", "now"): - return [], self.current_token() - - current_token = self.current_token() - from_token = int(from_token) - if from_token == current_token: - return [], current_token + if from_token == upto_token: + return [], upto_token, False - rows = await self.update_function( - from_token, current_token, limit=MAX_EVENTS_BEHIND + 1 + updates, upto_token, limited = await self.update_function( + from_token, upto_token, limit=limit, ) - - # never turn more than MAX_EVENTS_BEHIND + 1 into updates. - rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1) - - updates = [(row[0], row[1:]) for row in rows] - - # check we didn't get more rows than the limit. - # doing it like this allows the update_function to be a generator. - if len(updates) >= MAX_EVENTS_BEHIND: - raise Exception("stream %s has fallen behind" % (self.NAME)) - - # The update function didn't hit the limit, so we must have got all - # the updates to `current_token`, and can return that as our new - # stream position. - return updates, current_token + return updates, upto_token, limited def current_token(self): """Gets the current token of the underlying streams. Should be provided @@ -141,6 +136,48 @@ class Stream(object): raise NotImplementedError() +def db_query_to_update_function( + query_function: Callable[[Token, Token, int], Awaitable[List[tuple]]] +) -> Callable[[Token, Token, int], Awaitable[Tuple[List[StreamRow], Token, bool]]]: + """Wraps a db query function which returns a list of rows to make it + suitable for use as an `update_function` for the Stream class + """ + + async def update_function(from_token, upto_token, limit): + rows = await query_function(from_token, upto_token, limit) + updates = [(row[0], row[1:]) for row in rows] + limited = False + if len(updates) == limit: + upto_token = rows[-1][0] + limited = True + + return updates, upto_token, limited + + return update_function + + +def make_http_update_function( + hs, stream_name: str +) -> Callable[[Token, Token, Token], Awaitable[Tuple[List[StreamRow], Token, bool]]]: + """Makes a suitable function for use as an `update_function` that queries + the master process for updates. 
+ """ + + client = ReplicationGetStreamUpdates.make_client(hs) + + async def update_function( + from_token: int, upto_token: int, limit: int + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: + return await client( + stream_name=stream_name, + from_token=from_token, + upto_token=upto_token, + limit=limit, + ) + + return update_function + + class BackfillStream(Stream): """We fetched some old events and either we had never seen that event before or it went from being an outlier to not. @@ -164,7 +201,7 @@ class BackfillStream(Stream): def __init__(self, hs): store = hs.get_datastore() self.current_token = store.get_current_backfill_token # type: ignore - self.update_function = store.get_all_new_backfill_event_rows # type: ignore + self.update_function = db_query_to_update_function(store.get_all_new_backfill_event_rows) # type: ignore super(BackfillStream, self).__init__(hs) @@ -190,8 +227,15 @@ class PresenceStream(Stream): store = hs.get_datastore() presence_handler = hs.get_presence_handler() + self._is_worker = hs.config.worker_app is not None + self.current_token = store.get_current_presence_token # type: ignore - self.update_function = presence_handler.get_all_presence_updates # type: ignore + + if hs.config.worker_app is None: + self.update_function = db_query_to_update_function(presence_handler.get_all_presence_updates) # type: ignore + else: + # Query master process + self.update_function = make_http_update_function(hs, self.NAME) # type: ignore super(PresenceStream, self).__init__(hs) @@ -208,7 +252,12 @@ class TypingStream(Stream): typing_handler = hs.get_typing_handler() self.current_token = typing_handler.get_current_token # type: ignore - self.update_function = typing_handler.get_all_typing_updates # type: ignore + + if hs.config.worker_app is None: + self.update_function = db_query_to_update_function(typing_handler.get_all_typing_updates) # type: ignore + else: + # Query master process + self.update_function = make_http_update_function(hs, self.NAME) # type: ignore super(TypingStream, self).__init__(hs) @@ -232,7 +281,7 @@ class ReceiptsStream(Stream): store = hs.get_datastore() self.current_token = store.get_max_receipt_stream_id # type: ignore - self.update_function = store.get_all_updated_receipts # type: ignore + self.update_function = db_query_to_update_function(store.get_all_updated_receipts) # type: ignore super(ReceiptsStream, self).__init__(hs) @@ -256,7 +305,13 @@ class PushRulesStream(Stream): async def update_function(self, from_token, to_token, limit): rows = await self.store.get_all_push_rule_updates(from_token, to_token, limit) - return [(row[0], row[2]) for row in rows] + + limited = False + if len(rows) == limit: + to_token = rows[-1][0] + limited = True + + return [(row[0], (row[2],)) for row in rows], to_token, limited class PushersStream(Stream): @@ -275,7 +330,7 @@ class PushersStream(Stream): store = hs.get_datastore() self.current_token = store.get_pushers_stream_token # type: ignore - self.update_function = store.get_all_updated_pushers_rows # type: ignore + self.update_function = db_query_to_update_function(store.get_all_updated_pushers_rows) # type: ignore super(PushersStream, self).__init__(hs) @@ -307,7 +362,7 @@ class CachesStream(Stream): store = hs.get_datastore() self.current_token = store.get_cache_stream_token # type: ignore - self.update_function = store.get_all_updated_caches # type: ignore + self.update_function = db_query_to_update_function(store.get_all_updated_caches) # type: ignore super(CachesStream, self).__init__(hs) @@ -333,7 +388,7 
@@ class PublicRoomsStream(Stream): store = hs.get_datastore() self.current_token = store.get_current_public_room_stream_id # type: ignore - self.update_function = store.get_all_new_public_rooms # type: ignore + self.update_function = db_query_to_update_function(store.get_all_new_public_rooms) # type: ignore super(PublicRoomsStream, self).__init__(hs) @@ -354,7 +409,7 @@ class DeviceListsStream(Stream): store = hs.get_datastore() self.current_token = store.get_device_stream_token # type: ignore - self.update_function = store.get_all_device_list_changes_for_remotes # type: ignore + self.update_function = db_query_to_update_function(store.get_all_device_list_changes_for_remotes) # type: ignore super(DeviceListsStream, self).__init__(hs) @@ -372,7 +427,7 @@ class ToDeviceStream(Stream): store = hs.get_datastore() self.current_token = store.get_to_device_stream_token # type: ignore - self.update_function = store.get_all_new_device_messages # type: ignore + self.update_function = db_query_to_update_function(store.get_all_new_device_messages) # type: ignore super(ToDeviceStream, self).__init__(hs) @@ -392,7 +447,7 @@ class TagAccountDataStream(Stream): store = hs.get_datastore() self.current_token = store.get_max_account_data_stream_id # type: ignore - self.update_function = store.get_all_updated_tags # type: ignore + self.update_function = db_query_to_update_function(store.get_all_updated_tags) # type: ignore super(TagAccountDataStream, self).__init__(hs) @@ -412,10 +467,11 @@ class AccountDataStream(Stream): self.store = hs.get_datastore() self.current_token = self.store.get_max_account_data_stream_id # type: ignore + self.update_function = db_query_to_update_function(self._update_function) # type: ignore super(AccountDataStream, self).__init__(hs) - async def update_function(self, from_token, to_token, limit): + async def _update_function(self, from_token, to_token, limit): global_results, room_results = await self.store.get_all_updated_account_data( from_token, from_token, to_token, limit ) @@ -442,7 +498,7 @@ class GroupServerStream(Stream): store = hs.get_datastore() self.current_token = store.get_group_stream_token # type: ignore - self.update_function = store.get_all_groups_changes # type: ignore + self.update_function = db_query_to_update_function(store.get_all_groups_changes) # type: ignore super(GroupServerStream, self).__init__(hs) @@ -460,6 +516,6 @@ class UserSignatureStream(Stream): store = hs.get_datastore() self.current_token = store.get_device_stream_token # type: ignore - self.update_function = store.get_all_user_signature_changes_for_remotes # type: ignore + self.update_function = db_query_to_update_function(store.get_all_user_signature_changes_for_remotes) # type: ignore super(UserSignatureStream, self).__init__(hs) diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index b3afabb8cd..c6a595629f 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -19,7 +19,7 @@ from typing import Tuple, Type import attr -from ._base import Stream +from ._base import Stream, db_query_to_update_function """Handling of the 'events' replication stream @@ -117,10 +117,11 @@ class EventsStream(Stream): def __init__(self, hs): self._store = hs.get_datastore() self.current_token = self._store.get_current_events_token # type: ignore + self.update_function = db_query_to_update_function(self._update_function) # type: ignore super(EventsStream, self).__init__(hs) - async def update_function(self, 
from_token, current_token, limit=None):
+    async def _update_function(self, from_token, current_token, limit=None):
         event_rows = await self._store.get_all_new_forward_event_rows(
             from_token, current_token, limit
         )
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index f5f9336430..48c1d45718 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -15,7 +15,9 @@
 # limitations under the License.
 from collections import namedtuple
 
-from ._base import Stream
+from twisted.internet import defer
+
+from synapse.replication.tcp.streams._base import Stream, db_query_to_update_function
 
 
 class FederationStream(Stream):
@@ -33,11 +35,18 @@ class FederationStream(Stream):
 
     NAME = "federation"
     ROW_TYPE = FederationStreamRow
+    _QUERY_MASTER = True
 
     def __init__(self, hs):
-        federation_sender = hs.get_federation_sender()
-
-        self.current_token = federation_sender.get_current_token  # type: ignore
-        self.update_function = federation_sender.get_replication_rows  # type: ignore
+        # Not all synapse instances will have a federation sender instance,
+        # whether that's a `FederationSender` or a `FederationRemoteSendQueue`,
+        # so we stub the stream out when that is the case.
+        if hs.config.worker_app is None or hs.should_send_federation():
+            federation_sender = hs.get_federation_sender()
+            self.current_token = federation_sender.get_current_token  # type: ignore
+            self.update_function = db_query_to_update_function(federation_sender.get_replication_rows)  # type: ignore
+        else:
+            self.current_token = lambda: 0  # type: ignore
+            self.update_function = lambda from_token, upto_token, limit: defer.succeed(([], upto_token, False))  # type: ignore
 
         super(FederationStream, self).__init__(hs)
diff --git a/synapse/server.py b/synapse/server.py
index 1b980371de..9426eb1672 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -85,6 +85,7 @@ from synapse.http.matrixfederationclient import MatrixFederationHttpClient
 from synapse.notifier import Notifier
 from synapse.push.action_generator import ActionGenerator
 from synapse.push.pusherpool import PusherPool
+from synapse.replication.tcp.resource import ReplicationStreamer
 from synapse.rest.media.v1.media_repository import (
     MediaRepository,
     MediaRepositoryResource,
@@ -199,6 +200,7 @@ class HomeServer(object):
         "saml_handler",
         "event_client_serializer",
         "storage",
+        "replication_streamer",
     ]
 
     REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
@@ -536,6 +538,9 @@ class HomeServer(object):
     def build_storage(self) -> Storage:
         return Storage(self, self.datastores)
 
+    def build_replication_streamer(self) -> ReplicationStreamer:
+        return ReplicationStreamer(self)
+
     def remove_pusher(self, app_id, push_key, user_id):
         return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
 
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index d4c44dcc75..4dc5da3fe8 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -32,7 +32,29 @@ logger = logging.getLogger(__name__)
 CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
 
-class CacheInvalidationStore(SQLBaseStore):
+class CacheInvalidationWorkerStore(SQLBaseStore):
+    def get_all_updated_caches(self, last_id, current_id, limit):
+        if last_id == current_id:
+            return defer.succeed([])
+
+        def get_all_updated_caches_txn(txn):
+            # We purposefully don't bound by the current token, as we want to
+            # send across cache
invalidations as quickly as possible. Cache + # invalidations are idempotent, so duplicates are fine. + sql = ( + "SELECT stream_id, cache_func, keys, invalidation_ts" + " FROM cache_invalidation_stream" + " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?" + ) + txn.execute(sql, (last_id, limit)) + return txn.fetchall() + + return self.db.runInteraction( + "get_all_updated_caches", get_all_updated_caches_txn + ) + + +class CacheInvalidationStore(CacheInvalidationWorkerStore): async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -145,26 +167,6 @@ class CacheInvalidationStore(SQLBaseStore): }, ) - def get_all_updated_caches(self, last_id, current_id, limit): - if last_id == current_id: - return defer.succeed([]) - - def get_all_updated_caches_txn(txn): - # We purposefully don't bound by the current token, as we want to - # send across cache invalidations as quickly as possible. Cache - # invalidations are idempotent, so duplicates are fine. - sql = ( - "SELECT stream_id, cache_func, keys, invalidation_ts" - " FROM cache_invalidation_stream" - " WHERE stream_id > ? ORDER BY stream_id ASC LIMIT ?" - ) - txn.execute(sql, (last_id, limit)) - return txn.fetchall() - - return self.db.runInteraction( - "get_all_updated_caches", get_all_updated_caches_txn - ) - def get_cache_stream_token(self): if self._cache_id_gen: return self._cache_id_gen.get_current_token() diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py index 0613b49f4a..9a1178fb39 100644 --- a/synapse/storage/data_stores/main/deviceinbox.py +++ b/synapse/storage/data_stores/main/deviceinbox.py @@ -207,6 +207,50 @@ class DeviceInboxWorkerStore(SQLBaseStore): "delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn ) + def get_all_new_device_messages(self, last_pos, current_pos, limit): + """ + Args: + last_pos(int): + current_pos(int): + limit(int): + Returns: + A deferred list of rows from the device inbox + """ + if last_pos == current_pos: + return defer.succeed([]) + + def get_all_new_device_messages_txn(txn): + # We limit like this as we might have multiple rows per stream_id, and + # we want to make sure we always get all entries for any stream_id + # we return. + upper_pos = min(current_pos, last_pos + limit) + sql = ( + "SELECT max(stream_id), user_id" + " FROM device_inbox" + " WHERE ? < stream_id AND stream_id <= ?" + " GROUP BY user_id" + ) + txn.execute(sql, (last_pos, upper_pos)) + rows = txn.fetchall() + + sql = ( + "SELECT max(stream_id), destination" + " FROM device_federation_outbox" + " WHERE ? < stream_id AND stream_id <= ?" 
+ " GROUP BY destination" + ) + txn.execute(sql, (last_pos, upper_pos)) + rows.extend(txn) + + # Order by ascending stream ordering + rows.sort() + + return rows + + return self.db.runInteraction( + "get_all_new_device_messages", get_all_new_device_messages_txn + ) + class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" @@ -411,47 +455,3 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore) rows.append((user_id, device_id, stream_id, message_json)) txn.executemany(sql, rows) - - def get_all_new_device_messages(self, last_pos, current_pos, limit): - """ - Args: - last_pos(int): - current_pos(int): - limit(int): - Returns: - A deferred list of rows from the device inbox - """ - if last_pos == current_pos: - return defer.succeed([]) - - def get_all_new_device_messages_txn(txn): - # We limit like this as we might have multiple rows per stream_id, and - # we want to make sure we always get all entries for any stream_id - # we return. - upper_pos = min(current_pos, last_pos + limit) - sql = ( - "SELECT max(stream_id), user_id" - " FROM device_inbox" - " WHERE ? < stream_id AND stream_id <= ?" - " GROUP BY user_id" - ) - txn.execute(sql, (last_pos, upper_pos)) - rows = txn.fetchall() - - sql = ( - "SELECT max(stream_id), destination" - " FROM device_federation_outbox" - " WHERE ? < stream_id AND stream_id <= ?" - " GROUP BY destination" - ) - txn.execute(sql, (last_pos, upper_pos)) - rows.extend(txn) - - # Order by ascending stream ordering - rows.sort() - - return rows - - return self.db.runInteraction( - "get_all_new_device_messages", get_all_new_device_messages_txn - ) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index d593ef47b8..e71c23541d 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1267,104 +1267,6 @@ class EventsStore( ret = yield self.db.runInteraction("count_daily_active_rooms", _count) return ret - def get_current_backfill_token(self): - """The current minimum token that backfilled events have reached""" - return -self._backfill_id_gen.get_current_token() - - def get_current_events_token(self): - """The current maximum token that events have reached""" - return self._stream_id_gen.get_current_token() - - def get_all_new_forward_event_rows(self, last_id, current_id, limit): - if last_id == current_id: - return defer.succeed([]) - - def get_all_new_forward_event_rows(txn): - sql = ( - "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts, relates_to_id" - " FROM events AS e" - " LEFT JOIN redactions USING (event_id)" - " LEFT JOIN state_events USING (event_id)" - " LEFT JOIN event_relations USING (event_id)" - " WHERE ? < stream_ordering AND stream_ordering <= ?" - " ORDER BY stream_ordering ASC" - " LIMIT ?" - ) - txn.execute(sql, (last_id, current_id, limit)) - new_event_updates = txn.fetchall() - - if len(new_event_updates) == limit: - upper_bound = new_event_updates[-1][0] - else: - upper_bound = current_id - - sql = ( - "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts, relates_to_id" - " FROM events AS e" - " INNER JOIN ex_outlier_stream USING (event_id)" - " LEFT JOIN redactions USING (event_id)" - " LEFT JOIN state_events USING (event_id)" - " LEFT JOIN event_relations USING (event_id)" - " WHERE ? < event_stream_ordering" - " AND event_stream_ordering <= ?" 
- " ORDER BY event_stream_ordering DESC" - ) - txn.execute(sql, (last_id, upper_bound)) - new_event_updates.extend(txn) - - return new_event_updates - - return self.db.runInteraction( - "get_all_new_forward_event_rows", get_all_new_forward_event_rows - ) - - def get_all_new_backfill_event_rows(self, last_id, current_id, limit): - if last_id == current_id: - return defer.succeed([]) - - def get_all_new_backfill_event_rows(txn): - sql = ( - "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts, relates_to_id" - " FROM events AS e" - " LEFT JOIN redactions USING (event_id)" - " LEFT JOIN state_events USING (event_id)" - " LEFT JOIN event_relations USING (event_id)" - " WHERE ? > stream_ordering AND stream_ordering >= ?" - " ORDER BY stream_ordering ASC" - " LIMIT ?" - ) - txn.execute(sql, (-last_id, -current_id, limit)) - new_event_updates = txn.fetchall() - - if len(new_event_updates) == limit: - upper_bound = new_event_updates[-1][0] - else: - upper_bound = current_id - - sql = ( - "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts, relates_to_id" - " FROM events AS e" - " INNER JOIN ex_outlier_stream USING (event_id)" - " LEFT JOIN redactions USING (event_id)" - " LEFT JOIN state_events USING (event_id)" - " LEFT JOIN event_relations USING (event_id)" - " WHERE ? > event_stream_ordering" - " AND event_stream_ordering >= ?" - " ORDER BY event_stream_ordering DESC" - ) - txn.execute(sql, (-last_id, -upper_bound)) - new_event_updates.extend(txn.fetchall()) - - return new_event_updates - - return self.db.runInteraction( - "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows - ) - @cached(num_args=5, max_entries=10) def get_all_new_events( self, @@ -1850,22 +1752,6 @@ class EventsStore( return (int(res["topological_ordering"]), int(res["stream_ordering"])) - def get_all_updated_current_state_deltas(self, from_token, to_token, limit): - def get_all_updated_current_state_deltas_txn(txn): - sql = """ - SELECT stream_id, room_id, type, state_key, event_id - FROM current_state_delta_stream - WHERE ? < stream_id AND stream_id <= ? - ORDER BY stream_id ASC LIMIT ? 
- """ - txn.execute(sql, (from_token, to_token, limit)) - return txn.fetchall() - - return self.db.runInteraction( - "get_all_updated_current_state_deltas", - get_all_updated_current_state_deltas_txn, - ) - def insert_labels_for_event_txn( self, txn, event_id, labels, room_id, topological_ordering ): diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 3013f49d32..16ea8948b1 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -963,3 +963,117 @@ class EventsWorkerStore(SQLBaseStore): complexity_v1 = round(state_events / 500, 2) return {"v1": complexity_v1} + + def get_current_backfill_token(self): + """The current minimum token that backfilled events have reached""" + return -self._backfill_id_gen.get_current_token() + + def get_current_events_token(self): + """The current maximum token that events have reached""" + return self._stream_id_gen.get_current_token() + + def get_all_new_forward_event_rows(self, last_id, current_id, limit): + if last_id == current_id: + return defer.succeed([]) + + def get_all_new_forward_event_rows(txn): + sql = ( + "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," + " state_key, redacts, relates_to_id" + " FROM events AS e" + " LEFT JOIN redactions USING (event_id)" + " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" + " WHERE ? < stream_ordering AND stream_ordering <= ?" + " ORDER BY stream_ordering ASC" + " LIMIT ?" + ) + txn.execute(sql, (last_id, current_id, limit)) + new_event_updates = txn.fetchall() + + if len(new_event_updates) == limit: + upper_bound = new_event_updates[-1][0] + else: + upper_bound = current_id + + sql = ( + "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," + " state_key, redacts, relates_to_id" + " FROM events AS e" + " INNER JOIN ex_outlier_stream USING (event_id)" + " LEFT JOIN redactions USING (event_id)" + " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" + " WHERE ? < event_stream_ordering" + " AND event_stream_ordering <= ?" + " ORDER BY event_stream_ordering DESC" + ) + txn.execute(sql, (last_id, upper_bound)) + new_event_updates.extend(txn) + + return new_event_updates + + return self.db.runInteraction( + "get_all_new_forward_event_rows", get_all_new_forward_event_rows + ) + + def get_all_new_backfill_event_rows(self, last_id, current_id, limit): + if last_id == current_id: + return defer.succeed([]) + + def get_all_new_backfill_event_rows(txn): + sql = ( + "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type," + " state_key, redacts, relates_to_id" + " FROM events AS e" + " LEFT JOIN redactions USING (event_id)" + " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" + " WHERE ? > stream_ordering AND stream_ordering >= ?" + " ORDER BY stream_ordering ASC" + " LIMIT ?" + ) + txn.execute(sql, (-last_id, -current_id, limit)) + new_event_updates = txn.fetchall() + + if len(new_event_updates) == limit: + upper_bound = new_event_updates[-1][0] + else: + upper_bound = current_id + + sql = ( + "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type," + " state_key, redacts, relates_to_id" + " FROM events AS e" + " INNER JOIN ex_outlier_stream USING (event_id)" + " LEFT JOIN redactions USING (event_id)" + " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" + " WHERE ? 
> event_stream_ordering" + " AND event_stream_ordering >= ?" + " ORDER BY event_stream_ordering DESC" + ) + txn.execute(sql, (-last_id, -upper_bound)) + new_event_updates.extend(txn.fetchall()) + + return new_event_updates + + return self.db.runInteraction( + "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows + ) + + def get_all_updated_current_state_deltas(self, from_token, to_token, limit): + def get_all_updated_current_state_deltas_txn(txn): + sql = """ + SELECT stream_id, room_id, type, state_key, event_id + FROM current_state_delta_stream + WHERE ? < stream_id AND stream_id <= ? + ORDER BY stream_id ASC LIMIT ? + """ + txn.execute(sql, (from_token, to_token, limit)) + return txn.fetchall() + + return self.db.runInteraction( + "get_all_updated_current_state_deltas", + get_all_updated_current_state_deltas_txn, + ) diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index e6c10c6316..aaebe427d3 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -732,6 +732,26 @@ class RoomWorkerStore(SQLBaseStore): return total_media_quarantined + def get_all_new_public_rooms(self, prev_id, current_id, limit): + def get_all_new_public_rooms(txn): + sql = """ + SELECT stream_id, room_id, visibility, appservice_id, network_id + FROM public_room_list_stream + WHERE stream_id > ? AND stream_id <= ? + ORDER BY stream_id ASC + LIMIT ? + """ + + txn.execute(sql, (prev_id, current_id, limit)) + return txn.fetchall() + + if prev_id == current_id: + return defer.succeed([]) + + return self.db.runInteraction( + "get_all_new_public_rooms", get_all_new_public_rooms + ) + class RoomBackgroundUpdateStore(SQLBaseStore): REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" @@ -1249,26 +1269,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): def get_current_public_room_stream_id(self): return self._public_room_id_gen.get_current_token() - def get_all_new_public_rooms(self, prev_id, current_id, limit): - def get_all_new_public_rooms(txn): - sql = """ - SELECT stream_id, room_id, visibility, appservice_id, network_id - FROM public_room_list_stream - WHERE stream_id > ? AND stream_id <= ? - ORDER BY stream_id ASC - LIMIT ? - """ - - txn.execute(sql, (prev_id, current_id, limit)) - return txn.fetchall() - - if prev_id == current_id: - return defer.succeed([]) - - return self.db.runInteraction( - "get_all_new_public_rooms", get_all_new_public_rooms - ) - @defer.inlineCallbacks def block_room(self, room_id, user_id): """Marks the room as blocked. Can be called multiple times. diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py index e96ad4ca4e..a755fe2879 100644 --- a/tests/replication/tcp/streams/_base.py +++ b/tests/replication/tcp/streams/_base.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from mock import Mock from synapse.replication.tcp.commands import ReplicateCommand @@ -29,19 +30,37 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): # build a replication server server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = server_factory.streamer - server = server_factory.buildProtocol(None) + self.server = server_factory.buildProtocol(None) - # build a replication client, with a dummy handler - handler_factory = Mock() - self.test_handler = TestReplicationClientHandler() - self.test_handler.factory = handler_factory + self.test_handler = Mock(wraps=TestReplicationClientHandler()) self.client = ClientReplicationStreamProtocol( - "client", "test", clock, self.test_handler + hs, "client", "test", clock, self.test_handler, ) - # wire them together - self.client.makeConnection(FakeTransport(server, reactor)) - server.makeConnection(FakeTransport(self.client, reactor)) + self._client_transport = None + self._server_transport = None + + def reconnect(self): + if self._client_transport: + self.client.close() + + if self._server_transport: + self.server.close() + + self._client_transport = FakeTransport(self.server, self.reactor) + self.client.makeConnection(self._client_transport) + + self._server_transport = FakeTransport(self.client, self.reactor) + self.server.makeConnection(self._server_transport) + + def disconnect(self): + if self._client_transport: + self._client_transport = None + self.client.close() + + if self._server_transport: + self._server_transport = None + self.server.close() def replicate(self): """Tell the master side of replication that something has happened, and then @@ -50,19 +69,24 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): self.streamer.on_notifier_poke() self.pump(0.1) - def replicate_stream(self, stream, token="NOW"): + def replicate_stream(self): """Make the client end a REPLICATE command to set up a subscription to a stream""" - self.client.send_command(ReplicateCommand(stream, token)) + self.client.send_command(ReplicateCommand()) class TestReplicationClientHandler(object): """Drop-in for ReplicationClientHandler which just collects RDATA rows""" def __init__(self): - self.received_rdata_rows = [] + self.streams = set() + self._received_rdata_rows = [] def get_streams_to_replicate(self): - return {} + positions = {s: 0 for s in self.streams} + for stream, token, _ in self._received_rdata_rows: + if stream in self.streams: + positions[stream] = max(token, positions.get(stream, 0)) + return positions def get_currently_syncing_users(self): return [] @@ -73,6 +97,9 @@ class TestReplicationClientHandler(object): def finished_connecting(self): pass + async def on_position(self, stream_name, token): + """Called when we get new position data.""" + async def on_rdata(self, stream_name, token, rows): for r in rows: - self.received_rdata_rows.append((stream_name, token, r)) + self._received_rdata_rows.append((stream_name, token, r)) diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index fa2493cad6..0ec0825a0e 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -17,30 +17,64 @@ from synapse.replication.tcp.streams._base import ReceiptsStream from tests.replication.tcp.streams._base import BaseStreamTestCase USER_ID = "@feeling:blue" -ROOM_ID = "!room:blue" -EVENT_ID = "$event:blue" class ReceiptsStreamTestCase(BaseStreamTestCase): def test_receipt(self): + self.reconnect() + # make the client subscribe 
to the receipts stream - self.replicate_stream("receipts", "NOW") + self.replicate_stream() + self.test_handler.streams.add("receipts") # tell the master to send a new receipt self.get_success( self.hs.get_datastore().insert_receipt( - ROOM_ID, "m.read", USER_ID, [EVENT_ID], {"a": 1} + "!room:blue", "m.read", USER_ID, ["$event:blue"], {"a": 1} ) ) self.replicate() # there should be one RDATA command - rdata_rows = self.test_handler.received_rdata_rows + self.test_handler.on_rdata.assert_called_once() + stream_name, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.assertEqual(stream_name, "receipts") self.assertEqual(1, len(rdata_rows)) - self.assertEqual(rdata_rows[0][0], "receipts") - row = rdata_rows[0][2] # type: ReceiptsStream.ReceiptsStreamRow - self.assertEqual(ROOM_ID, row.room_id) + row = rdata_rows[0] # type: ReceiptsStream.ReceiptsStreamRow + self.assertEqual("!room:blue", row.room_id) self.assertEqual("m.read", row.receipt_type) self.assertEqual(USER_ID, row.user_id) - self.assertEqual(EVENT_ID, row.event_id) + self.assertEqual("$event:blue", row.event_id) self.assertEqual({"a": 1}, row.data) + + # Now let's disconnect and insert some data. + self.disconnect() + + self.test_handler.on_rdata.reset_mock() + + self.get_success( + self.hs.get_datastore().insert_receipt( + "!room2:blue", "m.read", USER_ID, ["$event2:foo"], {"a": 2} + ) + ) + self.replicate() + + # Nothing should have happened as we are disconnected + self.test_handler.on_rdata.assert_not_called() + + self.reconnect() + self.pump(0.1) + + # We should now have caught up and get the missing data + self.test_handler.on_rdata.assert_called_once() + stream_name, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.assertEqual(stream_name, "receipts") + self.assertEqual(token, 3) + self.assertEqual(1, len(rdata_rows)) + + row = rdata_rows[0] # type: ReceiptsStream.ReceiptsStreamRow + self.assertEqual("!room2:blue", row.room_id) + self.assertEqual("m.read", row.receipt_type) + self.assertEqual(USER_ID, row.user_id) + self.assertEqual("$event2:foo", row.event_id) + self.assertEqual({"a": 2}, row.data) From 6ca5e56fd12bbccb6b3ab43ed7c0281e4822274a Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 25 Mar 2020 12:49:34 -0500 Subject: [PATCH 306/382] Remove unused captcha_bypass_secret option (#7137) Signed-off-by: Aaron Raimist --- changelog.d/7137.removal | 1 + docs/sample_config.yaml | 4 ---- synapse/config/captcha.py | 5 ----- 3 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 changelog.d/7137.removal diff --git a/changelog.d/7137.removal b/changelog.d/7137.removal new file mode 100644 index 0000000000..75266a06bb --- /dev/null +++ b/changelog.d/7137.removal @@ -0,0 +1 @@ +Remove nonfunctional `captcha_bypass_secret` option from `homeserver.yaml`. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 276e43b732..2ef83646b3 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -872,10 +872,6 @@ media_store_path: "DATADIR/media_store" # #enable_registration_captcha: false -# A secret key used to bypass the captcha test entirely. -# -#captcha_bypass_secret: "YOUR_SECRET_HERE" - # The API endpoint to use for verifying m.login.recaptcha responses. 
 #
 #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"

diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index f0171bb5b2..56c87fa296 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -24,7 +24,6 @@ class CaptchaConfig(Config):
         self.enable_registration_captcha = config.get(
             "enable_registration_captcha", False
         )
-        self.captcha_bypass_secret = config.get("captcha_bypass_secret")
         self.recaptcha_siteverify_api = config.get(
             "recaptcha_siteverify_api",
             "https://www.recaptcha.net/recaptcha/api/siteverify",
@@ -49,10 +48,6 @@ class CaptchaConfig(Config):
         #
         #enable_registration_captcha: false

-        # A secret key used to bypass the captcha test entirely.
-        #
-        #captcha_bypass_secret: "YOUR_SECRET_HERE"
-
         # The API endpoint to use for verifying m.login.recaptcha responses.
         #
         #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"

From 1c1242acba9694a3a4b1eb3b14ec0bac11ee4ff8 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 26 Mar 2020 07:39:34 -0400
Subject: [PATCH 307/382] Validate that the session is not modified during
 UI-Auth (#7068)

---
 changelog.d/7068.bugfix                  |  1 +
 synapse/handlers/auth.py                 | 37 +++++++++++--
 synapse/rest/client/v2_alpha/account.py  | 11 ++--
 synapse/rest/client/v2_alpha/devices.py  |  4 +-
 synapse/rest/client/v2_alpha/keys.py     |  2 +-
 synapse/rest/client/v2_alpha/register.py |  5 +-
 tests/rest/client/v2_alpha/test_auth.py  | 68 +++++++++++++++++++++++-
 tests/test_terms_auth.py                 |  3 +-
 8 files changed, 117 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/7068.bugfix

diff --git a/changelog.d/7068.bugfix b/changelog.d/7068.bugfix
new file mode 100644
index 0000000000..d1693a7f22
--- /dev/null
+++ b/changelog.d/7068.bugfix
@@ -0,0 +1 @@
+Ensure that a user interactive authentication session is tied to a single request.
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 7860f9625e..2ce1425dfa 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -125,7 +125,11 @@ class AuthHandler(BaseHandler):

     @defer.inlineCallbacks
     def validate_user_via_ui_auth(
-        self, requester: Requester, request_body: Dict[str, Any], clientip: str
+        self,
+        requester: Requester,
+        request: SynapseRequest,
+        request_body: Dict[str, Any],
+        clientip: str,
     ):
         """
         Checks that the user is who they claim to be, via a UI auth.
@@ -137,6 +141,8 @@ class AuthHandler(BaseHandler):
         Args:
             requester: The user, as given by the access token

+            request: The request sent by the client.
+
             request_body: The body of the request sent by the client

             clientip: The IP address of the client.
@@ -172,7 +178,9 @@ class AuthHandler(BaseHandler):
         flows = [[login_type] for login_type in self._supported_login_types]

         try:
-            result, params, _ = yield self.check_auth(flows, request_body, clientip)
+            result, params, _ = yield self.check_auth(
+                flows, request, request_body, clientip
+            )
         except LoginError:
             # Update the ratelimite to say we failed (`can_do_action` doesn't raise).
             self._failed_uia_attempts_ratelimiter.can_do_action(
@@ -211,7 +219,11 @@ class AuthHandler(BaseHandler):

     @defer.inlineCallbacks
     def check_auth(
-        self, flows: List[List[str]], clientdict: Dict[str, Any], clientip: str
+        self,
+        flows: List[List[str]],
+        request: SynapseRequest,
+        clientdict: Dict[str, Any],
+        clientip: str,
     ):
         """
         Takes a dictionary sent by the client in the login / registration
@@ -231,6 +243,8 @@ class AuthHandler(BaseHandler):
                    strings representing auth-types.
At least one full flow must be completed in order for auth to be successful. + request: The request sent by the client. + clientdict: The dictionary from the client root level, not the 'auth' key: this method prompts for auth if none is sent. @@ -270,13 +284,27 @@ class AuthHandler(BaseHandler): # email auth link on there). It's probably too open to abuse # because it lets unauthenticated clients store arbitrary objects # on a homeserver. - # Revisit: Assumimg the REST APIs do sensible validation, the data + # Revisit: Assuming the REST APIs do sensible validation, the data # isn't arbintrary. session["clientdict"] = clientdict self._save_session(session) elif "clientdict" in session: clientdict = session["clientdict"] + # Ensure that the queried operation does not vary between stages of + # the UI authentication session. This is done by generating a stable + # comparator based on the URI, method, and body (minus the auth dict) + # and storing it during the initial query. Subsequent queries ensure + # that this comparator has not changed. + comparator = (request.uri, request.method, clientdict) + if "ui_auth" not in session: + session["ui_auth"] = comparator + elif session["ui_auth"] != comparator: + raise SynapseError( + 403, + "Requested operation has changed during the UI authentication session.", + ) + if not authdict: raise InteractiveAuthIncompleteError( self._auth_dict_for_flows(flows, session) @@ -322,6 +350,7 @@ class AuthHandler(BaseHandler): creds, list(clientdict), ) + return creds, clientdict, session["id"] ret = self._auth_dict_for_flows(flows, session) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 631cc74cb4..b1249b664c 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -234,13 +234,16 @@ class PasswordRestServlet(RestServlet): if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req(request) params = await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) user_id = requester.user.to_string() else: requester = None result, params, _ = await self.auth_handler.check_auth( - [[LoginType.EMAIL_IDENTITY]], body, self.hs.get_ip_from_request(request) + [[LoginType.EMAIL_IDENTITY]], + request, + body, + self.hs.get_ip_from_request(request), ) if LoginType.EMAIL_IDENTITY in result: @@ -308,7 +311,7 @@ class DeactivateAccountRestServlet(RestServlet): return 200, {} await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) result = await self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase, id_server=body.get("id_server") @@ -656,7 +659,7 @@ class ThreepidAddRestServlet(RestServlet): assert_valid_client_secret(client_secret) await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) validation_session = await self.identity_handler.validate_threepid_session( diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 94ff73f384..119d979052 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -81,7 +81,7 @@ class DeleteDevicesRestServlet(RestServlet): assert_params_in_dict(body, 
["devices"]) await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) await self.device_handler.delete_devices( @@ -127,7 +127,7 @@ class DeviceRestServlet(RestServlet): raise await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) await self.device_handler.delete_device(requester.user.to_string(), device_id) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index f7ed4daf90..5eb7ef35a4 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -263,7 +263,7 @@ class SigningKeyUploadServlet(RestServlet): body = parse_json_object_from_request(request) await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) + requester, request, body, self.hs.get_ip_from_request(request), ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index a09189b1b4..6963d79310 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -499,7 +499,10 @@ class RegisterRestServlet(RestServlet): ) auth_result, params, session_id = await self.auth_handler.check_auth( - self._registration_flows, body, self.hs.get_ip_from_request(request) + self._registration_flows, + request, + body, + self.hs.get_ip_from_request(request), ) # Check that we're not trying to register a denied 3pid. diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index b6df1396ad..624bf5ada2 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -104,7 +104,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase): ) self.render(request) - # Now we should have fufilled a complete auth flow, including + # Now we should have fulfilled a complete auth flow, including # the recaptcha fallback step, we can then send a # request to the register API with the session in the authdict. request, channel = self.make_request( @@ -115,3 +115,69 @@ class FallbackAuthTests(unittest.HomeserverTestCase): # We're given a registered user. self.assertEqual(channel.json_body["user_id"], "@user:test") + + def test_cannot_change_operation(self): + """ + The initial requested operation cannot be modified during the user interactive authentication session. + """ + + # Make the initial request to register. (Later on a different password + # will be used.) 
+        request, channel = self.make_request(
+            "POST",
+            "register",
+            {"username": "user", "type": "m.login.password", "password": "bar"},
+        )
+        self.render(request)
+
+        # Returns a 401 as per the spec
+        self.assertEqual(request.code, 401)
+        # Grab the session
+        session = channel.json_body["session"]
+        # Assert our configured public key is being given
+        self.assertEqual(
+            channel.json_body["params"]["m.login.recaptcha"]["public_key"], "brokencake"
+        )
+
+        request, channel = self.make_request(
+            "GET", "auth/m.login.recaptcha/fallback/web?session=" + session
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        request, channel = self.make_request(
+            "POST",
+            "auth/m.login.recaptcha/fallback/web?session="
+            + session
+            + "&g-recaptcha-response=a",
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        # The recaptcha handler is called with the response given
+        attempts = self.recaptcha_checker.recaptcha_attempts
+        self.assertEqual(len(attempts), 1)
+        self.assertEqual(attempts[0][0]["response"], "a")
+
+        # also complete the dummy auth
+        request, channel = self.make_request(
+            "POST", "register", {"auth": {"session": session, "type": "m.login.dummy"}}
+        )
+        self.render(request)
+
+        # Now we should have fulfilled a complete auth flow, including
+        # the recaptcha fallback step. Make the initial request again, but
+        # with a different password. This causes the request to fail since the
+        # operation was modified during the UI auth session.
+        request, channel = self.make_request(
+            "POST",
+            "register",
+            {
+                "username": "user",
+                "type": "m.login.password",
+                "password": "foo",  # Note this doesn't match the original request.
+                "auth": {"session": session},
+            },
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 403)
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
index 5ec5d2b358..a3f98a1412 100644
--- a/tests/test_terms_auth.py
+++ b/tests/test_terms_auth.py
@@ -53,7 +53,8 @@ class TermsTestCase(unittest.HomeserverTestCase):

     def test_ui_auth(self):
         # Do a UI auth request
-        request, channel = self.make_request(b"POST", self.url, b"{}")
+        request_data = json.dumps({"username": "kermit", "password": "monkey"})
+        request, channel = self.make_request(b"POST", self.url, request_data)
         self.render(request)

         self.assertEquals(channel.result["code"], b"401", channel.result)

From e8e2ddb60ae11db488f159901d918cb159695912 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Thu, 26 Mar 2020 17:51:13 +0100
Subject: [PATCH 308/382] Allow server admins to define and enforce a password
 policy (MSC2000).
 (#7118)

---
 changelog.d/7118.feature                      |   1 +
 docs/sample_config.yaml                       |  35 ++++
 synapse/api/errors.py                         |  21 ++
 synapse/config/password.py                    |  39 ++++
 synapse/handlers/password_policy.py           |  93 +++++++++
 synapse/handlers/set_password.py              |   2 +
 synapse/rest/__init__.py                      |   2 +
 .../rest/client/v2_alpha/password_policy.py   |  58 ++++++
 synapse/rest/client/v2_alpha/register.py      |   2 +
 synapse/server.py                             |   5 +
 .../client/v2_alpha/test_password_policy.py   | 179 ++++++++++++++++++
 11 files changed, 437 insertions(+)
 create mode 100644 changelog.d/7118.feature
 create mode 100644 synapse/handlers/password_policy.py
 create mode 100644 synapse/rest/client/v2_alpha/password_policy.py
 create mode 100644 tests/rest/client/v2_alpha/test_password_policy.py

diff --git a/changelog.d/7118.feature b/changelog.d/7118.feature
new file mode 100644
index 0000000000..5cbfd98160
--- /dev/null
+++ b/changelog.d/7118.feature
@@ -0,0 +1 @@
+Allow server admins to define and enforce a password policy (MSC2000).
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 2ef83646b3..1a1d061759 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1482,6 +1482,41 @@ password_config:
    #
    #pepper: "EVEN_MORE_SECRET"

+   # Define and enforce a password policy. Each parameter is optional.
+   # This is an implementation of MSC2000.
+   #
+   policy:
+      # Whether to enforce the password policy.
+      # Defaults to 'false'.
+      #
+      #enabled: true
+
+      # Minimum accepted length for a password.
+      # Defaults to 0.
+      #
+      #minimum_length: 15
+
+      # Whether a password must contain at least one digit.
+      # Defaults to 'false'.
+      #
+      #require_digit: true
+
+      # Whether a password must contain at least one symbol.
+      # A symbol is any character that's not a number or a letter.
+      # Defaults to 'false'.
+      #
+      #require_symbol: true
+
+      # Whether a password must contain at least one lowercase letter.
+      # Defaults to 'false'.
+      #
+      #require_lowercase: true
+
+      # Whether a password must contain at least one uppercase letter.
+      # Defaults to 'false'.
+      #
+      #require_uppercase: true
+

 # Configuration for sending emails from Synapse.
 #
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 616942b057..11da016ac5 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -64,6 +64,13 @@ class Codes(object):
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
     WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
     EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
+    PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
+    PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
+    PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
+    PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
+    PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
+    PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
+    WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
     BAD_ALIAS = "M_BAD_ALIAS"
@@ -439,6 +446,20 @@ class IncompatibleRoomVersionError(SynapseError):
         return cs_error(self.msg, self.errcode, room_version=self._room_version)


+class PasswordRefusedError(SynapseError):
+    """A password has been refused, either during password reset/change or registration.
+ """ + + def __init__( + self, + msg="This password doesn't comply with the server's policy", + errcode=Codes.WEAK_PASSWORD, + ): + super(PasswordRefusedError, self).__init__( + code=400, msg=msg, errcode=errcode, + ) + + class RequestSendFailed(RuntimeError): """Sending a HTTP request over federation failed due to not being able to talk to the remote server for some reason. diff --git a/synapse/config/password.py b/synapse/config/password.py index 2a634ac751..9c0ea8c30a 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -31,6 +31,10 @@ class PasswordConfig(Config): self.password_localdb_enabled = password_config.get("localdb_enabled", True) self.password_pepper = password_config.get("pepper", "") + # Password policy + self.password_policy = password_config.get("policy") or {} + self.password_policy_enabled = self.password_policy.get("enabled", False) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ password_config: @@ -48,4 +52,39 @@ class PasswordConfig(Config): # DO NOT CHANGE THIS AFTER INITIAL SETUP! # #pepper: "EVEN_MORE_SECRET" + + # Define and enforce a password policy. Each parameter is optional. + # This is an implementation of MSC2000. + # + policy: + # Whether to enforce the password policy. + # Defaults to 'false'. + # + #enabled: true + + # Minimum accepted length for a password. + # Defaults to 0. + # + #minimum_length: 15 + + # Whether a password must contain at least one digit. + # Defaults to 'false'. + # + #require_digit: true + + # Whether a password must contain at least one symbol. + # A symbol is any character that's not a number or a letter. + # Defaults to 'false'. + # + #require_symbol: true + + # Whether a password must contain at least one lowercase letter. + # Defaults to 'false'. + # + #require_lowercase: true + + # Whether a password must contain at least one lowercase letter. + # Defaults to 'false'. + # + #require_uppercase: true """ diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py new file mode 100644 index 0000000000..d06b110269 --- /dev/null +++ b/synapse/handlers/password_policy.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re + +from synapse.api.errors import Codes, PasswordRefusedError + +logger = logging.getLogger(__name__) + + +class PasswordPolicyHandler(object): + def __init__(self, hs): + self.policy = hs.config.password_policy + self.enabled = hs.config.password_policy_enabled + + # Regexps for the spec'd policy parameters. + self.regexp_digit = re.compile("[0-9]") + self.regexp_symbol = re.compile("[^a-zA-Z0-9]") + self.regexp_uppercase = re.compile("[A-Z]") + self.regexp_lowercase = re.compile("[a-z]") + + def validate_password(self, password): + """Checks whether a given password complies with the server's policy. 
+ + Args: + password (str): The password to check against the server's policy. + + Raises: + PasswordRefusedError: The password doesn't comply with the server's policy. + """ + + if not self.enabled: + return + + minimum_accepted_length = self.policy.get("minimum_length", 0) + if len(password) < minimum_accepted_length: + raise PasswordRefusedError( + msg=( + "The password must be at least %d characters long" + % minimum_accepted_length + ), + errcode=Codes.PASSWORD_TOO_SHORT, + ) + + if ( + self.policy.get("require_digit", False) + and self.regexp_digit.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one digit", + errcode=Codes.PASSWORD_NO_DIGIT, + ) + + if ( + self.policy.get("require_symbol", False) + and self.regexp_symbol.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one symbol", + errcode=Codes.PASSWORD_NO_SYMBOL, + ) + + if ( + self.policy.get("require_uppercase", False) + and self.regexp_uppercase.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one uppercase letter", + errcode=Codes.PASSWORD_NO_UPPERCASE, + ) + + if ( + self.policy.get("require_lowercase", False) + and self.regexp_lowercase.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one lowercase letter", + errcode=Codes.PASSWORD_NO_LOWERCASE, + ) diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index 12657ca698..7d1263caf2 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -32,6 +32,7 @@ class SetPasswordHandler(BaseHandler): super(SetPasswordHandler, self).__init__(hs) self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() + self._password_policy_handler = hs.get_password_policy_handler() @defer.inlineCallbacks def set_password( @@ -44,6 +45,7 @@ class SetPasswordHandler(BaseHandler): if not self.hs.config.password_localdb_enabled: raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) + self._password_policy_handler.validate_password(new_password) password_hash = yield self._auth_handler.hash(new_password) try: diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 4a1fc2ec2b..46e458e95b 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha import ( keys, notifications, openid, + password_policy, read_marker, receipts, register, @@ -118,6 +119,7 @@ class ClientRestResource(JsonResource): capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) relations.register_servlets(hs, client_resource) + password_policy.register_servlets(hs, client_resource) # moving to /_synapse/admin synapse.rest.admin.register_servlets_for_client_rest_resource( diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py new file mode 100644 index 0000000000..968403cca4 --- /dev/null +++ b/synapse/rest/client/v2_alpha/password_policy.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.http.servlet import RestServlet + +from ._base import client_patterns + +logger = logging.getLogger(__name__) + + +class PasswordPolicyServlet(RestServlet): + PATTERNS = client_patterns("/password_policy$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(PasswordPolicyServlet, self).__init__() + + self.policy = hs.config.password_policy + self.enabled = hs.config.password_policy_enabled + + def on_GET(self, request): + if not self.enabled or not self.policy: + return (200, {}) + + policy = {} + + for param in [ + "minimum_length", + "require_digit", + "require_symbol", + "require_lowercase", + "require_uppercase", + ]: + if param in self.policy: + policy["m.%s" % param] = self.policy[param] + + return (200, policy) + + +def register_servlets(hs, http_server): + PasswordPolicyServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 6963d79310..66fc8ec179 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -373,6 +373,7 @@ class RegisterRestServlet(RestServlet): self.room_member_handler = hs.get_room_member_handler() self.macaroon_gen = hs.get_macaroon_generator() self.ratelimiter = hs.get_registration_ratelimiter() + self.password_policy_handler = hs.get_password_policy_handler() self.clock = hs.get_clock() self._registration_flows = _calculate_registration_flows( @@ -420,6 +421,7 @@ class RegisterRestServlet(RestServlet): or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") + self.password_policy_handler.validate_password(body["password"]) desired_username = None if "username" in body: diff --git a/synapse/server.py b/synapse/server.py index 9426eb1672..d0d80e8ac5 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -66,6 +66,7 @@ from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerH from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler +from synapse.handlers.password_policy import PasswordPolicyHandler from synapse.handlers.presence import PresenceHandler from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler @@ -199,6 +200,7 @@ class HomeServer(object): "account_validity_handler", "saml_handler", "event_client_serializer", + "password_policy_handler", "storage", "replication_streamer", ] @@ -535,6 +537,9 @@ class HomeServer(object): def build_event_client_serializer(self): return EventClientSerializer(self) + def build_password_policy_handler(self): + return PasswordPolicyHandler(self) + def build_storage(self) -> Storage: return Storage(self, self.datastores) diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py new file mode 100644 index 0000000000..c57072f50c --- /dev/null +++ b/tests/rest/client/v2_alpha/test_password_policy.py @@ 
-0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from synapse.api.constants import LoginType +from synapse.api.errors import Codes +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import account, password_policy, register + +from tests import unittest + + +class PasswordPolicyTestCase(unittest.HomeserverTestCase): + """Tests the password policy feature and its compliance with MSC2000. + + When validating a password, Synapse does the necessary checks in this order: + + 1. Password is long enough + 2. Password contains digit(s) + 3. Password contains symbol(s) + 4. Password contains uppercase letter(s) + 5. Password contains lowercase letter(s) + + For each test below that checks whether a password triggers the right error code, + that test provides a password good enough to pass the previous tests, but not the + one it is currently testing (nor any test that comes afterward). + """ + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + password_policy.register_servlets, + account.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.register_url = "/_matrix/client/r0/register" + self.policy = { + "enabled": True, + "minimum_length": 10, + "require_digit": True, + "require_symbol": True, + "require_lowercase": True, + "require_uppercase": True, + } + + config = self.default_config() + config["password_config"] = { + "policy": self.policy, + } + + hs = self.setup_test_homeserver(config=config) + return hs + + def test_get_policy(self): + """Tests if the /password_policy endpoint returns the configured policy.""" + + request, channel = self.make_request( + "GET", "/_matrix/client/r0/password_policy" + ) + self.render(request) + + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual( + channel.json_body, + { + "m.minimum_length": 10, + "m.require_digit": True, + "m.require_symbol": True, + "m.require_lowercase": True, + "m.require_uppercase": True, + }, + channel.result, + ) + + def test_password_too_short(self): + request_data = json.dumps({"username": "kermit", "password": "shorty"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result, + ) + + def test_password_no_digit(self): + request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result, + ) + + def test_password_no_symbol(self): + request_data = json.dumps({"username": "kermit", "password": 
"l0ngerpassword"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result, + ) + + def test_password_no_uppercase(self): + request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result, + ) + + def test_password_no_lowercase(self): + request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result, + ) + + def test_password_compliant(self): + request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + # Getting a 401 here means the password has passed validation and the server has + # responded with a list of registration flows. + self.assertEqual(channel.code, 401, channel.result) + + def test_password_change(self): + """This doesn't test every possible use case, only that hitting /account/password + triggers the password validation code. + """ + compliant_password = "C0mpl!antpassword" + not_compliant_password = "notcompliantpassword" + + user_id = self.register_user("kermit", compliant_password) + tok = self.login("kermit", compliant_password) + + request_data = json.dumps( + { + "new_password": not_compliant_password, + "auth": { + "password": compliant_password, + "type": LoginType.PASSWORD, + "user": user_id, + }, + } + ) + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/account/password", + request_data, + access_token=tok, + ) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT) From 060e7dce09ae2197f29811769b13db30ed340211 Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Thu, 26 Mar 2020 19:02:35 +0200 Subject: [PATCH 309/382] Allow RedirectResponse in SAML response handler Allow custom SAML handlers to redirect after processing an auth response. Fixes #7149 Signed-off-by: Jason Robinson --- changelog.d/7151.bugfix | 1 + synapse/handlers/saml_handler.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/7151.bugfix diff --git a/changelog.d/7151.bugfix b/changelog.d/7151.bugfix new file mode 100644 index 0000000000..69cde9351d --- /dev/null +++ b/changelog.d/7151.bugfix @@ -0,0 +1 @@ +Allow custom SAML handlers to redirect after processing an auth response. 
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 72c109981b..dc04b53f43 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -26,6 +26,7 @@ from synapse.config import ConfigError from synapse.http.server import finish_request from synapse.http.servlet import parse_string from synapse.module_api import ModuleApi +from synapse.module_api.errors import RedirectException from synapse.types import ( UserID, map_username_to_mxid_localpart, @@ -119,6 +120,9 @@ class SamlHandler: try: user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) + except RedirectException: + # Raise the exception as per the wishes of the SAML module response + raise except Exception as e: # If decoding the response or mapping it to a user failed, then log the # error and tell the user that something went wrong. From 825fb5d0a5699fb5b5eef9a8c2170d0c76158001 Mon Sep 17 00:00:00 2001 From: Nektarios Katakis Date: Thu, 26 Mar 2020 17:13:14 +0000 Subject: [PATCH 310/382] Don't default to an invalid sqlite config if no database configuration is provided (#6573) --- changelog.d/6573.bugfix | 1 + synapse/config/database.py | 69 ++++++++++++++++++++++++++------------ 2 files changed, 48 insertions(+), 22 deletions(-) create mode 100644 changelog.d/6573.bugfix diff --git a/changelog.d/6573.bugfix b/changelog.d/6573.bugfix new file mode 100644 index 0000000000..1bb8014db7 --- /dev/null +++ b/changelog.d/6573.bugfix @@ -0,0 +1 @@ +Don't attempt to use an invalid sqlite config if no database configuration is provided. Contributed by @nekatak. diff --git a/synapse/config/database.py b/synapse/config/database.py index b8ab2f86ac..c27fef157b 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -20,6 +20,11 @@ from synapse.config._base import Config, ConfigError logger = logging.getLogger(__name__) +NON_SQLITE_DATABASE_PATH_WARNING = """\ +Ignoring 'database_path' setting: not using a sqlite3 database. 
+-------------------------------------------------------------------------------- +""" + DEFAULT_CONFIG = """\ ## Database ## @@ -105,6 +110,11 @@ class DatabaseConnectionConfig: class DatabaseConfig(Config): section = "database" + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.databases = [] + def read_config(self, config, **kwargs): self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K")) @@ -125,12 +135,13 @@ class DatabaseConfig(Config): multi_database_config = config.get("databases") database_config = config.get("database") + database_path = config.get("database_path") if multi_database_config and database_config: raise ConfigError("Can't specify both 'database' and 'datbases' in config") if multi_database_config: - if config.get("database_path"): + if database_path: raise ConfigError("Can't specify 'database_path' with 'databases'") self.databases = [ @@ -138,13 +149,17 @@ class DatabaseConfig(Config): for name, db_conf in multi_database_config.items() ] - else: - if database_config is None: - database_config = {"name": "sqlite3", "args": {}} - + if database_config: self.databases = [DatabaseConnectionConfig("master", database_config)] - self.set_databasepath(config.get("database_path")) + if database_path: + if self.databases and self.databases[0].name != "sqlite3": + logger.warning(NON_SQLITE_DATABASE_PATH_WARNING) + return + + database_config = {"name": "sqlite3", "args": {}} + self.databases = [DatabaseConnectionConfig("master", database_config)] + self.set_databasepath(database_path) def generate_config_section(self, data_dir_path, **kwargs): return DEFAULT_CONFIG % { @@ -152,27 +167,37 @@ class DatabaseConfig(Config): } def read_arguments(self, args): - self.set_databasepath(args.database_path) + """ + Cases for the cli input: + - If no databases are configured and no database_path is set, raise. + - No databases and only database_path available ==> sqlite3 db. + - If there are multiple databases and a database_path raise an error. + - If the database set in the config file is sqlite then + overwrite with the command line argument. + """ + + if args.database_path is None: + if not self.databases: + raise ConfigError("No database config provided") + return + + if len(self.databases) == 0: + database_config = {"name": "sqlite3", "args": {}} + self.databases = [DatabaseConnectionConfig("master", database_config)] + self.set_databasepath(args.database_path) + return + + if self.get_single_database().name == "sqlite3": + self.set_databasepath(args.database_path) + else: + logger.warning(NON_SQLITE_DATABASE_PATH_WARNING) def set_databasepath(self, database_path): - if database_path is None: - return if database_path != ":memory:": database_path = self.abspath(database_path) - # We only support setting a database path if we have a single sqlite3 - # database. - if len(self.databases) != 1: - raise ConfigError("Cannot specify 'database_path' with multiple databases") - - database = self.get_single_database() - if database.config["name"] != "sqlite3": - # We don't raise here as we haven't done so before for this case. - logger.warn("Ignoring 'database_path' for non-sqlite3 database") - return - - database.config["args"]["database"] = database_path + self.databases[0].config["args"]["database"] = database_path @staticmethod def add_arguments(parser): @@ -187,7 +212,7 @@ class DatabaseConfig(Config): def get_single_database(self) -> DatabaseConnectionConfig: """Returns the database if there is only one, useful for e.g. 
tests """ - if len(self.databases) != 1: + if not self.databases: raise Exception("More than one database exists") return self.databases[0] From c2ab0b30664439d09436a39e5448728c67b0414c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 26 Mar 2020 18:58:58 +0100 Subject: [PATCH 311/382] Whitelist the login fallback by default for SSO --- synapse/config/sso.py | 17 ++++++++++++++++- tests/rest/client/v1/test_login.py | 13 ++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 95762689bc..5ae9db83d0 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -39,6 +39,17 @@ class SSOConfig(Config): self.sso_client_whitelist = sso_config.get("client_whitelist") or [] + # Attempt to also whitelist the server's login fallback, since that fallback sets + # the redirect URL to itself (so it can process the login token then return + # gracefully to the client). This would make it pointless to ask the user for + # confirmation, since the URL the confirmation page would be showing wouldn't be + # the client's. + # public_baseurl is an optional setting, so we only add the fallback's URL to the + # list if it's provided (because we can't figure out what that URL is otherwise). + if self.public_baseurl: + login_fallback_url = self.public_baseurl + "_matrix/static/client/login" + self.sso_client_whitelist.append(login_fallback_url) + def generate_config_section(self, **kwargs): return """\ # Additional settings to use with single-sign on systems such as SAML2 and CAS. @@ -54,7 +65,11 @@ class SSOConfig(Config): # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # - # By default, this list is empty. + # If public_baseurl is set, then the login fallback page (used by clients + # that don't have full support for SSO) is always included in this list. + # + # By default, this list is empty, except if public_baseurl is set (in which + # case the login fallback page is the only element in the list). # #client_whitelist: # - https://riot.im/develop diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index da2c9bfa1e..ed02ff1dcc 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -350,7 +350,18 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): def test_cas_redirect_whitelisted(self): """Tests that the SSO login flow serves a redirect to a whitelisted url """ - redirect_url = "https://legit-site.com/" + self._test_redirect("https://legit-site.com/") + + @override_config( + { + "public_baseurl": "https://example.com" + } + ) + def test_cas_redirect_login_fallback(self): + self._test_redirect("https://example.com/_matrix/static/client/login") + + def _test_redirect(self, redirect_url): + """Tests that the SSO login flow serves a redirect for the given redirect URL.""" cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" % (urllib.parse.quote(redirect_url)) From 7083147961d7bc45ce49047c4878da2bf5202f79 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 26 Mar 2020 19:01:54 +0100 Subject: [PATCH 312/382] Regenerate sample config --- docs/sample_config.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2ff0dd05a2..556d4419f5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1392,7 +1392,11 @@ sso: # phishing attacks from evil.site. 
To avoid this, include a slash after the # hostname: "https://my.client/". # - # By default, this list is empty. + # If public_baseurl is set, then the login fallback page (used by clients + # that don't have full support for SSO) is always included in this list. + # + # By default, this list is empty, except if public_baseurl is set (in which + # case the login fallback page is the only element in the list). # #client_whitelist: # - https://riot.im/develop From 48b37f61ce17f5a667a9c8b7c6ad1d6abb7fdae8 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 26 Mar 2020 19:02:59 +0100 Subject: [PATCH 313/382] Changelog --- changelog.d/7153.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7153.feature diff --git a/changelog.d/7153.feature b/changelog.d/7153.feature new file mode 100644 index 0000000000..414ebe1f69 --- /dev/null +++ b/changelog.d/7153.feature @@ -0,0 +1 @@ +Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set. From bdf3cdaec800966a493998c65665ee951b3d39f5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 26 Mar 2020 19:06:44 +0100 Subject: [PATCH 314/382] Lint --- tests/rest/client/v1/test_login.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index ed02ff1dcc..aed8853d6e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -352,11 +352,7 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): """ self._test_redirect("https://legit-site.com/") - @override_config( - { - "public_baseurl": "https://example.com" - } - ) + @override_config({"public_baseurl": "https://example.com"}) def test_cas_redirect_login_fallback(self): self._test_redirect("https://example.com/_matrix/static/client/login") From 55ca6cf88cee15519cd094f60c92ab959973e4c6 Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Thu, 26 Mar 2020 20:35:50 +0200 Subject: [PATCH 315/382] Update changelog.d/7151.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/7151.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/7151.bugfix b/changelog.d/7151.bugfix index 69cde9351d..8aaa2dc659 100644 --- a/changelog.d/7151.bugfix +++ b/changelog.d/7151.bugfix @@ -1 +1 @@ -Allow custom SAML handlers to redirect after processing an auth response. +Fix error page being shown when a custom SAML handler attempted to redirect when processing an auth response. From fa4f12102d52b75d252d9209b45251d2b1591fdf Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Mar 2020 15:05:26 -0400 Subject: [PATCH 316/382] Refactor the CAS code (move the logic out of the REST layer to a handler) (#7136) --- changelog.d/7136.misc | 1 + synapse/handlers/cas_handler.py | 204 ++++++++++++++++++++++++++++++++ synapse/rest/client/v1/login.py | 169 +++----------------------- synapse/server.py | 5 + tox.ini | 1 + 5 files changed, 226 insertions(+), 154 deletions(-) create mode 100644 changelog.d/7136.misc create mode 100644 synapse/handlers/cas_handler.py diff --git a/changelog.d/7136.misc b/changelog.d/7136.misc new file mode 100644 index 0000000000..3f666d25fd --- /dev/null +++ b/changelog.d/7136.misc @@ -0,0 +1 @@ +Refactored the CAS authentication logic to a separate class. 
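
Before the new `CasHandler` below, it is worth making the login-fallback whitelisting from the SSO patches above concrete. A minimal sketch follows; the helper function is hypothetical (`SSOConfig.read_config` does this inline), but the appended URL and the trailing-slash handling of `public_baseurl` match the test case above.

def effective_sso_client_whitelist(sso_config, public_baseurl):
    # Mirrors SSOConfig.read_config: start from the configured list...
    whitelist = list(sso_config.get("client_whitelist") or [])
    # ...then, when public_baseurl is known, always trust the login fallback
    # page, because it redirects to itself to process the login token.
    if public_baseurl:
        whitelist.append(public_baseurl + "_matrix/static/client/login")
    return whitelist


# With no explicit whitelist and public_baseurl set, only the fallback page
# is trusted (public_baseurl is normalised elsewhere to end in "/"):
print(effective_sso_client_whitelist({}, "https://example.com/"))
# -> ['https://example.com/_matrix/static/client/login']
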
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py new file mode 100644 index 0000000000..f8dc274b78 --- /dev/null +++ b/synapse/handlers/cas_handler.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import xml.etree.ElementTree as ET +from typing import AnyStr, Dict, Optional, Tuple + +from six.moves import urllib + +from twisted.web.client import PartialDownloadError + +from synapse.api.errors import Codes, LoginError +from synapse.http.site import SynapseRequest +from synapse.types import UserID, map_username_to_mxid_localpart + +logger = logging.getLogger(__name__) + + +class CasHandler: + """ + Utility class for to handle the response from a CAS SSO service. + + Args: + hs (synapse.server.HomeServer) + """ + + def __init__(self, hs): + self._hostname = hs.hostname + self._auth_handler = hs.get_auth_handler() + self._registration_handler = hs.get_registration_handler() + + self._cas_server_url = hs.config.cas_server_url + self._cas_service_url = hs.config.cas_service_url + self._cas_displayname_attribute = hs.config.cas_displayname_attribute + self._cas_required_attributes = hs.config.cas_required_attributes + + self._http_client = hs.get_proxied_http_client() + + def _build_service_param(self, client_redirect_url: AnyStr) -> str: + return "%s%s?%s" % ( + self._cas_service_url, + "/_matrix/client/r0/login/cas/ticket", + urllib.parse.urlencode({"redirectUrl": client_redirect_url}), + ) + + async def _handle_cas_response( + self, request: SynapseRequest, cas_response_body: str, client_redirect_url: str + ) -> None: + """ + Retrieves the user and display name from the CAS response and continues with the authentication. + + Args: + request: The original client request. + cas_response_body: The response from the CAS server. + client_redirect_url: The URl to redirect the client to when + everything is done. + """ + user, attributes = self._parse_cas_response(cas_response_body) + displayname = attributes.pop(self._cas_displayname_attribute, None) + + for required_attribute, required_value in self._cas_required_attributes.items(): + # If required attribute was not in CAS Response - Forbidden + if required_attribute not in attributes: + raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) + + # Also need to check value + if required_value is not None: + actual_value = attributes[required_attribute] + # If required attribute value does not match expected - Forbidden + if required_value != actual_value: + raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) + + await self._on_successful_auth(user, request, client_redirect_url, displayname) + + def _parse_cas_response( + self, cas_response_body: str + ) -> Tuple[str, Dict[str, Optional[str]]]: + """ + Retrieve the user and other parameters from the CAS response. + + Args: + cas_response_body: The response from the CAS query. 
+ + Returns: + A tuple of the user and a mapping of other attributes. + """ + user = None + attributes = {} + try: + root = ET.fromstring(cas_response_body) + if not root.tag.endswith("serviceResponse"): + raise Exception("root of CAS response is not serviceResponse") + success = root[0].tag.endswith("authenticationSuccess") + for child in root[0]: + if child.tag.endswith("user"): + user = child.text + if child.tag.endswith("attributes"): + for attribute in child: + # ElementTree library expands the namespace in + # attribute tags to the full URL of the namespace. + # We don't care about namespace here and it will always + # be encased in curly braces, so we remove them. + tag = attribute.tag + if "}" in tag: + tag = tag.split("}")[1] + attributes[tag] = attribute.text + if user is None: + raise Exception("CAS response does not contain user") + except Exception: + logger.exception("Error parsing CAS response") + raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED) + if not success: + raise LoginError( + 401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED + ) + return user, attributes + + async def _on_successful_auth( + self, + username: str, + request: SynapseRequest, + client_redirect_url: str, + user_display_name: Optional[str] = None, + ) -> None: + """Called once the user has successfully authenticated with the SSO. + + Registers the user if necessary, and then returns a redirect (with + a login token) to the client. + + Args: + username: the remote user id. We'll map this onto + something sane for a MXID localpath. + + request: the incoming request from the browser. We'll + respond to it with a redirect. + + client_redirect_url: the redirect_url the client gave us when + it first started the process. + + user_display_name: if set, and we have to register a new user, + we will set their displayname to this. + """ + localpart = map_username_to_mxid_localpart(username) + user_id = UserID(localpart, self._hostname).to_string() + registered_user_id = await self._auth_handler.check_user_exists(user_id) + if not registered_user_id: + registered_user_id = await self._registration_handler.register_user( + localpart=localpart, default_display_name=user_display_name + ) + + self._auth_handler.complete_sso_login( + registered_user_id, request, client_redirect_url + ) + + def handle_redirect_request(self, client_redirect_url: bytes) -> bytes: + """ + Generates a URL to the CAS server where the client should be redirected. + + Args: + client_redirect_url: The final URL the client should go to after the + user has negotiated SSO. + + Returns: + The URL to redirect to. + """ + args = urllib.parse.urlencode( + {"service": self._build_service_param(client_redirect_url)} + ) + + return ("%s/login?%s" % (self._cas_server_url, args)).encode("ascii") + + async def handle_ticket_request( + self, request: SynapseRequest, client_redirect_url: str, ticket: str + ) -> None: + """ + Validates a CAS ticket sent by the client for login/registration. + + On a successful request, writes a redirect to the request. 
+ """ + uri = self._cas_server_url + "/proxyValidate" + args = { + "ticket": ticket, + "service": self._build_service_param(client_redirect_url), + } + try: + body = await self._http_client.get_raw(uri, args) + except PartialDownloadError as pde: + # Twisted raises this error if the connection is closed, + # even if that's being used old-http style to signal end-of-data + body = pde.response + + await self._handle_cas_response(request, body, client_redirect_url) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 56d713462a..59593cbf6e 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -14,11 +14,6 @@ # limitations under the License. import logging -import xml.etree.ElementTree as ET - -from six.moves import urllib - -from twisted.web.client import PartialDownloadError from synapse.api.errors import Codes, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter @@ -28,9 +23,10 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.http.site import SynapseRequest from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder -from synapse.types import UserID, map_username_to_mxid_localpart +from synapse.types import UserID from synapse.util.msisdn import phone_number_to_msisdn logger = logging.getLogger(__name__) @@ -72,14 +68,6 @@ def login_id_thirdparty_from_phone(identifier): return {"type": "m.id.thirdparty", "medium": "msisdn", "address": msisdn} -def build_service_param(cas_service_url, client_redirect_url): - return "%s%s?redirectUrl=%s" % ( - cas_service_url, - "/_matrix/client/r0/login/cas/ticket", - urllib.parse.quote(client_redirect_url, safe=""), - ) - - class LoginRestServlet(RestServlet): PATTERNS = client_patterns("/login$", v1=True) CAS_TYPE = "m.login.cas" @@ -409,7 +397,7 @@ class BaseSSORedirectServlet(RestServlet): PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) - def on_GET(self, request): + def on_GET(self, request: SynapseRequest): args = request.args if b"redirectUrl" not in args: return 400, "Redirect URL not specified for SSO auth" @@ -418,15 +406,15 @@ class BaseSSORedirectServlet(RestServlet): request.redirect(sso_url) finish_request(request) - def get_sso_url(self, client_redirect_url): + def get_sso_url(self, client_redirect_url: bytes) -> bytes: """Get the URL to redirect to, to perform SSO auth Args: - client_redirect_url (bytes): the URL that we should redirect the + client_redirect_url: the URL that we should redirect the client to when everything is done Returns: - bytes: URL to redirect to + URL to redirect to """ # to be implemented by subclasses raise NotImplementedError() @@ -434,16 +422,10 @@ class BaseSSORedirectServlet(RestServlet): class CasRedirectServlet(BaseSSORedirectServlet): def __init__(self, hs): - super(CasRedirectServlet, self).__init__() - self.cas_server_url = hs.config.cas_server_url - self.cas_service_url = hs.config.cas_service_url + self._cas_handler = hs.get_cas_handler() - def get_sso_url(self, client_redirect_url): - args = urllib.parse.urlencode( - {"service": build_service_param(self.cas_service_url, client_redirect_url)} - ) - - return "%s/login?%s" % (self.cas_server_url, args) + def get_sso_url(self, client_redirect_url: bytes) -> bytes: + return self._cas_handler.handle_redirect_request(client_redirect_url) class CasTicketServlet(RestServlet): @@ -451,81 +433,15 @@ class CasTicketServlet(RestServlet): def __init__(self, hs): 
super(CasTicketServlet, self).__init__() - self.cas_server_url = hs.config.cas_server_url - self.cas_service_url = hs.config.cas_service_url - self.cas_displayname_attribute = hs.config.cas_displayname_attribute - self.cas_required_attributes = hs.config.cas_required_attributes - self._sso_auth_handler = SSOAuthHandler(hs) - self._http_client = hs.get_proxied_http_client() + self._cas_handler = hs.get_cas_handler() - async def on_GET(self, request): + async def on_GET(self, request: SynapseRequest) -> None: client_redirect_url = parse_string(request, "redirectUrl", required=True) - uri = self.cas_server_url + "/proxyValidate" - args = { - "ticket": parse_string(request, "ticket", required=True), - "service": build_service_param(self.cas_service_url, client_redirect_url), - } - try: - body = await self._http_client.get_raw(uri, args) - except PartialDownloadError as pde: - # Twisted raises this error if the connection is closed, - # even if that's being used old-http style to signal end-of-data - body = pde.response - result = await self.handle_cas_response(request, body, client_redirect_url) - return result - - def handle_cas_response(self, request, cas_response_body, client_redirect_url): - user, attributes = self.parse_cas_response(cas_response_body) - displayname = attributes.pop(self.cas_displayname_attribute, None) - - for required_attribute, required_value in self.cas_required_attributes.items(): - # If required attribute was not in CAS Response - Forbidden - if required_attribute not in attributes: - raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) - - # Also need to check value - if required_value is not None: - actual_value = attributes[required_attribute] - # If required attribute value does not match expected - Forbidden - if required_value != actual_value: - raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) - - return self._sso_auth_handler.on_successful_auth( - user, request, client_redirect_url, displayname + ticket = parse_string(request, "ticket", required=True) + await self._cas_handler.handle_ticket_request( + request, client_redirect_url, ticket ) - def parse_cas_response(self, cas_response_body): - user = None - attributes = {} - try: - root = ET.fromstring(cas_response_body) - if not root.tag.endswith("serviceResponse"): - raise Exception("root of CAS response is not serviceResponse") - success = root[0].tag.endswith("authenticationSuccess") - for child in root[0]: - if child.tag.endswith("user"): - user = child.text - if child.tag.endswith("attributes"): - for attribute in child: - # ElementTree library expands the namespace in - # attribute tags to the full URL of the namespace. - # We don't care about namespace here and it will always - # be encased in curly braces, so we remove them. 
- tag = attribute.tag - if "}" in tag: - tag = tag.split("}")[1] - attributes[tag] = attribute.text - if user is None: - raise Exception("CAS response does not contain user") - except Exception: - logger.exception("Error parsing CAS response") - raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED) - if not success: - raise LoginError( - 401, "Unsuccessful CAS response", errcode=Codes.UNAUTHORIZED - ) - return user, attributes - class SAMLRedirectServlet(BaseSSORedirectServlet): PATTERNS = client_patterns("/login/sso/redirect", v1=True) @@ -533,65 +449,10 @@ class SAMLRedirectServlet(BaseSSORedirectServlet): def __init__(self, hs): self._saml_handler = hs.get_saml_handler() - def get_sso_url(self, client_redirect_url): + def get_sso_url(self, client_redirect_url: bytes) -> bytes: return self._saml_handler.handle_redirect_request(client_redirect_url) -class SSOAuthHandler(object): - """ - Utility class for Resources and Servlets which handle the response from a SSO - service - - Args: - hs (synapse.server.HomeServer) - """ - - def __init__(self, hs): - self._hostname = hs.hostname - self._auth_handler = hs.get_auth_handler() - self._registration_handler = hs.get_registration_handler() - self._macaroon_gen = hs.get_macaroon_generator() - - # cast to tuple for use with str.startswith - self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) - - async def on_successful_auth( - self, username, request, client_redirect_url, user_display_name=None - ): - """Called once the user has successfully authenticated with the SSO. - - Registers the user if necessary, and then returns a redirect (with - a login token) to the client. - - Args: - username (unicode|bytes): the remote user id. We'll map this onto - something sane for a MXID localpath. - - request (SynapseRequest): the incoming request from the browser. We'll - respond to it with a redirect. - - client_redirect_url (unicode): the redirect_url the client gave us when - it first started the process. - - user_display_name (unicode|None): if set, and we have to register a new user, - we will set their displayname to this. - - Returns: - Deferred[none]: Completes once we have handled the request. 
- """ - localpart = map_username_to_mxid_localpart(username) - user_id = UserID(localpart, self._hostname).to_string() - registered_user_id = await self._auth_handler.check_user_exists(user_id) - if not registered_user_id: - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, default_display_name=user_display_name - ) - - self._auth_handler.complete_sso_login( - registered_user_id, request, client_redirect_url - ) - - def register_servlets(hs, http_server): LoginRestServlet(hs).register(http_server) if hs.config.cas_enabled: diff --git a/synapse/server.py b/synapse/server.py index d0d80e8ac5..c7ca2bda0d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -56,6 +56,7 @@ from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator +from synapse.handlers.cas_handler import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler @@ -198,6 +199,7 @@ class HomeServer(object): "sendmail", "registration_handler", "account_validity_handler", + "cas_handler", "saml_handler", "event_client_serializer", "password_policy_handler", @@ -529,6 +531,9 @@ class HomeServer(object): def build_account_validity_handler(self): return AccountValidityHandler(self) + def build_cas_handler(self): + return CasHandler(self) + def build_saml_handler(self): from synapse.handlers.saml_handler import SamlHandler diff --git a/tox.ini b/tox.ini index 8e3f09e638..a79fc93b57 100644 --- a/tox.ini +++ b/tox.ini @@ -186,6 +186,7 @@ commands = mypy \ synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/auth.py \ + synapse/handlers/cas_handler.py \ synapse/handlers/directory.py \ synapse/handlers/presence.py \ synapse/handlers/sync.py \ From 665630fcaab8f09e83ff77f35d5244a718e20701 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 27 Mar 2020 11:39:43 +0000 Subject: [PATCH 317/382] Add tests for outbound device pokes --- changelog.d/7157.misc | 1 + tests/federation/test_federation_sender.py | 303 ++++++++++++++++++++- tests/unittest.py | 1 + 3 files changed, 302 insertions(+), 3 deletions(-) create mode 100644 changelog.d/7157.misc diff --git a/changelog.d/7157.misc b/changelog.d/7157.misc new file mode 100644 index 0000000000..0eb1128c7a --- /dev/null +++ b/changelog.d/7157.misc @@ -0,0 +1 @@ +Add tests for outbound device pokes. diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index d456267b87..7763b12159 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -12,19 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
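+# These tests exercise the federation sender's outbound device pokes: the
+# `m.device_list_update` (and signing-key update) EDUs that are queued up for
+# remote servers when a local user's devices change.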
+from typing import Optional from mock import Mock +from signedjson import key, sign +from signedjson.types import BaseKey, SigningKey + from twisted.internet import defer -from synapse.types import ReadReceipt +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.types import JsonDict, ReadReceipt from tests.unittest import HomeserverTestCase, override_config -class FederationSenderTestCases(HomeserverTestCase): +class FederationSenderReceiptsTestCases(HomeserverTestCase): def make_homeserver(self, reactor, clock): - return super(FederationSenderTestCases, self).setup_test_homeserver( + return self.setup_test_homeserver( state_handler=Mock(spec=["get_current_hosts_in_room"]), federation_transport_client=Mock(spec=["send_transaction"]), ) @@ -147,3 +153,294 @@ class FederationSenderTestCases(HomeserverTestCase): } ], ) + + +class FederationSenderDevicesTestCases(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + state_handler=Mock(spec=["get_current_hosts_in_room"]), + federation_transport_client=Mock(spec=["send_transaction"]), + ) + + def default_config(self): + c = super().default_config() + c["send_federation"] = True + return c + + def prepare(self, reactor, clock, hs): + # stub out get_current_hosts_in_room + mock_state_handler = hs.get_state_handler() + mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"] + + # stub out get_users_who_share_room_with_user so that it claims that + # `@user2:host2` is in the room + def get_users_who_share_room_with_user(user_id): + return defer.succeed({"@user2:host2"}) + + hs.get_datastore().get_users_who_share_room_with_user = ( + get_users_who_share_room_with_user + ) + + # whenever send_transaction is called, record the edu data + self.edus = [] + self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.record_transaction + ) + + def record_transaction(self, txn, json_cb): + data = json_cb() + self.edus.extend(data["edus"]) + return defer.succeed({}) + + def test_send_device_updates(self): + """Basic case: each device update should result in an EDU""" + # create a device + u1 = self.register_user("user", "pass") + self.login(u1, "pass", device_id="D1") + + # expect one edu + self.assertEqual(len(self.edus), 1) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) + + # a second call should produce no new device EDUs + self.hs.get_federation_sender().send_device_messages("host2") + self.pump() + self.assertEqual(self.edus, []) + + # a second device + self.login("user", "pass", device_id="D2") + + self.assertEqual(len(self.edus), 1) + self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) + + def test_upload_signatures(self): + """Uploading signatures on some devices should produce updates for that user""" + + e2e_handler = self.hs.get_e2e_keys_handler() + + # register two devices + u1 = self.register_user("user", "pass") + self.login(u1, "pass", device_id="D1") + self.login(u1, "pass", device_id="D2") + + # expect two edus + self.assertEqual(len(self.edus), 2) + stream_id = None + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) + + # upload signing keys for each device + device1_signing_key = self.generate_and_upload_device_signing_key(u1, "D1") + device2_signing_key = 
self.generate_and_upload_device_signing_key(u1, "D2") + + # expect two more edus + self.assertEqual(len(self.edus), 2) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) + + # upload master key and self-signing key + master_signing_key = generate_self_id_key() + master_key = { + "user_id": u1, + "usage": ["master"], + "keys": {key_id(master_signing_key): encode_pubkey(master_signing_key)}, + } + + # private key: HvQBbU+hc2Zr+JP1sE0XwBe1pfZZEYtJNPJLZJtS+F8 + selfsigning_signing_key = generate_self_id_key() + selfsigning_key = { + "user_id": u1, + "usage": ["self_signing"], + "keys": { + key_id(selfsigning_signing_key): encode_pubkey(selfsigning_signing_key) + }, + } + sign.sign_json(selfsigning_key, u1, master_signing_key) + + cross_signing_keys = { + "master_key": master_key, + "self_signing_key": selfsigning_key, + } + + self.get_success( + e2e_handler.upload_signing_keys_for_user(u1, cross_signing_keys) + ) + + # expect signing key update edu + self.assertEqual(len(self.edus), 1) + self.assertEqual(self.edus.pop(0)["edu_type"], "org.matrix.signing_key_update") + + # sign the devices + d1_json = build_device_dict(u1, "D1", device1_signing_key) + sign.sign_json(d1_json, u1, selfsigning_signing_key) + d2_json = build_device_dict(u1, "D2", device2_signing_key) + sign.sign_json(d2_json, u1, selfsigning_signing_key) + + ret = self.get_success( + e2e_handler.upload_signatures_for_device_keys( + u1, {u1: {"D1": d1_json, "D2": d2_json}}, + ) + ) + self.assertEqual(ret["failures"], {}) + + # expect two edus, in one or two transactions. We don't know what order the + # devices will be updated. + self.assertEqual(len(self.edus), 2) + stream_id = None # FIXME: there is a discontinuity in the stream IDs: see #7142 + for edu in self.edus: + self.assertEqual(edu["edu_type"], "m.device_list_update") + c = edu["content"] + if stream_id is not None: + self.assertEqual(c["prev_id"], [stream_id]) + stream_id = c["stream_id"] + devices = {edu["content"]["device_id"] for edu in self.edus} + self.assertEqual({"D1", "D2"}, devices) + + def test_delete_devices(self): + """If devices are deleted, that should result in EDUs too""" + + # create devices + u1 = self.register_user("user", "pass") + self.login("user", "pass", device_id="D1") + self.login("user", "pass", device_id="D2") + self.login("user", "pass", device_id="D3") + + # expect three edus + self.assertEqual(len(self.edus), 3) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) + stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D3", stream_id) + + # delete them again + self.get_success( + self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) + ) + + # expect three edus, in an unknown order + self.assertEqual(len(self.edus), 3) + for edu in self.edus: + self.assertEqual(edu["edu_type"], "m.device_list_update") + c = edu["content"] + self.assertGreaterEqual( + c.items(), + {"user_id": u1, "prev_id": [stream_id], "deleted": True}.items(), + ) + stream_id = c["stream_id"] + devices = {edu["content"]["device_id"] for edu in self.edus} + self.assertEqual({"D1", "D2", "D3"}, devices) + + def test_unreachable_server(self): + """If the destination server is unreachable, all the updates should get sent on + recovery + """ + mock_send_txn = self.hs.get_federation_transport_client().send_transaction + 
mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + + # create devices + u1 = self.register_user("user", "pass") + self.login("user", "pass", device_id="D1") + self.login("user", "pass", device_id="D2") + self.login("user", "pass", device_id="D3") + + # delete them again + self.get_success( + self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) + ) + + self.assertGreaterEqual(mock_send_txn.call_count, 4) + + # recover the server + mock_send_txn.side_effect = self.record_transaction + self.hs.get_federation_sender().send_device_messages("host2") + self.pump() + + # for each device, there should be a single update + self.assertEqual(len(self.edus), 3) + stream_id = None + for edu in self.edus: + self.assertEqual(edu["edu_type"], "m.device_list_update") + c = edu["content"] + self.assertEqual(c["prev_id"], [stream_id] if stream_id is not None else []) + stream_id = c["stream_id"] + devices = {edu["content"]["device_id"] for edu in self.edus} + self.assertEqual({"D1", "D2", "D3"}, devices) + + def check_device_update_edu( + self, + edu: JsonDict, + user_id: str, + device_id: str, + prev_stream_id: Optional[int], + ) -> int: + """Check that the given EDU is an update for the given device + Returns the stream_id. + """ + self.assertEqual(edu["edu_type"], "m.device_list_update") + content = edu["content"] + + expected = { + "user_id": user_id, + "device_id": device_id, + "prev_id": [prev_stream_id] if prev_stream_id is not None else [], + } + + self.assertLessEqual(expected.items(), content.items()) + return content["stream_id"] + + def check_signing_key_update_txn(self, txn: JsonDict,) -> None: + """Check that the txn has an EDU with a signing key update. + """ + edus = txn["edus"] + self.assertEqual(len(edus), 1) + + def generate_and_upload_device_signing_key( + self, user_id: str, device_id: str + ) -> SigningKey: + """Generate a signing keypair for the given device, and upload it""" + sk = key.generate_signing_key(device_id) + + device_dict = build_device_dict(user_id, device_id, sk) + + self.get_success( + self.hs.get_e2e_keys_handler().upload_keys_for_user( + user_id, device_id, {"device_keys": device_dict}, + ) + ) + return sk + + +def generate_self_id_key() -> SigningKey: + """generate a signing key whose version is its public key + + ... as used by the cross-signing-keys. 
+ """ + k = key.generate_signing_key("x") + k.version = encode_pubkey(k) + return k + + +def key_id(k: BaseKey) -> str: + return "%s:%s" % (k.alg, k.version) + + +def encode_pubkey(sk: SigningKey) -> str: + """Encode the public key corresponding to the given signing key as base64""" + return key.encode_verify_key_base64(key.get_verify_key(sk)) + + +def build_device_dict(user_id: str, device_id: str, sk: SigningKey): + """Build a dict representing the given device""" + return { + "user_id": user_id, + "device_id": device_id, + "algorithms": ["m.olm.curve25519-aes-sha256", "m.megolm.v1.aes-sha"], + "keys": { + "curve25519:" + device_id: "curve25519+key", + key_id(sk): encode_pubkey(sk), + }, + } diff --git a/tests/unittest.py b/tests/unittest.py index 23b59bea22..3d57b77a5d 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -490,6 +490,7 @@ class HomeserverTestCase(TestCase): "password": password, "admin": admin, "mac": want_mac, + "inhibit_login": True, } ) request, channel = self.make_request( From 09cc058a4c3eae58ee6e08c1925bffd9cf7d52c6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 27 Mar 2020 12:26:47 +0000 Subject: [PATCH 318/382] Always send the user updates to their own device list This will allow clients to notify users about new devices even if the user isn't in any rooms (yet). --- synapse/handlers/device.py | 19 ++++++++++++++++--- synapse/handlers/sync.py | 7 ++++--- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index a514c30714..54931c355b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -122,11 +122,22 @@ class DeviceWorkerHandler(BaseHandler): # First we check if any devices have changed for users that we share # rooms with. - users_who_share_room = yield self.store.get_users_who_share_room_with_user( + tracked_users = yield self.store.get_users_who_share_room_with_user( user_id ) + # always tell the user about their own devices + tracked_users.add(user_id) + + logger.info( + "tracked users ids: %r", tracked_users, + ) + changed = yield self.store.get_users_whose_devices_changed( - from_token.device_list_key, users_who_share_room + from_token.device_list_key, tracked_users + ) + + logger.info( + "changed users IDs: %r", changed, ) # Then work out if any users have since joined @@ -456,7 +467,9 @@ class DeviceHandler(DeviceWorkerHandler): room_ids = yield self.store.get_rooms_for_user(user_id) - yield self.notifier.on_new_event("device_list_key", position, rooms=room_ids) + # specify the user ID too since the user should always get their own device list + # updates, even if they aren't in any rooms. + yield self.notifier.on_new_event("device_list_key", position, users=[user_id], rooms=room_ids) if hosts: logger.info( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5746fdea14..fd68a31b09 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1139,13 +1139,14 @@ class SyncHandler(object): # room with by looking at all users that have left a room plus users # that were in a room we've left. 
- users_who_share_room = await self.store.get_users_who_share_room_with_user( + users_we_track = await self.store.get_users_who_share_room_with_user( user_id ) + users_we_track.add(user_id) # Step 1a, check for changes in devices of users we share a room with users_that_have_changed = await self.store.get_users_whose_devices_changed( - since_token.device_list_key, users_who_share_room + since_token.device_list_key, users_we_track ) # Step 1b, check for newly joined rooms @@ -1168,7 +1169,7 @@ class SyncHandler(object): newly_left_users.update(left_users) # Remove any users that we still share a room with. - newly_left_users -= users_who_share_room + newly_left_users -= users_we_track return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: From d9965fb8d678837947cabd41c410127fb59d1b82 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 27 Mar 2020 12:30:59 +0000 Subject: [PATCH 319/382] changelog --- changelog.d/7160.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7160.feature diff --git a/changelog.d/7160.feature b/changelog.d/7160.feature new file mode 100644 index 0000000000..c1205969a1 --- /dev/null +++ b/changelog.d/7160.feature @@ -0,0 +1 @@ +Always send users their own device updates. From a07e03ce908f3de4a5ca80d0d7db6da2a6ed98b5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 27 Mar 2020 12:35:32 +0000 Subject: [PATCH 320/382] black --- synapse/handlers/device.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 54931c355b..80e7374c6a 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -122,9 +122,7 @@ class DeviceWorkerHandler(BaseHandler): # First we check if any devices have changed for users that we share # rooms with. - tracked_users = yield self.store.get_users_who_share_room_with_user( - user_id - ) + tracked_users = yield self.store.get_users_who_share_room_with_user(user_id) # always tell the user about their own devices tracked_users.add(user_id) @@ -469,7 +467,9 @@ class DeviceHandler(DeviceWorkerHandler): # specify the user ID too since the user should always get their own device list # updates, even if they aren't in any rooms. 
- yield self.notifier.on_new_event("device_list_key", position, users=[user_id], rooms=room_ids) + yield self.notifier.on_new_event( + "device_list_key", position, users=[user_id], rooms=room_ids + ) if hosts: logger.info( From 16ee97988abb1951e7009620f69904f9bb7c9215 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 27 Mar 2020 12:39:54 +0000 Subject: [PATCH 321/382] Fix undefined variable & remove debug logging --- synapse/handlers/device.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 80e7374c6a..07ce553995 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -126,18 +126,10 @@ class DeviceWorkerHandler(BaseHandler): # always tell the user about their own devices tracked_users.add(user_id) - logger.info( - "tracked users ids: %r", tracked_users, - ) - changed = yield self.store.get_users_whose_devices_changed( from_token.device_list_key, tracked_users ) - logger.info( - "changed users IDs: %r", changed, - ) - # Then work out if any users have since joined rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key) @@ -225,8 +217,8 @@ class DeviceWorkerHandler(BaseHandler): if possibly_changed or possibly_left: # Take the intersection of the users whose devices may have changed # and those that actually still share a room with the user - possibly_joined = possibly_changed & users_who_share_room - possibly_left = (possibly_changed | possibly_left) - users_who_share_room + possibly_joined = possibly_changed & tracked_users + possibly_left = (possibly_changed | possibly_left) - tracked_users else: possibly_joined = [] possibly_left = [] From fbf0782c63bd2aba3c504dabd04abdf10d269a22 Mon Sep 17 00:00:00 2001 From: David Vo Date: Sat, 28 Mar 2020 00:20:00 +1100 Subject: [PATCH 322/382] Only import sqlite3 when type checking (#7155) Fixes: #7127 Signed-off-by: David Vo --- changelog.d/7155.bugfix | 1 + synapse/storage/engines/sqlite.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7155.bugfix diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix new file mode 100644 index 0000000000..0bf51e7aba --- /dev/null +++ b/changelog.d/7155.bugfix @@ -0,0 +1 @@ +Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 2bfeefd54e..3bc2e8b986 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -12,14 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import sqlite3 import struct import threading +import typing from synapse.storage.engines import BaseDatabaseEngine +if typing.TYPE_CHECKING: + import sqlite3 # noqa: F401 -class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]): + +class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): def __init__(self, database_module, database_config): super().__init__(database_module, database_config) From 12aa5a7fa761a729364d324405a033cf78da26de Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 27 Mar 2020 13:30:22 +0000 Subject: [PATCH 323/382] Ensure is_verified on /_matrix/client/r0/room_keys/keys is a boolean (#7150) --- changelog.d/7150.bugfix | 1 + synapse/rest/client/v2_alpha/room_keys.py | 2 +- synapse/storage/data_stores/main/e2e_room_keys.py | 3 ++- 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7150.bugfix diff --git a/changelog.d/7150.bugfix b/changelog.d/7150.bugfix new file mode 100644 index 0000000000..1feb294799 --- /dev/null +++ b/changelog.d/7150.bugfix @@ -0,0 +1 @@ +Ensure `is_verified` is a boolean in responses to `GET /_matrix/client/r0/room_keys/keys`. Also warn the user if they forgot the `version` query param. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 38952a1d27..59529707df 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -188,7 +188,7 @@ class RoomKeysServlet(RestServlet): """ requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = parse_string(request, "version") + version = parse_string(request, "version", required=True) room_keys = await self.e2e_room_keys_handler.get_room_keys( user_id, version, room_id, session_id diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/data_stores/main/e2e_room_keys.py index 84594cf0a9..23f4570c4b 100644 --- a/synapse/storage/data_stores/main/e2e_room_keys.py +++ b/synapse/storage/data_stores/main/e2e_room_keys.py @@ -146,7 +146,8 @@ class EndToEndRoomKeyStore(SQLBaseStore): room_entry["sessions"][row["session_id"]] = { "first_message_index": row["first_message_index"], "forwarded_count": row["forwarded_count"], - "is_verified": row["is_verified"], + # is_verified must be returned to the client as a boolean + "is_verified": bool(row["is_verified"]), "session_data": json.loads(row["session_data"]), } From 63aea691a761a9b6a2058b54792fc5859e12cfba Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 27 Mar 2020 15:09:12 +0100 Subject: [PATCH 324/382] Update the wording of the config comment --- docs/sample_config.yaml | 6 +++--- synapse/config/sso.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 556d4419f5..07e922dc27 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1393,10 +1393,10 @@ sso: # hostname: "https://my.client/". # # If public_baseurl is set, then the login fallback page (used by clients - # that don't have full support for SSO) is always included in this list. + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. # - # By default, this list is empty, except if public_baseurl is set (in which - # case the login fallback page is the only element in the list). + # By default, this list is empty. 
# #client_whitelist: # - https://riot.im/develop diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 5ae9db83d0..ec3dca9efc 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -66,10 +66,10 @@ class SSOConfig(Config): # hostname: "https://my.client/". # # If public_baseurl is set, then the login fallback page (used by clients - # that don't have full support for SSO) is always included in this list. + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. # - # By default, this list is empty, except if public_baseurl is set (in which - # case the login fallback page is the only element in the list). + # By default, this list is empty. # #client_whitelist: # - https://riot.im/develop From 90246344e340bce3417fb330da6be9338a701c5c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 27 Mar 2020 15:44:13 +0100 Subject: [PATCH 325/382] Improve the UX of the login fallback when using SSO (#7152) * Don't show the login forms if we're currently logging in with a password or a token. * Submit directly the SSO login form, showing only a spinner to the user, in order to eliminate from the clunkiness of SSO through this fallback. --- changelog.d/7152.feature | 1 + synapse/static/client/login/index.html | 2 +- synapse/static/client/login/js/login.js | 51 +++++++++++++++---------- 3 files changed, 32 insertions(+), 22 deletions(-) create mode 100644 changelog.d/7152.feature diff --git a/changelog.d/7152.feature b/changelog.d/7152.feature new file mode 100644 index 0000000000..fafa79c7e7 --- /dev/null +++ b/changelog.d/7152.feature @@ -0,0 +1 @@ +Improve the support for SSO authentication on the login fallback page. diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html index bcb6bc6bb7..712b0e3980 100644 --- a/synapse/static/client/login/index.html +++ b/synapse/static/client/login/index.html @@ -9,7 +9,7 @@

-            <h1>Log in with one of the following methods</h1>
+            <h1 id="title"></h1>
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index 276c271bbe..debe464371 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -1,37 +1,41 @@ window.matrixLogin = { endpoint: location.origin + "/_matrix/client/r0/login", serverAcceptsPassword: false, - serverAcceptsCas: false, serverAcceptsSso: false, }; +var title_pre_auth = "Log in with one of the following methods"; +var title_post_auth = "Logging in..."; + var submitPassword = function(user, pwd) { console.log("Logging in with password..."); + set_title(title_post_auth); var data = { type: "m.login.password", user: user, password: pwd, }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var submitToken = function(loginToken) { console.log("Logging in with login token..."); + set_title(title_post_auth); var data = { type: "m.login.token", token: loginToken }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var errorFunc = function(err) { - show_login(); + // We want to show the error to the user rather than redirecting immediately to the + // SSO portal (if SSO is the only login option), so we inhibit the redirect. + show_login(true); if (err.responseJSON && err.responseJSON.error) { setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); @@ -45,26 +49,33 @@ var setFeedbackString = function(text) { $("#feedback").text(text); }; -var show_login = function() { - $("#loading").hide(); - +var show_login = function(inhibit_redirect) { var this_page = window.location.origin + window.location.pathname; $("#sso_redirect_url").val(this_page); + // If inhibit_redirect is false, and SSO is the only supported login method, we can + // redirect straight to the SSO page + if (matrixLogin.serverAcceptsSso) { + if (!inhibit_redirect && !matrixLogin.serverAcceptsPassword) { + $("#sso_form").submit(); + return; + } + + // Otherwise, show the SSO form + $("#sso_form").show(); + } + if (matrixLogin.serverAcceptsPassword) { $("#password_flow").show(); } - if (matrixLogin.serverAcceptsSso) { - $("#sso_flow").show(); - } else if (matrixLogin.serverAcceptsCas) { - $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect"); - $("#sso_flow").show(); - } - - if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) { + if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) { $("#no_login_types").show(); } + + set_title(title_pre_auth); + + $("#loading").hide(); }; var show_spinner = function() { @@ -74,17 +85,15 @@ var show_spinner = function() { $("#loading").show(); }; +var set_title = function(title) { + $("#title").text(title); +}; var fetch_info = function(cb) { $.get(matrixLogin.endpoint, function(response) { var serverAcceptsPassword = false; - var serverAcceptsCas = false; for (var i=0; i Date: Fri, 27 Mar 2020 16:02:00 +0100 Subject: [PATCH 326/382] update debian installation instructions to recommend installing `virtualenv` instead of `python3-virtualenv` (#6892) * change debian package from python3-virtualenv to virtualenv The virtualenv package is needed for the virtualenv command. The virtualenv package depends on python3-virtualenv (at least since debian jessie) so there is no need to specify python3-virtualenv additionally. 
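
For example, on Debian/Ubuntu the package providing the `virtualenv` command is
now installed with (a sketch; the full prerequisite list is in the INSTALL.md
hunk below):

    sudo apt-get install virtualenv
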
Signed-off-by: Vieno Hakkerinen * Add changelog Co-authored-by: Andrew Morgan --- INSTALL.md | 2 +- changelog.d/6892.doc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6892.doc diff --git a/INSTALL.md b/INSTALL.md index af9a5ef439..9c6f507db8 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -112,7 +112,7 @@ Installing prerequisites on Ubuntu or Debian: ``` sudo apt-get install build-essential python3-dev libffi-dev \ python3-pip python3-setuptools sqlite3 \ - libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev + libssl-dev virtualenv libjpeg-dev libxslt1-dev ``` #### ArchLinux diff --git a/changelog.d/6892.doc b/changelog.d/6892.doc new file mode 100644 index 0000000000..0d04cf0bdb --- /dev/null +++ b/changelog.d/6892.doc @@ -0,0 +1 @@ +Update Debian installation instructions to recommend installing the `virtualenv` package instead of `python3-virtualenv`. \ No newline at end of file From 8327eb9280cbcb492e05652a96be9f1cd1c0e7c4 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 27 Mar 2020 20:15:23 +0100 Subject: [PATCH 327/382] Add options to prevent users from changing their profile. (#7096) --- changelog.d/7096.feature | 1 + docs/sample_config.yaml | 23 ++ synapse/config/registration.py | 27 ++ synapse/handlers/profile.py | 16 ++ synapse/rest/client/v2_alpha/account.py | 16 ++ tests/handlers/test_profile.py | 65 ++++- tests/rest/client/v2_alpha/test_account.py | 302 +++++++++++++++++++++ 7 files changed, 449 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7096.feature diff --git a/changelog.d/7096.feature b/changelog.d/7096.feature new file mode 100644 index 0000000000..00f47b2a14 --- /dev/null +++ b/changelog.d/7096.feature @@ -0,0 +1 @@ +Add options to prevent users from changing their profile or associated 3PIDs. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 1a1d061759..545226f753 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1086,6 +1086,29 @@ account_threepid_delegates: #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process +# Whether users are allowed to change their displayname after it has +# been initially set. Useful when provisioning users based on the +# contents of a third-party directory. +# +# Does not apply to server administrators. Defaults to 'true' +# +#enable_set_displayname: false + +# Whether users are allowed to change their avatar after it has been +# initially set. Useful when provisioning users based on the contents +# of a third-party directory. +# +# Does not apply to server administrators. Defaults to 'true' +# +#enable_set_avatar_url: false + +# Whether users can change the 3PIDs associated with their accounts +# (email address and msisdn). 
+# +# Defaults to 'true' +# +#enable_3pid_changes: false + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 9bb3beedbc..e7ea3a01cb 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -129,6 +129,10 @@ class RegistrationConfig(Config): raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) + self.enable_set_displayname = config.get("enable_set_displayname", True) + self.enable_set_avatar_url = config.get("enable_set_avatar_url", True) + self.enable_3pid_changes = config.get("enable_3pid_changes", True) + self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False ) @@ -330,6 +334,29 @@ class RegistrationConfig(Config): #email: https://example.com # Delegate email sending to example.com #msisdn: http://localhost:8090 # Delegate SMS sending to this local process + # Whether users are allowed to change their displayname after it has + # been initially set. Useful when provisioning users based on the + # contents of a third-party directory. + # + # Does not apply to server administrators. Defaults to 'true' + # + #enable_set_displayname: false + + # Whether users are allowed to change their avatar after it has been + # initially set. Useful when provisioning users based on the contents + # of a third-party directory. + # + # Does not apply to server administrators. Defaults to 'true' + # + #enable_set_avatar_url: false + + # Whether users can change the 3PIDs associated with their accounts + # (email address and msisdn). + # + # Defaults to 'true' + # + #enable_3pid_changes: false + # Users who register on this homeserver will automatically be joined # to these rooms # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 50ce0c585b..6aa1c0f5e0 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -157,6 +157,15 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if not by_admin and not self.hs.config.enable_set_displayname: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.display_name: + raise SynapseError( + 400, + "Changing display name is disabled on this server", + Codes.FORBIDDEN, + ) + if len(new_displayname) > MAX_DISPLAYNAME_LEN: raise SynapseError( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) @@ -218,6 +227,13 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if not by_admin and not self.hs.config.enable_set_avatar_url: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.avatar_url: + raise SynapseError( + 400, "Changing avatar is disabled on this server", Codes.FORBIDDEN + ) + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: raise SynapseError( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index b1249b664c..f80b5e40ea 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -605,6 +605,11 @@ class ThreepidRestServlet(RestServlet): return 200, {"threepids": threepids} async def on_POST(self, request): + if not 
self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) + requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -649,6 +654,11 @@ class ThreepidAddRestServlet(RestServlet): @interactive_auth_handler async def on_POST(self, request): + if not self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) + requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) @@ -744,10 +754,16 @@ class ThreepidDeleteRestServlet(RestServlet): def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() + self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() async def on_POST(self, request): + if not self.hs.config.enable_3pid_changes: + raise SynapseError( + 400, "3PID changes are disabled on this server", Codes.FORBIDDEN + ) + body = parse_json_object_from_request(request) assert_params_in_dict(body, ["medium", "address"]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index d60c124eec..be665262c6 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -19,7 +19,7 @@ from mock import Mock, NonCallableMock from twisted.internet import defer import synapse.types -from synapse.api.errors import AuthError +from synapse.api.errors import AuthError, SynapseError from synapse.handlers.profile import MasterProfileHandler from synapse.types import UserID @@ -70,6 +70,7 @@ class ProfileTestCase(unittest.TestCase): yield self.store.create_profile(self.frank.localpart) self.handler = hs.get_profile_handler() + self.hs = hs @defer.inlineCallbacks def test_get_my_name(self): @@ -90,6 +91,33 @@ class ProfileTestCase(unittest.TestCase): "Frank Jr.", ) + # Set displayname again + yield self.handler.set_displayname( + self.frank, synapse.types.create_requester(self.frank), "Frank" + ) + + self.assertEquals( + (yield self.store.get_profile_displayname(self.frank.localpart)), "Frank", + ) + + @defer.inlineCallbacks + def test_set_my_name_if_disabled(self): + self.hs.config.enable_set_displayname = False + + # Setting displayname for the first time is allowed + yield self.store.set_profile_displayname(self.frank.localpart, "Frank") + + self.assertEquals( + (yield self.store.get_profile_displayname(self.frank.localpart)), "Frank", + ) + + # Setting displayname a second time is forbidden + d = self.handler.set_displayname( + self.frank, synapse.types.create_requester(self.frank), "Frank Jr." 
+ ) + + yield self.assertFailure(d, SynapseError) + @defer.inlineCallbacks def test_set_my_name_noauth(self): d = self.handler.set_displayname( @@ -147,3 +175,38 @@ class ProfileTestCase(unittest.TestCase): (yield self.store.get_profile_avatar_url(self.frank.localpart)), "http://my.server/pic.gif", ) + + # Set avatar again + yield self.handler.set_avatar_url( + self.frank, + synapse.types.create_requester(self.frank), + "http://my.server/me.png", + ) + + self.assertEquals( + (yield self.store.get_profile_avatar_url(self.frank.localpart)), + "http://my.server/me.png", + ) + + @defer.inlineCallbacks + def test_set_my_avatar_if_disabled(self): + self.hs.config.enable_set_avatar_url = False + + # Setting displayname for the first time is allowed + yield self.store.set_profile_avatar_url( + self.frank.localpart, "http://my.server/me.png" + ) + + self.assertEquals( + (yield self.store.get_profile_avatar_url(self.frank.localpart)), + "http://my.server/me.png", + ) + + # Set avatar a second time is forbidden + d = self.handler.set_avatar_url( + self.frank, + synapse.types.create_requester(self.frank), + "http://my.server/pic.gif", + ) + + yield self.assertFailure(d, SynapseError) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index c3facc00eb..45a9d445f8 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -24,6 +24,7 @@ import pkg_resources import synapse.rest.admin from synapse.api.constants import LoginType, Membership +from synapse.api.errors import Codes from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register @@ -325,3 +326,304 @@ class DeactivateTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEqual(request.code, 200) + + +class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): + + servlets = [ + account.register_servlets, + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Email config. 
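+        # The stub below records outgoing emails in self.email_attempts instead
+        # of sending them, so the tests can later extract the validation links.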
+ self.email_attempts = [] + + def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): + self.email_attempts.append(msg) + + config["email"] = { + "enable_notifs": False, + "template_dir": os.path.abspath( + pkg_resources.resource_filename("synapse", "res/templates") + ), + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + } + config["public_baseurl"] = "https://example.com" + + self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + return self.hs + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.user_id = self.register_user("kermit", "test") + self.user_id_tok = self.login("kermit", "test") + self.email = "test@example.com" + self.url_3pid = b"account/3pid" + + def test_add_email(self): + """Test adding an email to profile + """ + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) + + def test_add_email_if_disabled(self): + """Test adding email to profile when doing so is disallowed + """ + self.hs.config.enable_3pid_changes = False + + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_delete_email(self): + """Test deleting an email from profile + """ + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=self.user_id, + medium="email", + address=self.email, + validated_at=0, + added_at=0, + ) + ) + + request, channel = self.make_request( + "POST", + b"account/3pid/delete", + {"medium": "email", "address": self.email}, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Get user + request, channel = 
self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_delete_email_if_disabled(self): + """Test deleting an email from profile when disallowed + """ + self.hs.config.enable_3pid_changes = False + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=self.user_id, + medium="email", + address=self.email, + validated_at=0, + added_at=0, + ) + ) + + request, channel = self.make_request( + "POST", + b"account/3pid/delete", + {"medium": "email", "address": self.email}, + access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) + + def test_cant_add_email_without_clicking_link(self): + """Test that we do actually need to click the link in the email + """ + client_secret = "foobar" + session_id = self._request_token(self.email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + + # Attempt to add email without clicking the link + request, channel = self.make_request( + "POST", + b"/_matrix/client/unstable/account/3pid/add", + { + "client_secret": client_secret, + "sid": session_id, + "auth": { + "type": "m.login.password", + "user": self.user_id, + "password": "test", + }, + }, + access_token=self.user_id_tok, + ) + self.render(request) + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) + + # Get user + request, channel = self.make_request( + "GET", self.url_3pid, access_token=self.user_id_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertFalse(channel.json_body["threepids"]) + + def test_no_valid_token(self): + """Test that we do actually need to request a token and can't just + make a session up. 
+        """
+        client_secret = "foobar"
+        session_id = "weasle"
+
+        # Attempt to add email without even requesting an email
+        request, channel = self.make_request(
+            "POST",
+            b"/_matrix/client/unstable/account/3pid/add",
+            {
+                "client_secret": client_secret,
+                "sid": session_id,
+                "auth": {
+                    "type": "m.login.password",
+                    "user": self.user_id,
+                    "password": "test",
+                },
+            },
+            access_token=self.user_id_tok,
+        )
+        self.render(request)
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"])
+
+        # Get user
+        request, channel = self.make_request(
+            "GET", self.url_3pid, access_token=self.user_id_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertFalse(channel.json_body["threepids"])
+
+    def _request_token(self, email, client_secret):
+        request, channel = self.make_request(
+            "POST",
+            b"account/3pid/email/requestToken",
+            {"client_secret": client_secret, "email": email, "send_attempt": 1},
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+        return channel.json_body["sid"]
+
+    def _validate_token(self, link):
+        # Remove the host
+        path = link.replace("https://example.com", "")
+
+        request, channel = self.make_request("GET", path, shorthand=False)
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+    def _get_link_from_email(self):
+        assert self.email_attempts, "No emails have been sent"
+
+        raw_msg = self.email_attempts[-1].decode("UTF-8")
+        mail = Parser().parsestr(raw_msg)
+
+        text = None
+        for part in mail.walk():
+            if part.get_content_type() == "text/plain":
+                text = part.get_payload(decode=True).decode("UTF-8")
+                break
+
+        if not text:
+            self.fail("Could not find text portion of email to parse")
+
+        match = re.search(r"https://example.com\S+", text)
+        assert match, "Could not find link in email"
+
+        return match.group(0)

From fb69690761762092c8e44d509d4f72408c4c67e0 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 27 Mar 2020 20:16:43 +0100
Subject: [PATCH 328/382] Admin API to join users to a room. (#7051)

---
 changelog.d/7051.feature          |   1 +
 docs/admin_api/room_membership.md |  34 ++
 synapse/rest/admin/__init__.py    |   7 +-
 synapse/rest/admin/rooms.py       |  79 +++++++-
 tests/rest/admin/test_room.py     | 288 ++++++++++++++++++++++++++++++
 5 files changed, 405 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/7051.feature
 create mode 100644 docs/admin_api/room_membership.md
 create mode 100644 tests/rest/admin/test_room.py

diff --git a/changelog.d/7051.feature b/changelog.d/7051.feature
new file mode 100644
index 0000000000..3e36a3f65e
--- /dev/null
+++ b/changelog.d/7051.feature
@@ -0,0 +1 @@
+Add an Admin API `POST /_synapse/admin/v1/join/<room_id_or_alias>` to join users to a room, similar to `auto_join_rooms` at user creation.
\ No newline at end of file
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
new file mode 100644
index 0000000000..16736d3d37
--- /dev/null
+++ b/docs/admin_api/room_membership.md
@@ -0,0 +1,34 @@
+# Edit Room Membership API
+
+This API allows an administrator to join a user account with a given `user_id`
+to a room with a given `room_id_or_alias`. You can only modify the membership of
+local users. The server administrator must be in the room and have permission to
+invite users.
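+
+For example, a hypothetical `curl` invocation (placeholder server name and
+admin token; the canonical request and response bodies are shown under Usage
+below):
+
+```
+curl -X POST \
+    "https://synapse.example.com/_synapse/admin/v1/join/%21636q39766251%3Aserver.com?access_token=<admin_access_token>" \
+    -H "Content-Type: application/json" \
+    -d '{"user_id": "@user:server.com"}'
+```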
+
+## Parameters
+
+The following parameters are available:
+
+* `user_id` - Fully qualified user: for example, `@user:server.com`.
+* `room_id_or_alias` - The room identifier or alias to join: for example,
+  `!636q39766251:server.com`.
+
+## Usage
+
+```
+POST /_synapse/admin/v1/join/<room_id_or_alias>
+
+{
+  "user_id": "@user:server.com"
+}
+```
+
+Including an `access_token` of a server admin.
+
+Response:
+
+```
+{
+  "room_id": "!636q39766251:server.com"
+}
+```
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 42cc2b062a..ed70d448a1 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -29,7 +29,11 @@ from synapse.rest.admin._base import (
 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
-from synapse.rest.admin.rooms import ListRoomRestServlet, ShutdownRoomRestServlet
+from synapse.rest.admin.rooms import (
+    JoinRoomAliasServlet,
+    ListRoomRestServlet,
+    ShutdownRoomRestServlet,
+)
 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
 from synapse.rest.admin.users import (
     AccountValidityRenewServlet,
@@ -189,6 +193,7 @@ def register_servlets(hs, http_server):
     """
     register_servlets_for_client_rest_resource(hs, http_server)
     ListRoomRestServlet(hs).register(http_server)
+    JoinRoomAliasServlet(hs).register(http_server)
     PurgeRoomServlet(hs).register(http_server)
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f9b8c0a4f0..659b8a10ee 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
+from typing import List, Optional

-from synapse.api.constants import Membership
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
@@ -29,7 +30,7 @@ from synapse.rest.admin._base import (
     historical_admin_path_patterns,
 )
 from synapse.storage.data_stores.main.room import RoomSortOrder
-from synapse.types import create_requester
+from synapse.types import RoomAlias, RoomID, UserID, create_requester
 from synapse.util.async_helpers import maybe_awaitable

 logger = logging.getLogger(__name__)
@@ -237,3 +238,75 @@ class ListRoomRestServlet(RestServlet):
         response["prev_batch"] = 0

         return 200, response
+
+
+class JoinRoomAliasServlet(RestServlet):
+
+    PATTERNS = admin_patterns("/join/(?P<room_identifier>[^/]*)")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.room_member_handler = hs.get_room_member_handler()
+        self.admin_handler = hs.get_handlers().admin_handler
+        self.state_handler = hs.get_state_handler()
+
+    async def on_POST(self, request, room_identifier):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        content = parse_json_object_from_request(request)
+
+        assert_params_in_dict(content, ["user_id"])
+        target_user = UserID.from_string(content["user_id"])
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "This endpoint can only be used with local users")
+
+        if not await self.admin_handler.get_user(target_user):
+            raise NotFoundError("User not found")
+
+        if RoomID.is_valid(room_identifier):
+            room_id = room_identifier
+            try:
+                remote_room_hosts = [
+                    x.decode("ascii") for x in request.args[b"server_name"]
+                ]  # type: Optional[List[str]]
+            except Exception:
+                remote_room_hosts = None
+        elif RoomAlias.is_valid(room_identifier):
+            handler = self.room_member_handler
+            room_alias = RoomAlias.from_string(room_identifier)
+            room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
+            room_id = room_id.to_string()
+        else:
+            raise SynapseError(
+                400, "%s was not legal room ID or room alias" % (room_identifier,)
+            )
+
+        fake_requester = create_requester(target_user)
+
+        # send invite if room has "JoinRules.INVITE"
+        room_state = await self.state_handler.get_current_state(room_id)
+        join_rules_event = room_state.get((EventTypes.JoinRules, ""))
+        if join_rules_event:
+            if not (join_rules_event.content.get("join_rule") == JoinRules.PUBLIC):
+                await self.room_member_handler.update_membership(
+                    requester=requester,
+                    target=fake_requester.user,
+                    room_id=room_id,
+                    action="invite",
+                    remote_room_hosts=remote_room_hosts,
+                    ratelimit=False,
+                )
+
+        await self.room_member_handler.update_membership(
+            requester=fake_requester,
+            target=fake_requester.user,
+            room_id=room_id,
+            action="join",
+            remote_room_hosts=remote_room_hosts,
+            ratelimit=False,
+        )
+
+        return 200, {"room_id": room_id}
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
new file mode 100644
index 0000000000..672cc3eac5
--- /dev/null
+++ b/tests/rest/admin/test_room.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Dirk Klimpel
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, room + +from tests import unittest + +"""Tests admin REST events for /rooms paths.""" + + +class JoinAliasRoomTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.creator = self.register_user("creator", "test") + self.creator_tok = self.login("creator", "test") + + self.second_user_id = self.register_user("second", "test") + self.second_tok = self.login("second", "test") + + self.public_room_id = self.helper.create_room_as( + self.creator, tok=self.creator_tok, is_public=True + ) + self.url = "/_synapse/admin/v1/join/{}".format(self.public_room_id) + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error 403 is returned. + """ + body = json.dumps({"user_id": self.second_user_id}) + + request, channel = self.make_request( + "POST", + self.url, + content=body.encode(encoding="utf_8"), + access_token=self.second_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_invalid_parameter(self): + """ + If a parameter is missing, return an error + """ + body = json.dumps({"unknown_parameter": "@unknown:test"}) + + request, channel = self.make_request( + "POST", + self.url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) + + def test_local_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + body = json.dumps({"user_id": "@unknown:test"}) + + request, channel = self.make_request( + "POST", + self.url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_remote_user(self): + """ + Check that only local user can join rooms. + """ + body = json.dumps({"user_id": "@not:exist.bla"}) + + request, channel = self.make_request( + "POST", + self.url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual( + "This endpoint can only be used with local users", + channel.json_body["error"], + ) + + def test_room_does_not_exist(self): + """ + Check that unknown rooms/server return error 404. 
+ """ + body = json.dumps({"user_id": self.second_user_id}) + url = "/_synapse/admin/v1/join/!unknown:test" + + request, channel = self.make_request( + "POST", + url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual("No known servers", channel.json_body["error"]) + + def test_room_is_not_valid(self): + """ + Check that invalid room names, return an error 400. + """ + body = json.dumps({"user_id": self.second_user_id}) + url = "/_synapse/admin/v1/join/invalidroom" + + request, channel = self.make_request( + "POST", + url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual( + "invalidroom was not legal room ID or room alias", + channel.json_body["error"], + ) + + def test_join_public_room(self): + """ + Test joining a local user to a public room with "JoinRules.PUBLIC" + """ + body = json.dumps({"user_id": self.second_user_id}) + + request, channel = self.make_request( + "POST", + self.url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(self.public_room_id, channel.json_body["room_id"]) + + # Validate if user is a member of the room + + request, channel = self.make_request( + "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, + ) + self.render(request) + self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0]) + + def test_join_private_room_if_not_member(self): + """ + Test joining a local user to a private room with "JoinRules.INVITE" + when server admin is not member of this room. + """ + private_room_id = self.helper.create_room_as( + self.creator, tok=self.creator_tok, is_public=False + ) + url = "/_synapse/admin/v1/join/{}".format(private_room_id) + body = json.dumps({"user_id": self.second_user_id}) + + request, channel = self.make_request( + "POST", + url, + content=body.encode(encoding="utf_8"), + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_join_private_room_if_member(self): + """ + Test joining a local user to a private room with "JoinRules.INVITE", + when server admin is member of this room. + """ + private_room_id = self.helper.create_room_as( + self.creator, tok=self.creator_tok, is_public=False + ) + self.helper.invite( + room=private_room_id, + src=self.creator, + targ=self.admin_user, + tok=self.creator_tok, + ) + self.helper.join( + room=private_room_id, user=self.admin_user, tok=self.admin_user_tok + ) + + # Validate if server admin is a member of the room + + request, channel = self.make_request( + "GET", "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) + + # Join user to room. 
+
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate if user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])
+
+    def test_join_private_room_if_owner(self):
+        """
+        Test joining a local user to a private room with "JoinRules.INVITE",
+        when server admin is owner of this room.
+        """
+        private_room_id = self.helper.create_room_as(
+            self.admin_user, tok=self.admin_user_tok, is_public=False
+        )
+        url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+        body = json.dumps({"user_id": self.second_user_id})
+
+        request, channel = self.make_request(
+            "POST",
+            url,
+            content=body.encode(encoding="utf_8"),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["room_id"])
+
+        # Validate if user is a member of the room
+
+        request, channel = self.make_request(
+            "GET", "/_matrix/client/r0/joined_rooms", access_token=self.second_tok,
+        )
+        self.render(request)
+        self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0])

From 7571bf86f0399b2376427f3a6d91b8850e45b8f8 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 27 Mar 2020 15:44:13 +0100
Subject: [PATCH 329/382] Improve the UX of the login fallback when using SSO
 (#7152)

* Don't show the login forms if we're currently logging in with a password
  or a token.

* Submit the SSO login form directly, showing only a spinner to the user, in
  order to eliminate some of the clunkiness of SSO through this fallback.
---
 changelog.d/7152.feature                |  1 +
 synapse/static/client/login/index.html  |  2 +-
 synapse/static/client/login/js/login.js | 51 +++++++++++++++----------
 3 files changed, 32 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/7152.feature

diff --git a/changelog.d/7152.feature b/changelog.d/7152.feature
new file mode 100644
index 0000000000..fafa79c7e7
--- /dev/null
+++ b/changelog.d/7152.feature
@@ -0,0 +1 @@
+Improve the support for SSO authentication on the login fallback page.
diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html
index bcb6bc6bb7..712b0e3980 100644
--- a/synapse/static/client/login/index.html
+++ b/synapse/static/client/login/index.html
@@ -9,7 +9,7 @@
-        <h1>Log in with one of the following methods</h1>
+        <h1 id="title"></h1>
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index 276c271bbe..debe464371 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -1,37 +1,41 @@ window.matrixLogin = { endpoint: location.origin + "/_matrix/client/r0/login", serverAcceptsPassword: false, - serverAcceptsCas: false, serverAcceptsSso: false, }; +var title_pre_auth = "Log in with one of the following methods"; +var title_post_auth = "Logging in..."; + var submitPassword = function(user, pwd) { console.log("Logging in with password..."); + set_title(title_post_auth); var data = { type: "m.login.password", user: user, password: pwd, }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var submitToken = function(loginToken) { console.log("Logging in with login token..."); + set_title(title_post_auth); var data = { type: "m.login.token", token: loginToken }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var errorFunc = function(err) { - show_login(); + // We want to show the error to the user rather than redirecting immediately to the + // SSO portal (if SSO is the only login option), so we inhibit the redirect. + show_login(true); if (err.responseJSON && err.responseJSON.error) { setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); @@ -45,26 +49,33 @@ var setFeedbackString = function(text) { $("#feedback").text(text); }; -var show_login = function() { - $("#loading").hide(); - +var show_login = function(inhibit_redirect) { var this_page = window.location.origin + window.location.pathname; $("#sso_redirect_url").val(this_page); + // If inhibit_redirect is false, and SSO is the only supported login method, we can + // redirect straight to the SSO page + if (matrixLogin.serverAcceptsSso) { + if (!inhibit_redirect && !matrixLogin.serverAcceptsPassword) { + $("#sso_form").submit(); + return; + } + + // Otherwise, show the SSO form + $("#sso_form").show(); + } + if (matrixLogin.serverAcceptsPassword) { $("#password_flow").show(); } - if (matrixLogin.serverAcceptsSso) { - $("#sso_flow").show(); - } else if (matrixLogin.serverAcceptsCas) { - $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect"); - $("#sso_flow").show(); - } - - if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) { + if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) { $("#no_login_types").show(); } + + set_title(title_pre_auth); + + $("#loading").hide(); }; var show_spinner = function() { @@ -74,17 +85,15 @@ var show_spinner = function() { $("#loading").show(); }; +var set_title = function(title) { + $("#title").text(title); +}; var fetch_info = function(cb) { $.get(matrixLogin.endpoint, function(response) { var serverAcceptsPassword = false; - var serverAcceptsCas = false; for (var i=0; i Date: Fri, 27 Mar 2020 20:24:52 +0000 Subject: [PATCH 330/382] Always whitelist the login fallback for SSO (#7153) That fallback sets the redirect URL to itself (so it can process the login token then return gracefully to the client). This would make it pointless to ask the user for confirmation, since the URL the confirmation page would be showing wouldn't be the client's. 
--- changelog.d/7153.feature | 1 + docs/sample_config.yaml | 4 ++++ synapse/config/sso.py | 15 +++++++++++++++ tests/rest/client/v1/test_login.py | 9 ++++++++- 4 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7153.feature diff --git a/changelog.d/7153.feature b/changelog.d/7153.feature new file mode 100644 index 0000000000..414ebe1f69 --- /dev/null +++ b/changelog.d/7153.feature @@ -0,0 +1 @@ +Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 545226f753..743949945a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1444,6 +1444,10 @@ sso: # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # + # If public_baseurl is set, then the login fallback page (used by clients + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. + # # By default, this list is empty. # #client_whitelist: diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 95762689bc..ec3dca9efc 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -39,6 +39,17 @@ class SSOConfig(Config): self.sso_client_whitelist = sso_config.get("client_whitelist") or [] + # Attempt to also whitelist the server's login fallback, since that fallback sets + # the redirect URL to itself (so it can process the login token then return + # gracefully to the client). This would make it pointless to ask the user for + # confirmation, since the URL the confirmation page would be showing wouldn't be + # the client's. + # public_baseurl is an optional setting, so we only add the fallback's URL to the + # list if it's provided (because we can't figure out what that URL is otherwise). + if self.public_baseurl: + login_fallback_url = self.public_baseurl + "_matrix/static/client/login" + self.sso_client_whitelist.append(login_fallback_url) + def generate_config_section(self, **kwargs): return """\ # Additional settings to use with single-sign on systems such as SAML2 and CAS. @@ -54,6 +65,10 @@ class SSOConfig(Config): # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # + # If public_baseurl is set, then the login fallback page (used by clients + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. + # # By default, this list is empty. 
# #client_whitelist: diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index da2c9bfa1e..aed8853d6e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -350,7 +350,14 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): def test_cas_redirect_whitelisted(self): """Tests that the SSO login flow serves a redirect to a whitelisted url """ - redirect_url = "https://legit-site.com/" + self._test_redirect("https://legit-site.com/") + + @override_config({"public_baseurl": "https://example.com"}) + def test_cas_redirect_login_fallback(self): + self._test_redirect("https://example.com/_matrix/static/client/login") + + def _test_redirect(self, redirect_url): + """Tests that the SSO login flow serves a redirect for the given redirect URL.""" cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" % (urllib.parse.quote(redirect_url)) From 9fc588e6dc03bb64f569b4e27a786abd78c36218 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 30 Mar 2020 10:11:26 +0100 Subject: [PATCH 331/382] Just add own user ID to the list we track device changes for --- synapse/handlers/device.py | 8 +++++--- synapse/handlers/sync.py | 10 ++++++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 07ce553995..51bfad01c8 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -122,7 +122,9 @@ class DeviceWorkerHandler(BaseHandler): # First we check if any devices have changed for users that we share # rooms with. - tracked_users = yield self.store.get_users_who_share_room_with_user(user_id) + users_who_share_room = yield self.store.get_users_who_share_room_with_user(user_id) + + tracked_users = set(users_who_share_room) # always tell the user about their own devices tracked_users.add(user_id) @@ -217,8 +219,8 @@ class DeviceWorkerHandler(BaseHandler): if possibly_changed or possibly_left: # Take the intersection of the users whose devices may have changed # and those that actually still share a room with the user - possibly_joined = possibly_changed & tracked_users - possibly_left = (possibly_changed | possibly_left) - tracked_users + possibly_joined = possibly_changed & users_who_share_room + possibly_left = (possibly_changed | possibly_left) - users_who_share_room else: possibly_joined = [] possibly_left = [] diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index fd68a31b09..725f41c4d9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1139,14 +1139,16 @@ class SyncHandler(object): # room with by looking at all users that have left a room plus users # that were in a room we've left. - users_we_track = await self.store.get_users_who_share_room_with_user( + users_who_share_room = await self.store.get_users_who_share_room_with_user( user_id ) - users_we_track.add(user_id) + + tracked_users = set(users_who_share_room) + tracked_users.add(user_id) # Step 1a, check for changes in devices of users we share a room with users_that_have_changed = await self.store.get_users_whose_devices_changed( - since_token.device_list_key, users_we_track + since_token.device_list_key, tracked_users ) # Step 1b, check for newly joined rooms @@ -1169,7 +1171,7 @@ class SyncHandler(object): newly_left_users.update(left_users) # Remove any users that we still share a room with. 
- newly_left_users -= users_we_track + newly_left_users -= users_who_share_room return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: From 740647752576228e469918bafbb97ff556ff5ebe Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 30 Mar 2020 10:18:33 +0100 Subject: [PATCH 332/382] black --- synapse/handlers/device.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 51bfad01c8..d256b86ce5 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -122,7 +122,9 @@ class DeviceWorkerHandler(BaseHandler): # First we check if any devices have changed for users that we share # rooms with. - users_who_share_room = yield self.store.get_users_who_share_room_with_user(user_id) + users_who_share_room = yield self.store.get_users_who_share_room_with_user( + user_id + ) tracked_users = set(users_who_share_room) # always tell the user about their own devices From c5f89fba55b2529b2c8a76e272a21d551ffa82fe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 30 Mar 2020 07:28:42 -0400 Subject: [PATCH 333/382] Add developer documentation for running a local CAS server (#7147) --- changelog.d/7147.doc | 1 + docs/dev/cas.md | 64 ++++++++++++++++++++++++++++++++++++++++++++ docs/dev/saml.md | 8 ++++-- 3 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7147.doc create mode 100644 docs/dev/cas.md diff --git a/changelog.d/7147.doc b/changelog.d/7147.doc new file mode 100644 index 0000000000..2c855ff5f7 --- /dev/null +++ b/changelog.d/7147.doc @@ -0,0 +1 @@ +Add documentation for running a local CAS server for testing. diff --git a/docs/dev/cas.md b/docs/dev/cas.md new file mode 100644 index 0000000000..f8d02cc82c --- /dev/null +++ b/docs/dev/cas.md @@ -0,0 +1,64 @@ +# How to test CAS as a developer without a server + +The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an +easy to run CAS implementation built on top of Django. + +## Prerequisites + +1. Create a new virtualenv: `python3 -m venv ` +2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate` +3. Install Django and django-mama-cas: + ``` + python -m pip install "django<3" "django-mama-cas==2.4.0" + ``` +4. Create a Django project in the current directory: + ``` + django-admin startproject cas_test . + ``` +5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas +6. Setup the SQLite database: `python manage.py migrate` +7. Create a user: + ``` + python manage.py createsuperuser + ``` + 1. Use whatever you want as the username and password. + 2. Leave the other fields blank. +8. Use the built-in Django test server to serve the CAS endpoints on port 8000: + ``` + python manage.py runserver + ``` + +You should now have a Django project configured to serve CAS authentication with +a single user created. + +## Configure Synapse (and Riot) to use CAS + +1. Modify your `homeserver.yaml` to enable CAS and point it to your locally + running Django test server: + ```yaml + cas_config: + enabled: true + server_url: "http://localhost:8000" + service_url: "http://localhost:8081" + #displayname_attribute: name + #required_attributes: + # name: value + ``` +2. Restart Synapse. + +Note that the above configuration assumes the homeserver is running on port 8081 +and that the CAS server is on port 8000, both on localhost. + +## Testing the configuration + +Then in Riot: + +1. 
Visit the login page with a Riot pointing at your homeserver. +2. Click the Single Sign-On button. +3. Login using the credentials created with `createsuperuser`. +4. You should be logged in. + +If you want to repeat this process you'll need to manually logout first: + +1. http://localhost:8000/admin/ +2. Click "logout" in the top right. diff --git a/docs/dev/saml.md b/docs/dev/saml.md index f41aadce47..a9bfd2dc05 100644 --- a/docs/dev/saml.md +++ b/docs/dev/saml.md @@ -18,9 +18,13 @@ To make Synapse (and therefore Riot) use it: metadata: local: ["samling.xml"] ``` -5. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure +5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`: + ```yaml + public_baseurl: http://localhost:8080/ + ``` +6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure the dependencies are installed and ready to go. -6. Restart Synapse. +7. Restart Synapse. Then in Riot: From e577c5d607b1edb827a5a137dd77587e394fdf5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 30 Mar 2020 13:55:01 +0100 Subject: [PATCH 334/382] Attempt to clarify Python version requirements (#7161) In particular, we depend on `typing.TYPE_CHECKING`, which is only present in 3.5.2. It turns out that Ubuntu Xenial, despite having a package called `python 3 (3.5.1-3)`, actually has python 3.5.2, so I think this is fine. --- INSTALL.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index c0926ba590..8ded6e9092 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.5, 3.6, 3.7 or 3.8. +- Python 3.5.2 or later, up to Python 3.8. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org Synapse is written in Python but some of the libraries it uses are written in @@ -427,8 +427,8 @@ so, you will need to edit `homeserver.yaml`, as follows: for having Synapse automatically provision and renew federation certificates through ACME can be found at [ACME.md](docs/ACME.md). Note that, as pointed out in that document, this feature will not - work with installs set up after November 2019. - + work with installs set up after November 2019. 
+
   If you are using your own certificate, be sure to use a `.pem` file that
   includes the full certificate chain including any intermediate certificates
   (for instance, if using certbot, use `fullchain.pem` as your certificate, not

From 104844c1e1a99df9c4a7e022e715517578533db7 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Mon, 30 Mar 2020 14:00:11 +0100
Subject: [PATCH 335/382] Add explanatory comment

---
 synapse/handlers/device.py | 3 ++-
 synapse/handlers/sync.py   | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index d256b86ce5..993499f446 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -127,7 +127,8 @@ class DeviceWorkerHandler(BaseHandler):
         )
 
         tracked_users = set(users_who_share_room)
-        # always tell the user about their own devices
+
+        # Always tell the user about their own devices
         tracked_users.add(user_id)
 
         changed = yield self.store.get_users_whose_devices_changed(
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 725f41c4d9..1f1cde2feb 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1144,6 +1144,8 @@ class SyncHandler(object):
         )
 
         tracked_users = set(users_who_share_room)
+
+        # Always tell the user about their own devices
         tracked_users.add(user_id)
 
         # Step 1a, check for changes in devices of users we share a room with

From 4f21c33be301b8ea6369039c3ad8baa51878e4d5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 30 Mar 2020 16:37:24 +0100
Subject: [PATCH 336/382] Remove usage of "conn_id" for presence. (#7128)

* Remove `conn_id` usage for UserSyncCommand.

Each tcp replication connection is assigned a "conn_id", which is used to
give an ID to a remotely connected worker. In a redis world, there will no
longer be a one-to-one mapping between connection and instance, so instead
we need to replace such usages with an ID generated by the remote instances
and included in the replication commands.

This really only affects UserSyncCommand.

* Add CLEAR_USER_SYNCS command that is sent on shutdown.

This should help with the case where a synchrotron gets restarted
gracefully, rather than relying on the 5 minute timeout.
---
 changelog.d/7128.misc               |  1 +
 docs/tcp_replication.md             |  6 +++++
 synapse/app/generic_worker.py       | 20 ++++++++++++----
 synapse/replication/tcp/client.py   |  6 +++--
 synapse/replication/tcp/commands.py | 36 +++++++++++++++++++++++++----
 synapse/replication/tcp/protocol.py |  9 ++++++--
 synapse/replication/tcp/resource.py | 17 ++++++--------
 synapse/server.py                   | 11 +++++++++
 synapse/server.pyi                  |  2 ++
 9 files changed, 86 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/7128.misc

diff --git a/changelog.d/7128.misc b/changelog.d/7128.misc
new file mode 100644
index 0000000000..5703f6d2ec
--- /dev/null
+++ b/changelog.d/7128.misc
@@ -0,0 +1 @@
+Add explicit `instance_id` for USER_SYNC commands and remove implicit `conn_id` usage.
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index d4f7d9ec18..3be8e50c4c 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -198,6 +198,12 @@ Asks the server for the current position of all streams.
 
     A user has started or stopped syncing
 
+#### CLEAR_USER_SYNC (C)
+
+   The server should clear all associated user sync data from the worker.
+
+   This is used when a worker is shutting down.
+ #### FEDERATION_ACK (C) Acknowledge receipt of some federation data diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index fba7ad9551..1ee266f7c5 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -65,6 +65,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, @@ -124,7 +125,6 @@ from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole -from synapse.util.stringutils import random_string from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.generic_worker") @@ -233,6 +233,7 @@ class GenericWorkerPresence(object): self.user_to_num_current_syncs = {} self.clock = hs.get_clock() self.notifier = hs.get_notifier() + self.instance_id = hs.get_instance_id() active_presence = self.store.take_presence_startup_info() self.user_to_current_state = {state.user_id: state for state in active_presence} @@ -245,13 +246,24 @@ class GenericWorkerPresence(object): self.send_stop_syncing, UPDATE_SYNCING_USERS_MS ) - self.process_id = random_string(16) - logger.info("Presence process_id is %r", self.process_id) + hs.get_reactor().addSystemEventTrigger( + "before", + "shutdown", + run_as_background_process, + "generic_presence.on_shutdown", + self._on_shutdown, + ) + + def _on_shutdown(self): + if self.hs.config.use_presence: + self.hs.get_tcp_replication().send_command( + ClearUserSyncsCommand(self.instance_id) + ) def send_user_sync(self, user_id, is_syncing, last_sync_ms): if self.hs.config.use_presence: self.hs.get_tcp_replication().send_user_sync( - user_id, is_syncing, last_sync_ms + self.instance_id, user_id, is_syncing, last_sync_ms ) def mark_as_coming_online(self, user_id): diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 7e7ad0f798..e86d9805f1 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -189,10 +189,12 @@ class ReplicationClientHandler(AbstractReplicationClientHandler): """ self.send_command(FederationAckCommand(token)) - def send_user_sync(self, user_id, is_syncing, last_sync_ms): + def send_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms): """Poke the master that a user has started/stopped syncing. 
""" - self.send_command(UserSyncCommand(user_id, is_syncing, last_sync_ms)) + self.send_command( + UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) + ) def send_remove_pusher(self, app_id, push_key, user_id): """Poke the master to remove a pusher for a user diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 5a6b734094..e4eec643f7 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -207,30 +207,32 @@ class UserSyncCommand(Command): Format:: - USER_SYNC + USER_SYNC Where is either "start" or "stop" """ NAME = "USER_SYNC" - def __init__(self, user_id, is_syncing, last_sync_ms): + def __init__(self, instance_id, user_id, is_syncing, last_sync_ms): + self.instance_id = instance_id self.user_id = user_id self.is_syncing = is_syncing self.last_sync_ms = last_sync_ms @classmethod def from_line(cls, line): - user_id, state, last_sync_ms = line.split(" ", 2) + instance_id, user_id, state, last_sync_ms = line.split(" ", 3) if state not in ("start", "end"): raise Exception("Invalid USER_SYNC state %r" % (state,)) - return cls(user_id, state == "start", int(last_sync_ms)) + return cls(instance_id, user_id, state == "start", int(last_sync_ms)) def to_line(self): return " ".join( ( + self.instance_id, self.user_id, "start" if self.is_syncing else "end", str(self.last_sync_ms), @@ -238,6 +240,30 @@ class UserSyncCommand(Command): ) +class ClearUserSyncsCommand(Command): + """Sent by the client to inform the server that it should drop all + information about syncing users sent by the client. + + Mainly used when client is about to shut down. + + Format:: + + CLEAR_USER_SYNC + """ + + NAME = "CLEAR_USER_SYNC" + + def __init__(self, instance_id): + self.instance_id = instance_id + + @classmethod + def from_line(cls, line): + return cls(line) + + def to_line(self): + return self.instance_id + + class FederationAckCommand(Command): """Sent by the client when it has processed up to a given point in the federation stream. This allows the master to drop in-memory caches of the @@ -398,6 +424,7 @@ _COMMANDS = ( InvalidateCacheCommand, UserIpCommand, RemoteServerUpCommand, + ClearUserSyncsCommand, ) # type: Tuple[Type[Command], ...] # Map of command name to command type. @@ -420,6 +447,7 @@ VALID_CLIENT_COMMANDS = ( ReplicateCommand.NAME, PingCommand.NAME, UserSyncCommand.NAME, + ClearUserSyncsCommand.NAME, FederationAckCommand.NAME, RemovePusherCommand.NAME, InvalidateCacheCommand.NAME, diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index f81d2e2442..dae246825f 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -423,9 +423,12 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): async def on_USER_SYNC(self, cmd): await self.streamer.on_user_sync( - self.conn_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms + cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms ) + async def on_CLEAR_USER_SYNC(self, cmd): + await self.streamer.on_clear_user_syncs(cmd.instance_id) + async def on_REPLICATE(self, cmd): # Subscribe to all streams we're publishing to. 
for stream_name in self.streamer.streams_by_name: @@ -551,6 +554,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): ): BaseReplicationStreamProtocol.__init__(self, clock) + self.instance_id = hs.get_instance_id() + self.client_name = client_name self.server_name = server_name self.handler = handler @@ -580,7 +585,7 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): currently_syncing = self.handler.get_currently_syncing_users() now = self.clock.time_msec() for user_id in currently_syncing: - self.send_command(UserSyncCommand(user_id, True, now)) + self.send_command(UserSyncCommand(self.instance_id, user_id, True, now)) # We've now finished connecting to so inform the client handler self.handler.update_connection(self) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 4374e99e32..8b6067e20d 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -251,14 +251,19 @@ class ReplicationStreamer(object): self.federation_sender.federation_ack(token) @measure_func("repl.on_user_sync") - async def on_user_sync(self, conn_id, user_id, is_syncing, last_sync_ms): + async def on_user_sync(self, instance_id, user_id, is_syncing, last_sync_ms): """A client has started/stopped syncing on a worker. """ user_sync_counter.inc() await self.presence_handler.update_external_syncs_row( - conn_id, user_id, is_syncing, last_sync_ms + instance_id, user_id, is_syncing, last_sync_ms ) + async def on_clear_user_syncs(self, instance_id): + """A replication client wants us to drop all their UserSync data. + """ + await self.presence_handler.update_external_syncs_clear(instance_id) + @measure_func("repl.on_remove_pusher") async def on_remove_pusher(self, app_id, push_key, user_id): """A client has asked us to remove a pusher @@ -321,14 +326,6 @@ class ReplicationStreamer(object): except ValueError: pass - # We need to tell the presence handler that the connection has been - # lost so that it can handle any ongoing syncs on that connection. - run_as_background_process( - "update_external_syncs_clear", - self.presence_handler.update_external_syncs_clear, - connection.conn_id, - ) - def _batch_updates(updates): """Takes a list of updates of form [(token, row)] and sets the token to diff --git a/synapse/server.py b/synapse/server.py index c7ca2bda0d..cd86475d6b 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -103,6 +103,7 @@ from synapse.storage import DataStores, Storage from synapse.streams.events import EventSources from synapse.util import Clock from synapse.util.distributor import Distributor +from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) @@ -230,6 +231,8 @@ class HomeServer(object): self._listening_services = [] self.start_time = None + self.instance_id = random_string(5) + self.clock = Clock(reactor) self.distributor = Distributor() self.ratelimiter = Ratelimiter() @@ -242,6 +245,14 @@ class HomeServer(object): for depname in kwargs: setattr(self, depname, kwargs[depname]) + def get_instance_id(self): + """A unique ID for this synapse process instance. + + This is used to distinguish running instances in worker-based + deployments. 
+ """ + return self.instance_id + def setup(self): logger.info("Setting up.") self.start_time = int(self.get_clock().time()) diff --git a/synapse/server.pyi b/synapse/server.pyi index 3844f0e12f..9d1dfa71e7 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -114,3 +114,5 @@ class HomeServer(object): pass def is_mine_id(self, domain_id: str) -> bool: pass + def get_instance_id(self) -> str: + pass From d9f29f8daef2f49464382b0e80ee93ff38681e99 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 30 Mar 2020 17:38:21 +0100 Subject: [PATCH 337/382] Fix a small typo in the `metrics_flags` config option. (#7171) --- changelog.d/7171.doc | 1 + docs/sample_config.yaml | 2 +- synapse/config/metrics.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7171.doc diff --git a/changelog.d/7171.doc b/changelog.d/7171.doc new file mode 100644 index 0000000000..25a3bd8ac6 --- /dev/null +++ b/changelog.d/7171.doc @@ -0,0 +1 @@ +Fix a small typo in the `metrics_flags` config option. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 743949945a..6a770508f9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1144,7 +1144,7 @@ account_threepid_delegates: # enabled by default, either for performance reasons or limited use. # metrics_flags: - # Publish synapse_federation_known_servers, a g auge of the number of + # Publish synapse_federation_known_servers, a gauge of the number of # servers this homeserver knows about, including itself. May cause # performance problems on large homeservers. # diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 22538153e1..6f517a71d0 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -86,7 +86,7 @@ class MetricsConfig(Config): # enabled by default, either for performance reasons or limited use. # metrics_flags: - # Publish synapse_federation_known_servers, a g auge of the number of + # Publish synapse_federation_known_servers, a gauge of the number of # servers this homeserver knows about, including itself. May cause # performance problems on large homeservers. # From 7042840b3201644ee71ea3e446576aa347b6d2a3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 30 Mar 2020 17:53:25 +0100 Subject: [PATCH 338/382] Transfer alias mappings when joining an upgraded room (#6946) --- changelog.d/6946.bugfix | 1 + synapse/handlers/room_member.py | 3 +++ synapse/storage/data_stores/main/directory.py | 26 ++++++++++++++++--- 3 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6946.bugfix diff --git a/changelog.d/6946.bugfix b/changelog.d/6946.bugfix new file mode 100644 index 0000000000..a238c83a18 --- /dev/null +++ b/changelog.d/6946.bugfix @@ -0,0 +1 @@ +Transfer alias mappings on room upgrade. 
\ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 4260426369..c3ee8db4f0 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -519,6 +519,9 @@ class RoomMemberHandler(object): yield self.store.set_room_is_public(old_room_id, False) yield self.store.set_room_is_public(room_id, True) + # Transfer alias mappings in the room directory + yield self.store.update_aliases_for_room(old_room_id, room_id) + # Check if any groups we own contain the predecessor room local_group_ids = yield self.store.get_local_groups_for_room(old_room_id) for group_id in local_group_ids: diff --git a/synapse/storage/data_stores/main/directory.py b/synapse/storage/data_stores/main/directory.py index c9e7de7d12..e1d1bc3e05 100644 --- a/synapse/storage/data_stores/main/directory.py +++ b/synapse/storage/data_stores/main/directory.py @@ -14,6 +14,7 @@ # limitations under the License. from collections import namedtuple +from typing import Optional from twisted.internet import defer @@ -159,10 +160,29 @@ class DirectoryStore(DirectoryWorkerStore): return room_id - def update_aliases_for_room(self, old_room_id, new_room_id, creator): + def update_aliases_for_room( + self, old_room_id: str, new_room_id: str, creator: Optional[str] = None, + ): + """Repoint all of the aliases for a given room, to a different room. + + Args: + old_room_id: + new_room_id: + creator: The user to record as the creator of the new mapping. + If None, the creator will be left unchanged. + """ + def _update_aliases_for_room_txn(txn): - sql = "UPDATE room_aliases SET room_id = ?, creator = ? WHERE room_id = ?" - txn.execute(sql, (new_room_id, creator, old_room_id)) + update_creator_sql = "" + sql_params = (new_room_id, old_room_id) + if creator: + update_creator_sql = ", creator = ?" + sql_params = (new_room_id, creator, old_room_id) + + sql = "UPDATE room_aliases SET room_id = ? %s WHERE room_id = ?" % ( + update_creator_sql, + ) + txn.execute(sql, sql_params) self._invalidate_cache_and_stream( txn, self.get_aliases_for_room, (old_room_id,) ) From 7966a1cde9d4b598faa06620424844f2b35c94af Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 30 Mar 2020 19:06:52 +0100 Subject: [PATCH 339/382] Rewrite prune_old_outbound_device_pokes for efficiency (#7159) make sure we clear out all but one update for the user --- changelog.d/7159.bugfix | 1 + synapse/handlers/federation.py | 25 +----- synapse/storage/data_stores/main/devices.py | 71 +++++++++++++--- synapse/util/stringutils.py | 21 ++++- tests/federation/test_federation_sender.py | 92 +++++++++++++++++++++ 5 files changed, 173 insertions(+), 37 deletions(-) create mode 100644 changelog.d/7159.bugfix diff --git a/changelog.d/7159.bugfix b/changelog.d/7159.bugfix new file mode 100644 index 0000000000..1b341b127b --- /dev/null +++ b/changelog.d/7159.bugfix @@ -0,0 +1 @@ +Fix excessive CPU usage by `prune_old_outbound_device_pokes` job. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 38ab6a8fc3..c7aa7acf3b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -49,6 +49,7 @@ from synapse.event_auth import auth_types_for_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator +from synapse.handlers._base import BaseHandler from synapse.logging.context import ( make_deferred_yieldable, nested_logging_context, @@ -69,10 +70,9 @@ from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination +from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server -from ._base import BaseHandler - logger = logging.getLogger(__name__) @@ -93,27 +93,6 @@ class _NewEventInfo: auth_events = attr.ib(type=Optional[StateMap[EventBase]], default=None) -def shortstr(iterable, maxitems=5): - """If iterable has maxitems or fewer, return the stringification of a list - containing those items. - - Otherwise, return the stringification of a a list with the first maxitems items, - followed by "...". - - Args: - iterable (Iterable): iterable to truncate - maxitems (int): number of items to return before truncating - - Returns: - unicode - """ - - items = list(itertools.islice(iterable, maxitems + 1)) - if len(items) <= maxitems: - return str(items) - return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]" - - class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 2d47cfd131..3140e1b722 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -41,6 +41,7 @@ from synapse.util.caches.descriptors import ( cachedList, ) from synapse.util.iterutils import batch_iter +from synapse.util.stringutils import shortstr logger = logging.getLogger(__name__) @@ -1092,18 +1093,47 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): ], ) - def _prune_old_outbound_device_pokes(self): + def _prune_old_outbound_device_pokes(self, prune_age=24 * 60 * 60 * 1000): """Delete old entries out of the device_lists_outbound_pokes to ensure - that we don't fill up due to dead servers. We keep one entry per - (destination, user_id) tuple to ensure that the prev_ids remain correct - if the server does come back. + that we don't fill up due to dead servers. + + Normally, we try to send device updates as a delta since a previous known point: + this is done by setting the prev_id in the m.device_list_update EDU. However, + for that to work, we have to have a complete record of each change to + each device, which can add up to quite a lot of data. + + An alternative mechanism is that, if the remote server sees that it has missed + an entry in the stream_id sequence for a given user, it will request a full + list of that user's devices. Hence, we can reduce the amount of data we have to + store (and transmit in some future transaction), by clearing almost everything + for a given destination out of the database, and having the remote server + resync. 
+ + All we need to do is make sure we keep at least one row for each + (user, destination) pair, to remind us to send a m.device_list_update EDU for + that user when the destination comes back. It doesn't matter which device + we keep. """ - yesterday = self._clock.time_msec() - 24 * 60 * 60 * 1000 + yesterday = self._clock.time_msec() - prune_age def _prune_txn(txn): + # look for (user, destination) pairs which have an update older than + # the cutoff. + # + # For each pair, we also need to know the most recent stream_id, and + # an arbitrary device_id at that stream_id. select_sql = """ - SELECT destination, user_id, max(stream_id) as stream_id - FROM device_lists_outbound_pokes + SELECT + dlop1.destination, + dlop1.user_id, + MAX(dlop1.stream_id) AS stream_id, + (SELECT MIN(dlop2.device_id) AS device_id FROM + device_lists_outbound_pokes dlop2 + WHERE dlop2.destination = dlop1.destination AND + dlop2.user_id=dlop1.user_id AND + dlop2.stream_id=MAX(dlop1.stream_id) + ) + FROM device_lists_outbound_pokes dlop1 GROUP BY destination, user_id HAVING min(ts) < ? AND count(*) > 1 """ @@ -1114,14 +1144,29 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): if not rows: return + logger.info( + "Pruning old outbound device list updates for %i users/destinations: %s", + len(rows), + shortstr((row[0], row[1]) for row in rows), + ) + + # we want to keep the update with the highest stream_id for each user. + # + # there might be more than one update (with different device_ids) with the + # same stream_id, so we also delete all but one rows with the max stream id. delete_sql = """ DELETE FROM device_lists_outbound_pokes - WHERE ts < ? AND destination = ? AND user_id = ? AND stream_id < ? + WHERE destination = ? AND user_id = ? AND ( + stream_id < ? OR + (stream_id = ? AND device_id != ?) + ) """ - - txn.executemany( - delete_sql, ((yesterday, row[0], row[1], row[2]) for row in rows) - ) + count = 0 + for (destination, user_id, stream_id, device_id) in rows: + txn.execute( + delete_sql, (destination, user_id, stream_id, stream_id, device_id) + ) + count += txn.rowcount # Since we've deleted unsent deltas, we need to remove the entry # of last successful sent so that the prev_ids are correctly set. @@ -1131,7 +1176,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): """ txn.executemany(sql, ((row[0], row[1]) for row in rows)) - logger.info("Pruned %d device list outbound pokes", txn.rowcount) + logger.info("Pruned %d device list outbound pokes", count) return run_as_background_process( "prune_old_outbound_device_pokes", diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 2c0dcb5208..6899bcb788 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -13,10 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import itertools import random import re import string +from collections import Iterable import six from six import PY2, PY3 @@ -126,3 +127,21 @@ def assert_valid_client_secret(client_secret): raise SynapseError( 400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM ) + + +def shortstr(iterable: Iterable, maxitems: int = 5) -> str: + """If iterable has maxitems or fewer, return the stringification of a list + containing those items. + + Otherwise, return the stringification of a a list with the first maxitems items, + followed by "...". 
+ + Args: + iterable: iterable to truncate + maxitems: number of items to return before truncating + """ + + items = list(itertools.islice(iterable, maxitems + 1)) + if len(items) <= maxitems: + return str(items) + return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]" diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 7763b12159..a5fe5c6880 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -370,6 +370,98 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) + def test_prune_outbound_device_pokes1(self): + """If a destination is unreachable, and the updates are pruned, we should get + a single update. + + This case tests the behaviour when the server has never been reachable. + """ + mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + + # create devices + u1 = self.register_user("user", "pass") + self.login("user", "pass", device_id="D1") + self.login("user", "pass", device_id="D2") + self.login("user", "pass", device_id="D3") + + # delete them again + self.get_success( + self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) + ) + + self.assertGreaterEqual(mock_send_txn.call_count, 4) + + # run the prune job + self.reactor.advance(10) + self.get_success( + self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1) + ) + + # recover the server + mock_send_txn.side_effect = self.record_transaction + self.hs.get_federation_sender().send_device_messages("host2") + self.pump() + + # there should be a single update for this user. + self.assertEqual(len(self.edus), 1) + edu = self.edus.pop(0) + self.assertEqual(edu["edu_type"], "m.device_list_update") + c = edu["content"] + + # synapse uses an empty prev_id list to indicate "needs a full resync". + self.assertEqual(c["prev_id"], []) + + def test_prune_outbound_device_pokes2(self): + """If a destination is unreachable, and the updates are pruned, we should get + a single update. + + This case tests the behaviour when the server was reachable, but then goes + offline. + """ + + # create first device + u1 = self.register_user("user", "pass") + self.login("user", "pass", device_id="D1") + + # expect the update EDU + self.assertEqual(len(self.edus), 1) + self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) + + # now the server goes offline + mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn.side_effect = lambda t, cb: defer.fail("fail") + + self.login("user", "pass", device_id="D2") + self.login("user", "pass", device_id="D3") + + # delete them again + self.get_success( + self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) + ) + + self.assertGreaterEqual(mock_send_txn.call_count, 3) + + # run the prune job + self.reactor.advance(10) + self.get_success( + self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1) + ) + + # recover the server + mock_send_txn.side_effect = self.record_transaction + self.hs.get_federation_sender().send_device_messages("host2") + self.pump() + + # ... and we should get a single update for this user. 
+ self.assertEqual(len(self.edus), 1) + edu = self.edus.pop(0) + self.assertEqual(edu["edu_type"], "m.device_list_update") + c = edu["content"] + + # synapse uses an empty prev_id list to indicate "needs a full resync". + self.assertEqual(c["prev_id"], []) + def check_device_update_edu( self, edu: JsonDict, From db098ec994a924fc9c5792b316c6a151c00bac47 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Mar 2020 10:35:00 +0000 Subject: [PATCH 340/382] Fix starting workers when federation sending not split out. --- synapse/app/generic_worker.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index b2c764bfe8..5363642d64 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -860,6 +860,9 @@ def start(config_options): # Force the appservice to start since they will be disabled in the main config config.notify_appservices = True + else: + # For other worker types we force this to off. + config.notify_appservices = False if config.worker_app == "synapse.app.pusher": if config.start_pushers: @@ -873,6 +876,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.start_pushers = True + else: + # For other worker types we force this to off. + config.start_pushers = False if config.worker_app == "synapse.app.user_dir": if config.update_user_directory: @@ -886,6 +892,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.update_user_directory = True + else: + # For other worker types we force this to off. + config.update_user_directory = False if config.worker_app == "synapse.app.federation_sender": if config.send_federation: @@ -899,6 +908,9 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.send_federation = True + else: + # For other worker types we force this to off. 
+ config.send_federation = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts From b5ecafd1576649c86fdb31d4d4a2f374a3464184 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 30 Mar 2020 18:05:09 +0100 Subject: [PATCH 341/382] Only setdefault for signatures if device has key_json --- synapse/storage/data_stores/main/devices.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index d55733a4cd..84d8deca18 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -317,14 +317,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) else: result["deleted"] = True From b5d0b038f4f2400dfd3ca7c698638c9821af6086 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 30 Mar 2020 19:18:00 +0100 Subject: [PATCH 342/382] Fix another instance --- synapse/storage/data_stores/main/devices.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 84d8deca18..8af5f7de54 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -527,14 +527,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) results.append(result) From 5bd2b275254bcbf001bee20d821c0ef567b9587f Mon Sep 17 00:00:00 2001 From: David Vo Date: Fri, 27 Mar 2020 12:26:55 +1100 Subject: [PATCH 343/382] Only import sqlite3 when type checking Fixes: #7127 Signed-off-by: David Vo --- changelog.d/7155.bugfix | 1 + synapse/storage/engines/sqlite.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7155.bugfix diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix new file mode 100644 index 0000000000..0bf51e7aba --- /dev/null +++ b/changelog.d/7155.bugfix @@ -0,0 +1 @@ +Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 2bfeefd54e..3bc2e8b986 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -12,14 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -import sqlite3 import struct import threading +import typing from synapse.storage.engines import BaseDatabaseEngine +if typing.TYPE_CHECKING: + import sqlite3 # noqa: F401 -class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]): + +class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): def __init__(self, database_module, database_config): super().__init__(database_module, database_config) From 2cb38ca871a65eaa4236a908c34b2b9873371b93 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 30 Mar 2020 19:15:06 +0100 Subject: [PATCH 344/382] Add changelog --- changelog.d/7177.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7177.bugfix diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix new file mode 100644 index 0000000000..908801c360 --- /dev/null +++ b/changelog.d/7177.bugfix @@ -0,0 +1 @@ +Mitigation for a bug in `_get_e2e_device_keys_txn` which broke federation for some servers. \ No newline at end of file From 2cf115f0ea3932bb65cbe3e1e563dca69aa642d7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 31 Mar 2020 11:20:07 +0100 Subject: [PATCH 345/382] Rewrite changelog --- changelog.d/7177.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix index 908801c360..329a96cb0b 100644 --- a/changelog.d/7177.bugfix +++ b/changelog.d/7177.bugfix @@ -1 +1 @@ -Mitigation for a bug in `_get_e2e_device_keys_txn` which broke federation for some servers. \ No newline at end of file +Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. \ No newline at end of file From 5d99bde7883201d2c6011ca40ee87dbc64b938e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 Mar 2020 10:36:44 +0000 Subject: [PATCH 346/382] Newsfile --- changelog.d/7133.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7133.bugfix diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix new file mode 100644 index 0000000000..61a86fd34e --- /dev/null +++ b/changelog.d/7133.bugfix @@ -0,0 +1 @@ +Fix starting workers when federation sending not split out. From 3fb9fc40f59e3688f82672410f812022a1af9daa Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 31 Mar 2020 11:49:43 +0100 Subject: [PATCH 347/382] 1.12.1rc1 --- CHANGES.md | 11 +++++++++++ changelog.d/7133.bugfix | 1 - changelog.d/7155.bugfix | 1 - changelog.d/7177.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/7133.bugfix delete mode 100644 changelog.d/7155.bugfix delete mode 100644 changelog.d/7177.bugfix diff --git a/CHANGES.md b/CHANGES.md index f794c585b7..5b97d7ff82 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.12.1rc1 (2020-03-31) +============================== + +Bugfixes +-------- + +- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)) +- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155)) +- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. 
([\#7177](https://github.com/matrix-org/synapse/issues/7177)) + + Synapse 1.12.0 (2020-03-23) =========================== diff --git a/changelog.d/7133.bugfix b/changelog.d/7133.bugfix deleted file mode 100644 index 61a86fd34e..0000000000 --- a/changelog.d/7133.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix starting workers when federation sending not split out. diff --git a/changelog.d/7155.bugfix b/changelog.d/7155.bugfix deleted file mode 100644 index 0bf51e7aba..0000000000 --- a/changelog.d/7155.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix deleted file mode 100644 index 329a96cb0b..0000000000 --- a/changelog.d/7177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 5b86008945..c3c5b20f11 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.12.0" +__version__ = "1.12.1rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 677d0edbac97b7069413446b1ca01e6df6d9728e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 31 Mar 2020 11:58:48 +0100 Subject: [PATCH 348/382] Note where bugs were introduced --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5b97d7ff82..a77768de58 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,9 +4,9 @@ Synapse 1.12.1rc1 (2020-03-31) Bugfixes -------- -- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)) -- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155)) -- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. ([\#7177](https://github.com/matrix-org/synapse/issues/7177)) +- Fix starting workers when federation sending not split out. ([\#7133](https://github.com/matrix-org/synapse/issues/7133)). Introduced in v1.12.0. +- Avoid importing `sqlite3` when using the postgres backend. Contributed by David Vo. ([\#7155](https://github.com/matrix-org/synapse/issues/7155)). Introduced in v1.12.0rc1. +- Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. ([\#7177](https://github.com/matrix-org/synapse/issues/7177)). Introduced in v1.11.0. Synapse 1.12.0 (2020-03-23) From 62a7289133840b4f4a55844b4f24ec664c3d917b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 31 Mar 2020 13:09:16 +0100 Subject: [PATCH 349/382] Fix a bug which could cause incorrect 'cyclic dependency' error. (#7178) If there was an exception setting up one of the attributes of the Homeserver god object, then future attempts to fetch that attribute would raise a confusing "Cyclic dependency" error. Let's make sure that we clear the `building` flag so that we just get the original exception. 
Ref: #7169 --- changelog.d/7178.bugfix | 1 + synapse/server.py | 22 ++++++++++------------ 2 files changed, 11 insertions(+), 12 deletions(-) create mode 100644 changelog.d/7178.bugfix diff --git a/changelog.d/7178.bugfix b/changelog.d/7178.bugfix new file mode 100644 index 0000000000..35ea645d75 --- /dev/null +++ b/changelog.d/7178.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause incorrect 'cyclic dependency' error. diff --git a/synapse/server.py b/synapse/server.py index cd86475d6b..9228e1c892 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -583,24 +583,22 @@ def _make_dependency_method(depname): try: builder = getattr(hs, "build_%s" % (depname)) except AttributeError: - builder = None + raise NotImplementedError( + "%s has no %s nor a builder for it" % (type(hs).__name__, depname) + ) - if builder: - # Prevent cyclic dependencies from deadlocking - if depname in hs._building: - raise ValueError("Cyclic dependency while building %s" % (depname,)) - hs._building[depname] = 1 + # Prevent cyclic dependencies from deadlocking + if depname in hs._building: + raise ValueError("Cyclic dependency while building %s" % (depname,)) + hs._building[depname] = 1 + try: dep = builder() setattr(hs, depname, dep) - + finally: del hs._building[depname] - return dep - - raise NotImplementedError( - "%s has no %s nor a builder for it" % (type(hs).__name__, depname) - ) + return dep setattr(HomeServer, "get_%s" % (depname), _get) From 0a7b0882c1d1f52bde46d6f367f265bc330e8bd0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Mar 2020 09:33:02 -0400 Subject: [PATCH 350/382] Fix use of async/await in media code (#7184) --- changelog.d/7184.misc | 1 + synapse/storage/data_stores/main/media_repository.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7184.misc diff --git a/changelog.d/7184.misc b/changelog.d/7184.misc new file mode 100644 index 0000000000..fac5bc0403 --- /dev/null +++ b/changelog.d/7184.misc @@ -0,0 +1 @@ +Convert some of synapse.rest.media to async/await. 
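
A minimal standalone sketch of the pattern the server.py change in patch 349 above adopts: clear the "being built" marker in a `finally` block, so that a failed build surfaces its own error on retry instead of a misleading "cyclic dependency" error. Names here are illustrative, not Synapse's:

    class Container:
        def __init__(self):
            self._building = set()
            self._instances = {}

        def get(self, name, builder):
            if name in self._instances:
                return self._instances[name]
            if name in self._building:
                raise ValueError("Cyclic dependency while building %s" % (name,))
            self._building.add(name)
            try:
                self._instances[name] = builder()
            finally:
                # cleared even if builder() raised, so the next get() call
                # re-runs the builder and re-raises the original failure
                self._building.discard(name)
            return self._instances[name]
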
diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py index 80ca36dedf..cf195f8aa6 100644 --- a/synapse/storage/data_stores/main/media_repository.py +++ b/synapse/storage/data_stores/main/media_repository.py @@ -340,7 +340,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "get_expired_url_cache", _get_expired_url_cache_txn ) - def delete_url_cache(self, media_ids): + async def delete_url_cache(self, media_ids): if len(media_ids) == 0: return @@ -349,7 +349,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): def _delete_url_cache_txn(txn): txn.executemany(sql, [(media_id,) for media_id in media_ids]) - return self.db.runInteraction("delete_url_cache", _delete_url_cache_txn) + return await self.db.runInteraction("delete_url_cache", _delete_url_cache_txn) def get_url_cache_media_before(self, before_ts): sql = ( From b994e86e359fd095f82feabbf38fb18a5d10e0ae Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 31 Mar 2020 14:51:22 +0100 Subject: [PATCH 351/382] Only setdefault for signatures if device has key_json (#7177) --- changelog.d/7177.bugfix | 1 + synapse/storage/data_stores/main/devices.py | 24 ++++++++++++--------- 2 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 changelog.d/7177.bugfix diff --git a/changelog.d/7177.bugfix b/changelog.d/7177.bugfix new file mode 100644 index 0000000000..329a96cb0b --- /dev/null +++ b/changelog.d/7177.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause outbound federation traffic to stop working if a client uploaded an incorrect e2e device signature. \ No newline at end of file diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 3140e1b722..20995e1b78 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -286,14 +286,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) else: result["deleted"] = True @@ -494,14 +496,16 @@ class DeviceWorkerStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: result["keys"] = db_to_json(key_json) + + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) + device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name - if "signatures" in device: - for sig_user_id, sigs in device["signatures"].items(): - result["keys"].setdefault("signatures", {}).setdefault( - sig_user_id, {} - ).update(sigs) results.append(result) From fe1580bfd91151c2c375d3c403ed911828f3899e Mon Sep 17 00:00:00 2001 From: Karlinde Date: Tue, 31 Mar 2020 16:08:56 +0200 Subject: [PATCH 352/382] Fill in the 'default' field for user-defined push rules (#6639) Signed-off-by: Karl 
Linderhed --- changelog.d/6639.bugfix | 1 + synapse/storage/data_stores/main/push_rule.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/6639.bugfix diff --git a/changelog.d/6639.bugfix b/changelog.d/6639.bugfix new file mode 100644 index 0000000000..c7593a6e84 --- /dev/null +++ b/changelog.d/6639.bugfix @@ -0,0 +1 @@ +Fix missing field `default` when fetching user-defined push rules. diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py index 62ac88d9f2..46f9bda773 100644 --- a/synapse/storage/data_stores/main/push_rule.py +++ b/synapse/storage/data_stores/main/push_rule.py @@ -41,6 +41,7 @@ def _load_rules(rawrules, enabled_map): rule = dict(rawrule) rule["conditions"] = json.loads(rawrule["conditions"]) rule["actions"] = json.loads(rawrule["actions"]) + rule["default"] = False ruleslist.append(rule) # We're going to be mutating this a lot, so do a deep copy From 60adcbed919afd5c85442775eca822fec43d816d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 31 Mar 2020 15:18:41 +0100 Subject: [PATCH 353/382] Fix "'NoneType' has no attribute start|stop" logcontext errors (#7181) Fixes #7179. --- changelog.d/7181.misc | 1 + synapse/http/site.py | 13 ++++++------- synapse/logging/context.py | 5 +++++ 3 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 changelog.d/7181.misc diff --git a/changelog.d/7181.misc b/changelog.d/7181.misc new file mode 100644 index 0000000000..731f4dcb52 --- /dev/null +++ b/changelog.d/7181.misc @@ -0,0 +1 @@ +Clean up some LoggingContext code. diff --git a/synapse/http/site.py b/synapse/http/site.py index e092193c9c..32feb0d968 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -193,6 +193,12 @@ class SynapseRequest(Request): self.finish_time = time.time() Request.connectionLost(self, reason) + if self.logcontext is None: + logger.info( + "Connection from %s lost before request headers were read", self.client + ) + return + # we only get here if the connection to the client drops before we send # the response. # @@ -236,13 +242,6 @@ class SynapseRequest(Request): def _finished_processing(self): """Log the completion of this request and update the metrics """ - - if self.logcontext is None: - # this can happen if the connection closed before we read the - # headers (so render was never called). In that case we'll already - # have logged a warning, so just bail out. - return - usage = self.logcontext.get_resource_usage() if self._processing_finished_time is None: diff --git a/synapse/logging/context.py b/synapse/logging/context.py index a8eafb1c7c..3254d6a8df 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -539,6 +539,11 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe Returns: The context that was previously active """ + # everything blows up if we allow current_context to be set to None, so sanity-check + # that now. + if context is None: + raise TypeError("'context' argument may not be None") + current = current_context() if current is not context: From 2e826cd80c97cbdcec3e600b802c43ec27263e39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jostein=20Kj=C3=B8nigsen?= Date: Tue, 31 Mar 2020 16:50:48 +0200 Subject: [PATCH 354/382] Improve TURN documentation. 
(#7167) --- changelog.d/7167.doc | 1 + docs/turn-howto.md | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/7167.doc diff --git a/changelog.d/7167.doc b/changelog.d/7167.doc new file mode 100644 index 0000000000..a7e7ba9b51 --- /dev/null +++ b/changelog.d/7167.doc @@ -0,0 +1 @@ +Improve README.md by being explicit about public IP recommendation for TURN relaying. diff --git a/docs/turn-howto.md b/docs/turn-howto.md index 1bd3943f54..b26e41f19e 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -11,6 +11,13 @@ TURN server. The following sections describe how to install [coturn]() (which implements the TURN REST API) and integrate it with synapse. +## Requirements + +For TURN relaying with `coturn` to work, it must be hosted on a server/endpoint with a public IP. + +Hosting TURN behind a NAT (even with appropriate port forwarding) is known to cause issues +and to often not work. + ## `coturn` Setup ### Initial installation From cfe8c8ab8e412b6320e5963ced0670fbc7b00d1b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:24:06 +0100 Subject: [PATCH 355/382] Remove unused `start_background_update` This was only used in a unit test, so let's just inline it in the test. --- synapse/storage/background_updates.py | 21 --------------------- tests/storage/test_background_update.py | 14 +++++++++----- 2 files changed, 9 insertions(+), 26 deletions(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index eb1a7e5002..d4e26eab6c 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -400,27 +400,6 @@ class BackgroundUpdater(object): self.register_background_update_handler(update_name, updater) - def start_background_update(self, update_name, progress): - """Starts a background update running. - - Args: - update_name: The update to set running. - progress: The initial state of the progress of the update. - - Returns: - A deferred that completes once the task has been added to the - queue. - """ - # Clear the background update queue so that we will pick up the new - # task on the next iteration of do_background_update. - self._background_update_queue = [] - progress_json = json.dumps(progress) - - return self.db.simple_insert( - "background_updates", - {"update_name": update_name, "progress_json": progress_json}, - ) - def _end_background_update(self, update_name): """Removes a completed background update task from the queue. 
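
With `start_background_update` removed, queuing a background update amounts to inserting a row into the `background_updates` table, which is what the test change below does via `simple_insert`. A self-contained sketch of the equivalent SQL, using a simplified schema (the real table also has a `depends_on` column, and gains an `ordering` column in a later patch):

    import json
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE background_updates"
        " (update_name TEXT NOT NULL, progress_json TEXT NOT NULL)"
    )
    conn.execute(
        "INSERT INTO background_updates (update_name, progress_json) VALUES (?, ?)",
        ("test_update", json.dumps({"my_key": 1})),
    )
    print(conn.execute("SELECT * FROM background_updates").fetchall())
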
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index ae14fb407d..aca41eb215 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -25,12 +25,20 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): # the target runtime for each bg update target_background_update_duration_ms = 50000 + store = self.hs.get_datastore() + self.get_success( + store.db.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + # first step: make a bit of progress @defer.inlineCallbacks def update(progress, count): yield self.clock.sleep((count * duration_ms) / 1000) progress = {"my_key": progress["my_key"] + 1} - yield self.hs.get_datastore().db.runInteraction( + yield store.db.runInteraction( "update_progress", self.updates._background_update_progress_txn, "test_update", @@ -39,10 +47,6 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): return count self.update_handler.side_effect = update - - self.get_success( - self.updates.start_background_update("test_update", {"my_key": 1}) - ) self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update( From 26d17b9bdc0de51d5f1a7526e8ab70e7f7796e4d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:25:10 +0100 Subject: [PATCH 356/382] Make `has_completed_background_updates` async (Almost) everywhere that uses it is happy with an awaitable. --- synapse/storage/background_updates.py | 7 +++---- tests/storage/test_background_update.py | 4 +++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index d4e26eab6c..79494bcdf5 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -119,12 +119,11 @@ class BackgroundUpdater(object): self._all_done = True return None - @defer.inlineCallbacks - def has_completed_background_updates(self): + async def has_completed_background_updates(self) -> bool: """Check if all the background updates have completed Returns: - Deferred[bool]: True if all background updates have completed + True if all background updates have completed """ # if we've previously determined that there is nothing left to do, that # is easy @@ -138,7 +137,7 @@ class BackgroundUpdater(object): # otherwise, check if there are updates to be run. This is important, # as we may be running on a worker which doesn't perform the bg updates # itself, but still wants to wait for them to happen. 
- updates = yield self.db.simple_select_onecol( + updates = await self.db.simple_select_onecol( "background_updates", keyvalues=None, retcol="1", diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index aca41eb215..e578de8acf 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -11,7 +11,9 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.updates = self.hs.get_datastore().db.updates # type: BackgroundUpdater # the base test class should have run the real bg updates for us - self.assertTrue(self.updates.has_completed_background_updates()) + self.assertTrue( + self.get_success(self.updates.has_completed_background_updates()) + ) self.update_handler = Mock() self.updates.register_background_update_handler( From 51f4d52cb4663a056372d779b78488aeae45f554 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:27:56 +0100 Subject: [PATCH 357/382] Set a logging context while running the bg updates This mostly just reduces the amount of "running from sentinel context" spam during unittest setup. --- tests/unittest.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/unittest.py b/tests/unittest.py index d0406ca2fd..27af5228fe 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -40,6 +40,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import ( SENTINEL_CONTEXT, + LoggingContext, current_context, set_current_context, ) @@ -419,15 +420,17 @@ class HomeserverTestCase(TestCase): config_obj.parse_config_dict(config, "", "") kwargs["config"] = config_obj + async def run_bg_updates(): + with LoggingContext("run_bg_updates", request="run_bg_updates-1"): + while not await stor.db.updates.has_completed_background_updates(): + await stor.db.updates.do_next_background_update(1) + hs = setup_test_homeserver(self.addCleanup, *args, **kwargs) stor = hs.get_datastore() # Run the database background updates, when running against "master". if hs.__class__.__name__ == "TestHomeServer": - while not self.get_success( - stor.db.updates.has_completed_background_updates() - ): - self.get_success(stor.db.updates.do_next_background_update(1)) + self.get_success(run_bg_updates()) return hs From b4c22342320d7de86c02dfb36415a38c62bec88d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:31:32 +0100 Subject: [PATCH 358/382] Make do_next_background_update return a bool returning a None or an int that we don't use is confusing. --- synapse/storage/background_updates.py | 12 +++++------- tests/storage/test_background_update.py | 6 +++--- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 79494bcdf5..4a59132bf3 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -111,7 +111,7 @@ class BackgroundUpdater(object): except Exception: logger.exception("Error doing update") else: - if result is None: + if result: logger.info( "No more background updates to do." " Unscheduling background update task." 
@@ -169,9 +169,7 @@ class BackgroundUpdater(object): return not update_exists - async def do_next_background_update( - self, desired_duration_ms: float - ) -> Optional[int]: + async def do_next_background_update(self, desired_duration_ms: float) -> bool: """Does some amount of work on the next queued background update Returns once some amount of work is done. @@ -180,7 +178,7 @@ class BackgroundUpdater(object): desired_duration_ms(float): How long we want to spend updating. Returns: - None if there is no more work to do, otherwise an int + True if there is no more work to do, otherwise False """ if not self._background_update_queue: updates = await self.db.simple_select_list( @@ -195,14 +193,14 @@ class BackgroundUpdater(object): if not self._background_update_queue: # no work left to do - return None + return True # pop from the front, and add back to the back update_name = self._background_update_queue.pop(0) self._background_update_queue.append(update_name) res = await self._do_background_update(update_name, desired_duration_ms) - return res + return False async def _do_background_update( self, update_name: str, desired_duration_ms: float diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index e578de8acf..940b166129 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -56,7 +56,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): ), by=0.1, ) - self.assertIsNotNone(res) + self.assertFalse(res) # on the first call, we should get run with the default background update size self.update_handler.assert_called_once_with( @@ -79,7 +79,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): result = self.get_success( self.updates.do_next_background_update(target_background_update_duration_ms) ) - self.assertIsNotNone(result) + self.assertFalse(result) self.update_handler.assert_called_once() # third step: we don't expect to be called any more @@ -87,5 +87,5 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): result = self.get_success( self.updates.do_next_background_update(target_background_update_duration_ms) ) - self.assertIsNone(result) + self.assertTrue(result) self.assertFalse(self.update_handler.called) From 7b608cf4683c0df2dbb55aacd472c407a0f6b1fa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:43:19 +0100 Subject: [PATCH 359/382] Only run one background update at a time --- synapse/storage/background_updates.py | 74 ++++++++++++------- synapse/storage/prepare_database.py | 2 +- .../delta/58/00background_update_ordering.sql | 19 +++++ 3 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 synapse/storage/schema/delta/58/00background_update_ordering.sql diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 4a59132bf3..0e430356cd 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -90,8 +90,10 @@ class BackgroundUpdater(object): self._clock = hs.get_clock() self.db = database + # if a background update is currently running, its name. + self._current_background_update = None # type: Optional[str] + self._background_update_performance = {} - self._background_update_queue = [] self._background_update_handlers = {} self._all_done = False @@ -131,7 +133,7 @@ class BackgroundUpdater(object): return True # obviously, if we have things in our queue, we're not done. 
- if self._background_update_queue: + if self._current_background_update: return False # otherwise, check if there are updates to be run. This is important, @@ -152,11 +154,10 @@ class BackgroundUpdater(object): async def has_completed_background_update(self, update_name) -> bool: """Check if the given background update has finished running. """ - if self._all_done: return True - if update_name in self._background_update_queue: + if update_name == self._current_background_update: return False update_exists = await self.db.simple_select_one_onecol( @@ -180,31 +181,49 @@ class BackgroundUpdater(object): Returns: True if there is no more work to do, otherwise False """ - if not self._background_update_queue: - updates = await self.db.simple_select_list( - "background_updates", - keyvalues=None, - retcols=("update_name", "depends_on"), + + def get_background_updates_txn(txn): + txn.execute( + """ + SELECT update_name, depends_on FROM background_updates + ORDER BY ordering, update_name + """ ) - in_flight = {update["update_name"] for update in updates} - for update in updates: - if update["depends_on"] not in in_flight: - self._background_update_queue.append(update["update_name"]) + return self.db.cursor_to_dict(txn) - if not self._background_update_queue: - # no work left to do - return True + if not self._current_background_update: + all_pending_updates = await self.db.runInteraction( + "background_updates", get_background_updates_txn, + ) + if not all_pending_updates: + # no work left to do + return True - # pop from the front, and add back to the back - update_name = self._background_update_queue.pop(0) - self._background_update_queue.append(update_name) + # find the first update which isn't dependent on another one in the queue. + pending = {update["update_name"] for update in all_pending_updates} + for upd in all_pending_updates: + depends_on = upd["depends_on"] + if not depends_on or depends_on not in pending: + break + logger.info( + "Not starting on bg update %s until %s is done", + upd["update_name"], + depends_on, + ) + else: + # if we get to the end of that for loop, there is a problem + raise Exception( + "Unable to find a background update which doesn't depend on " + "another: dependency cycle?" + ) - res = await self._do_background_update(update_name, desired_duration_ms) + self._current_background_update = upd["update_name"] + + await self._do_background_update(desired_duration_ms) return False - async def _do_background_update( - self, update_name: str, desired_duration_ms: float - ) -> int: + async def _do_background_update(self, desired_duration_ms: float) -> int: + update_name = self._current_background_update logger.info("Starting update batch on background update '%s'", update_name) update_handler = self._background_update_handlers[update_name] @@ -405,9 +424,12 @@ class BackgroundUpdater(object): Returns: A deferred that completes once the task is removed. 
""" - self._background_update_queue = [ - name for name in self._background_update_queue if name != update_name - ] + if update_name != self._current_background_update: + raise Exception( + "Cannot end background update %s which isn't currently running" + % update_name + ) + self._current_background_update = None return self.db.simple_delete_one( "background_updates", keyvalues={"update_name": update_name} ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 6cb7d4b922..1712932f31 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 57 +SCHEMA_VERSION = 58 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/58/00background_update_ordering.sql b/synapse/storage/schema/delta/58/00background_update_ordering.sql new file mode 100644 index 0000000000..02dae587cc --- /dev/null +++ b/synapse/storage/schema/delta/58/00background_update_ordering.sql @@ -0,0 +1,19 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* add an "ordering" column to background_updates, which can be used to sort them + to achieve some level of consistency. */ + +ALTER TABLE background_updates ADD COLUMN ordering INT NOT NULL DEFAULT 0; From b413ab8aa64a3e1a01db8e3e6bce0c486f916618 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 31 Mar 2020 17:44:36 +0100 Subject: [PATCH 360/382] changelog --- changelog.d/7190.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/7190.misc diff --git a/changelog.d/7190.misc b/changelog.d/7190.misc new file mode 100644 index 0000000000..34348873f1 --- /dev/null +++ b/changelog.d/7190.misc @@ -0,0 +1 @@ +Only run one background database update at a time. From dfa07822542da96b93ef9d871d43bf1a36dc4664 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Apr 2020 10:40:46 +0100 Subject: [PATCH 361/382] Remove connections per replication stream metric. (#7195) This broke in a recent PR (#7024) and is no longer useful due to all replication clients implicitly subscribing to all streams, so let's just remove it. --- changelog.d/7195.misc | 1 + synapse/replication/tcp/resource.py | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/7195.misc diff --git a/changelog.d/7195.misc b/changelog.d/7195.misc new file mode 100644 index 0000000000..676f285377 --- /dev/null +++ b/changelog.d/7195.misc @@ -0,0 +1 @@ +Move catchup of replication streams logic to worker. 
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 8b6067e20d..30021ee309 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -99,22 +99,6 @@ class ReplicationStreamer(object): self.streams_by_name = {stream.NAME: stream for stream in self.streams} - LaterGauge( - "synapse_replication_tcp_resource_connections_per_stream", - "", - ["stream_name"], - lambda: { - (stream_name,): len( - [ - conn - for conn in self.connections - if stream_name in conn.replication_streams - ] - ) - for stream_name in self.streams_by_name - }, - ) - self.federation_sender = None if not hs.config.send_federation: self.federation_sender = hs.get_federation_sender() From 250f87d0dec15f33fced7d06252e27d9c258b90c Mon Sep 17 00:00:00 2001 From: siroccal <41478263+siroccal@users.noreply.github.com> Date: Wed, 1 Apr 2020 13:44:51 +0200 Subject: [PATCH 362/382] Update postgres.md (#7119) --- changelog.d/7119.doc | 1 + docs/postgres.md | 28 +++++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7119.doc diff --git a/changelog.d/7119.doc b/changelog.d/7119.doc new file mode 100644 index 0000000000..05192966c3 --- /dev/null +++ b/changelog.d/7119.doc @@ -0,0 +1 @@ +Update postgres docs with login troubleshooting information. \ No newline at end of file diff --git a/docs/postgres.md b/docs/postgres.md index 04aa746051..70fe29cdcc 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -61,7 +61,33 @@ Note that the PostgreSQL database *must* have the correct encoding set You may need to enable password authentication so `synapse_user` can connect to the database. See -. +. + +If you get an error along the lines of `FATAL: Ident authentication failed for +user "synapse_user"`, you may need to use an authentication method other than +`ident`: + +* If the `synapse_user` user has a password, add the password to the `database:` + section of `homeserver.yaml`. Then add the following to `pg_hba.conf`: + + ``` + host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that + ``` + +* If the `synapse_user` user does not have a password, then a password doesn't + have to be added to `homeserver.yaml`. But the following does need to be added + to `pg_hba.conf`: + + ``` + host synapse synapse_user ::1/128 trust + ``` + +Note that line order matters in `pg_hba.conf`, so make sure that if you do add a +new line, it is inserted before: + +``` +host all all ::1/128 ident +``` ### Fixing incorrect `COLLATE` or `CTYPE` From 468dcc767bf379ba2b4ed4b6d1c6537473175eab Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Apr 2020 08:27:05 -0400 Subject: [PATCH 363/382] Allow admins to create aliases when they are not in the room (#7191) --- changelog.d/7191.feature | 1 + synapse/handlers/directory.py | 6 +++- tests/handlers/test_directory.py | 62 ++++++++++++++++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7191.feature diff --git a/changelog.d/7191.feature b/changelog.d/7191.feature new file mode 100644 index 0000000000..83d5685bb2 --- /dev/null +++ b/changelog.d/7191.feature @@ -0,0 +1 @@ +Admin users are no longer required to be in a room to create an alias for it. 
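
The postgres.md additions in patch 362 above describe how to resolve pg_hba.conf authentication failures. One quick way to verify the result outside of Synapse is to connect with psycopg2, the driver Synapse itself uses for postgres. A hedged sketch: the database name and user follow the docs, while the password and host are placeholders to adjust for your setup:

    import psycopg2

    conn = psycopg2.connect(
        dbname="synapse",
        user="synapse_user",
        password="changeme",  # drop this if pg_hba.conf uses `trust`
        host="::1",
    )
    print("connected, server version:", conn.server_version)
    conn.close()
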
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1d842c369b..53e5f585d9 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -127,7 +127,11 @@ class DirectoryHandler(BaseHandler): errcode=Codes.EXCLUSIVE, ) else: - if self.require_membership and check_membership: + # Server admins are not subject to the same constraints as normal + # users when creating an alias (e.g. being in the room). + is_admin = yield self.auth.is_server_admin(requester.user) + + if (self.require_membership and check_membership) and not is_admin: rooms_for_user = yield self.store.get_rooms_for_user(user_id) if room_id not in rooms_for_user: raise AuthError( diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 5e40adba52..00bb776271 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -102,6 +102,68 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) +class TestCreateAlias(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.handler = hs.get_handlers().directory_handler + + # Create user + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create a test room + self.room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.test_alias = "#test:test" + self.room_alias = RoomAlias.from_string(self.test_alias) + + # Create a test user. + self.test_user = self.register_user("user", "pass", admin=False) + self.test_user_tok = self.login("user", "pass") + self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) + + def test_create_alias_joined_room(self): + """A user can create an alias for a room they're in.""" + self.get_success( + self.handler.create_association( + create_requester(self.test_user), self.room_alias, self.room_id, + ) + ) + + def test_create_alias_other_room(self): + """A user cannot create an alias for a room they're NOT in.""" + other_room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + + self.get_failure( + self.handler.create_association( + create_requester(self.test_user), self.room_alias, other_room_id, + ), + synapse.api.errors.SynapseError, + ) + + def test_create_alias_admin(self): + """An admin can create an alias for a room they're NOT in.""" + other_room_id = self.helper.create_room_as( + self.test_user, tok=self.test_user_tok + ) + + self.get_success( + self.handler.create_association( + create_requester(self.admin_user), self.room_alias, other_room_id, + ) + ) + + class TestDeleteAlias(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, From b9930d24a05e47c36845d8607b12a45eea889be0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Apr 2020 08:48:00 -0400 Subject: [PATCH 364/382] Support SAML in the user interactive authentication workflow. 
(#7102) --- CHANGES.md | 8 ++ changelog.d/7102.feature | 1 + synapse/api/constants.py | 1 + synapse/handlers/auth.py | 116 +++++++++++++++++++- synapse/handlers/saml_handler.py | 51 +++++++-- synapse/res/templates/sso_auth_confirm.html | 14 +++ synapse/rest/client/v2_alpha/account.py | 19 +++- synapse/rest/client/v2_alpha/auth.py | 42 +++---- synapse/rest/client/v2_alpha/devices.py | 12 +- synapse/rest/client/v2_alpha/keys.py | 6 +- synapse/rest/client/v2_alpha/register.py | 1 + 11 files changed, 227 insertions(+), 44 deletions(-) create mode 100644 changelog.d/7102.feature create mode 100644 synapse/res/templates/sso_auth_confirm.html diff --git a/CHANGES.md b/CHANGES.md index f794c585b7..b997af1630 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,11 @@ +Next version +============ + +* A new template (`sso_auth_confirm.html`) was added to Synapse. If your Synapse + is configured to use SSO and a custom `sso_redirect_confirm_template_dir` + configuration then this template will need to be duplicated into that + directory. + Synapse 1.12.0 (2020-03-23) =========================== diff --git a/changelog.d/7102.feature b/changelog.d/7102.feature new file mode 100644 index 0000000000..01057aa396 --- /dev/null +++ b/changelog.d/7102.feature @@ -0,0 +1 @@ +Support SSO in the user interactive authentication workflow. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index cc8577552b..fda2c2e5bb 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -61,6 +61,7 @@ class LoginType(object): MSISDN = "m.login.msisdn" RECAPTCHA = "m.login.recaptcha" TERMS = "m.login.terms" + SSO = "org.matrix.login.sso" DUMMY = "m.login.dummy" # Only for C/S API v1 diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2ce1425dfa..7c09d15a72 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -53,6 +53,31 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +SUCCESS_TEMPLATE = """ + + +Success! + + + + + +
+<body>
+    <div>
+        <p>Thank you</p>
+        <p>You may now close this window and return to the application</p>
+    </div>
+</body>
+</html>
+ + +""" + + class AuthHandler(BaseHandler): SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 @@ -91,6 +116,7 @@ class AuthHandler(BaseHandler): self.hs = hs # FIXME better possibility to access registrationHandler later? self.macaroon_gen = hs.get_macaroon_generator() self._password_enabled = hs.config.password_enabled + self._saml2_enabled = hs.config.saml2_enabled # we keep this as a list despite the O(N^2) implication so that we can # keep PASSWORD first and avoid confusing clients which pick the first @@ -106,6 +132,13 @@ class AuthHandler(BaseHandler): if t not in login_types: login_types.append(t) self._supported_login_types = login_types + # Login types and UI Auth types have a heavy overlap, but are not + # necessarily identical. Login types have SSO (and other login types) + # added in the rest layer, see synapse.rest.client.v1.login.LoginRestServerlet.on_GET. + ui_auth_types = login_types.copy() + if self._saml2_enabled: + ui_auth_types.append(LoginType.SSO) + self._supported_ui_auth_types = ui_auth_types # Ratelimiter for failed auth during UIA. Uses same ratelimit config # as per `rc_login.failed_attempts`. @@ -113,10 +146,21 @@ class AuthHandler(BaseHandler): self._clock = self.hs.get_clock() - # Load the SSO redirect confirmation page HTML template + # Load the SSO HTML templates. + + # The following template is shown to the user during a client login via SSO, + # after the SSO completes and before redirecting them back to their client. + # It notifies the user they are about to give access to their matrix account + # to the client. self._sso_redirect_confirm_template = load_jinja2_templates( hs.config.sso_redirect_confirm_template_dir, ["sso_redirect_confirm.html"], )[0] + # The following template is shown during user interactive authentication + # in the fallback auth scenario. It notifies the user that they are + # authenticating for an operation to occur on their account. + self._sso_auth_confirm_template = load_jinja2_templates( + hs.config.sso_redirect_confirm_template_dir, ["sso_auth_confirm.html"], + )[0] self._server_name = hs.config.server_name @@ -130,6 +174,7 @@ class AuthHandler(BaseHandler): request: SynapseRequest, request_body: Dict[str, Any], clientip: str, + description: str, ): """ Checks that the user is who they claim to be, via a UI auth. @@ -147,6 +192,9 @@ class AuthHandler(BaseHandler): clientip: The IP address of the client. + description: A human readable string to be displayed to the user that + describes the operation happening on their account. + Returns: defer.Deferred[dict]: the parameters for this request (which may have been given only in a previous call). @@ -175,11 +223,11 @@ class AuthHandler(BaseHandler): ) # build a list of supported flows - flows = [[login_type] for login_type in self._supported_login_types] + flows = [[login_type] for login_type in self._supported_ui_auth_types] try: result, params, _ = yield self.check_auth( - flows, request, request_body, clientip + flows, request, request_body, clientip, description ) except LoginError: # Update the ratelimite to say we failed (`can_do_action` doesn't raise). 
@@ -193,7 +241,7 @@ class AuthHandler(BaseHandler): raise # find the completed login type - for login_type in self._supported_login_types: + for login_type in self._supported_ui_auth_types: if login_type not in result: continue @@ -224,6 +272,7 @@ class AuthHandler(BaseHandler): request: SynapseRequest, clientdict: Dict[str, Any], clientip: str, + description: str, ): """ Takes a dictionary sent by the client in the login / registration @@ -250,6 +299,9 @@ class AuthHandler(BaseHandler): clientip: The IP address of the client. + description: A human readable string to be displayed to the user that + describes the operation happening on their account. + Returns: defer.Deferred[dict, dict, str]: a deferred tuple of (creds, params, session_id). @@ -299,12 +351,18 @@ class AuthHandler(BaseHandler): comparator = (request.uri, request.method, clientdict) if "ui_auth" not in session: session["ui_auth"] = comparator + self._save_session(session) elif session["ui_auth"] != comparator: raise SynapseError( 403, "Requested operation has changed during the UI authentication session.", ) + # Add a human readable description to the session. + if "description" not in session: + session["description"] = description + self._save_session(session) + if not authdict: raise InteractiveAuthIncompleteError( self._auth_dict_for_flows(flows, session) @@ -991,6 +1049,56 @@ class AuthHandler(BaseHandler): else: return defer.succeed(False) + def start_sso_ui_auth(self, redirect_url: str, session_id: str) -> str: + """ + Get the HTML for the SSO redirect confirmation page. + + Args: + redirect_url: The URL to redirect to the SSO provider. + session_id: The user interactive authentication session ID. + + Returns: + The HTML to render. + """ + session = self._get_session_info(session_id) + # Get the human readable operation of what is occurring, falling back to + # a generic message if it isn't available for some reason. + description = session.get("description", "modify your account") + return self._sso_auth_confirm_template.render( + description=description, redirect_url=redirect_url, + ) + + def complete_sso_ui_auth( + self, registered_user_id: str, session_id: str, request: SynapseRequest, + ): + """Having figured out a mxid for this user, complete the HTTP request + + Args: + registered_user_id: The registered user ID to complete SSO login for. + request: The request to complete. + client_redirect_url: The URL to which to redirect the user at the end of the + process. + """ + # Mark the stage of the authentication as successful. + sess = self._get_session_info(session_id) + if "creds" not in sess: + sess["creds"] = {} + creds = sess["creds"] + + # Save the user who authenticated with SSO, this will be used to ensure + # that the account be modified is also the person who logged in. + creds[LoginType.SSO] = registered_user_id + self._save_session(sess) + + # Render the HTML and return. + html_bytes = SUCCESS_TEMPLATE.encode("utf8") + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % (len(html_bytes),)) + + request.write(html_bytes) + finish_request(request) + def complete_sso_login( self, registered_user_id: str, diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index dc04b53f43..4741c82f61 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -14,7 +14,7 @@ # limitations under the License. 
import logging import re -from typing import Tuple +from typing import Optional, Tuple import attr import saml2 @@ -44,11 +44,15 @@ class Saml2SessionData: # time the session was created, in milliseconds creation_time = attr.ib() + # The user interactive authentication session ID associated with this SAML + # session (or None if this SAML session is for an initial login). + ui_auth_session_id = attr.ib(type=Optional[str], default=None) class SamlHandler: def __init__(self, hs): self._saml_client = Saml2Client(hs.config.saml2_sp_config) + self._auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() @@ -77,12 +81,14 @@ class SamlHandler: self._error_html_content = hs.config.saml2_error_html_content - def handle_redirect_request(self, client_redirect_url): + def handle_redirect_request(self, client_redirect_url, ui_auth_session_id=None): """Handle an incoming request to /login/sso/redirect Args: client_redirect_url (bytes): the URL that we should redirect the client to when everything is done + ui_auth_session_id (Optional[str]): The session ID of the ongoing UI Auth (or + None if this is a login). Returns: bytes: URL to redirect to @@ -92,7 +98,9 @@ class SamlHandler: ) now = self._clock.time_msec() - self._outstanding_requests_dict[reqid] = Saml2SessionData(creation_time=now) + self._outstanding_requests_dict[reqid] = Saml2SessionData( + creation_time=now, ui_auth_session_id=ui_auth_session_id, + ) for key, value in info["headers"]: if key == "Location": @@ -119,7 +127,9 @@ class SamlHandler: self.expire_sessions() try: - user_id = await self._map_saml_response_to_user(resp_bytes, relay_state) + user_id, current_session = await self._map_saml_response_to_user( + resp_bytes, relay_state + ) except RedirectException: # Raise the exception as per the wishes of the SAML module response raise @@ -137,9 +147,28 @@ class SamlHandler: finish_request(request) return - self._auth_handler.complete_sso_login(user_id, request, relay_state) + # Complete the interactive auth session or the login. + if current_session and current_session.ui_auth_session_id: + self._auth_handler.complete_sso_ui_auth( + user_id, current_session.ui_auth_session_id, request + ) - async def _map_saml_response_to_user(self, resp_bytes, client_redirect_url): + else: + self._auth_handler.complete_sso_login(user_id, request, relay_state) + + async def _map_saml_response_to_user( + self, resp_bytes: str, client_redirect_url: str + ) -> Tuple[str, Optional[Saml2SessionData]]: + """ + Given a sample response, retrieve the cached session and user for it. + + Args: + resp_bytes: The SAML response. + client_redirect_url: The redirect URL passed in by the client. + + Returns: + Tuple of the user ID and SAML session associated with this response. 
+ """ try: saml2_auth = self._saml_client.parse_authn_request_response( resp_bytes, @@ -167,7 +196,9 @@ class SamlHandler: logger.info("SAML2 mapped attributes: %s", saml2_auth.ava) - self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) + current_session = self._outstanding_requests_dict.pop( + saml2_auth.in_response_to, None + ) remote_user_id = self._user_mapping_provider.get_remote_user_id( saml2_auth, client_redirect_url @@ -188,7 +219,7 @@ class SamlHandler: ) if registered_user_id is not None: logger.info("Found existing mapping %s", registered_user_id) - return registered_user_id + return registered_user_id, current_session # backwards-compatibility hack: see if there is an existing user with a # suitable mapping from the uid @@ -213,7 +244,7 @@ class SamlHandler: await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id + return registered_user_id, current_session # Map saml response to user attributes using the configured mapping provider for i in range(1000): @@ -260,7 +291,7 @@ class SamlHandler: await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) - return registered_user_id + return registered_user_id, current_session def expire_sessions(self): expire_before = self._clock.time_msec() - self._saml2_session_lifetime diff --git a/synapse/res/templates/sso_auth_confirm.html b/synapse/res/templates/sso_auth_confirm.html new file mode 100644 index 0000000000..0d9de9d465 --- /dev/null +++ b/synapse/res/templates/sso_auth_confirm.html @@ -0,0 +1,14 @@ + + + Authentication + + +
+<body>
+    <div>
+        <p>
+            A client is trying to {{ description | e }}. To confirm this action,
+            re-authenticate with single sign-on.
+            If you did not expect this, your account may be compromised!
+        </p>
+    </div>
+    <a href="{{ redirect_url | e }}">Continue with SSO</a>
+</body>
+</html>
+ + diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f80b5e40ea..31435b1e1c 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -234,7 +234,11 @@ class PasswordRestServlet(RestServlet): if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req(request) params = await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "modify your account password", ) user_id = requester.user.to_string() else: @@ -244,6 +248,7 @@ class PasswordRestServlet(RestServlet): request, body, self.hs.get_ip_from_request(request), + "modify your account password", ) if LoginType.EMAIL_IDENTITY in result: @@ -311,7 +316,11 @@ class DeactivateAccountRestServlet(RestServlet): return 200, {} await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "deactivate your account", ) result = await self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase, id_server=body.get("id_server") @@ -669,7 +678,11 @@ class ThreepidAddRestServlet(RestServlet): assert_valid_client_secret(client_secret) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "add a third-party identifier to your account", ) validation_session = await self.identity_handler.validate_threepid_session( diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 85cf5a14c6..1787562b90 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -18,6 +18,7 @@ import logging from synapse.api.constants import LoginType from synapse.api.errors import SynapseError from synapse.api.urls import CLIENT_API_PREFIX +from synapse.handlers.auth import SUCCESS_TEMPLATE from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string @@ -89,30 +90,6 @@ TERMS_TEMPLATE = """ """ -SUCCESS_TEMPLATE = """ - - -Success! - - - - - -
-<body>
-    <div>
-        <p>Thank you</p>
-        <p>You may now close this window and return to the application</p>
-    </div>
-</body>
-</html>
- - -""" - class AuthRestServlet(RestServlet): """ @@ -130,6 +107,11 @@ class AuthRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() + # SSO configuration. + self._saml_enabled = hs.config.saml2_enabled + if self._saml_enabled: + self._saml_handler = hs.get_saml_handler() + def on_GET(self, request, stagetype): session = parse_string(request, "session") if not session: @@ -150,6 +132,15 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } + + elif stagetype == LoginType.SSO and self._saml_enabled: + # Display a confirmation page which prompts the user to + # re-authenticate with their SSO provider. + client_redirect_url = "" + sso_redirect_url = self._saml_handler.handle_redirect_request( + client_redirect_url, session + ) + html = self.auth_handler.start_sso_ui_auth(sso_redirect_url, session) else: raise SynapseError(404, "Unknown auth stage type") @@ -210,6 +201,9 @@ class AuthRestServlet(RestServlet): "myurl": "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), } + elif stagetype == LoginType.SSO: + # The SSO fallback workflow should not post here, + raise SynapseError(404, "Fallback SSO auth does not support POST requests.") else: raise SynapseError(404, "Unknown auth stage type") diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 119d979052..c0714fcfb1 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -81,7 +81,11 @@ class DeleteDevicesRestServlet(RestServlet): assert_params_in_dict(body, ["devices"]) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "remove device(s) from your account", ) await self.device_handler.delete_devices( @@ -127,7 +131,11 @@ class DeviceRestServlet(RestServlet): raise await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "remove a device from your account", ) await self.device_handler.delete_device(requester.user.to_string(), device_id) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 5eb7ef35a4..8f41a3edbf 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -263,7 +263,11 @@ class SigningKeyUploadServlet(RestServlet): body = parse_json_object_from_request(request) await self.auth_handler.validate_user_via_ui_auth( - requester, request, body, self.hs.get_ip_from_request(request), + requester, + request, + body, + self.hs.get_ip_from_request(request), + "add a device signing key to your account", ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 66fc8ec179..431ecf4f84 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -505,6 +505,7 @@ class RegisterRestServlet(RestServlet): request, body, self.hs.get_ip_from_request(request), + "register a new account", ) # Check that we're not trying to register a denied 3pid. 
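
Patch 364 above threads SSO through user-interactive auth, including a fallback page served from `/_matrix/client/r0/auth/org.matrix.login.sso/fallback/web`. A hedged sketch of the client side of that flow, per the Matrix client-server spec; the homeserver URL, access token and device ID are placeholders:

    import requests

    HS = "https://homeserver.example"
    TOKEN = "placeholder_access_token"

    def delete_device(device_id, auth=None):
        body = {"auth": auth} if auth else {}
        return requests.delete(
            "%s/_matrix/client/r0/devices/%s" % (HS, device_id),
            headers={"Authorization": "Bearer %s" % TOKEN},
            json=body,
        )

    resp = delete_device("ADEVICEID")
    if resp.status_code == 401:
        # the 401 body carries the UIA session ID and the allowed auth flows
        session = resp.json()["session"]
        # ... user completes a stage, e.g. via the SSO fallback web page ...
        resp = delete_device("ADEVICEID", auth={"session": session})
    print(resp.status_code)
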
From 529462b5c044f7f7491fd41ab7d0682d01a6236b Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 2 Apr 2020 11:30:49 +0100
Subject: [PATCH 365/382] 1.12.1

---
 CHANGES.md          | 6 ++++++
 debian/changelog    | 6 ++++++
 synapse/__init__.py | 2 +-
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index a77768de58..e5608baa11 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,9 @@
+Synapse 1.12.1 (2020-04-02)
+===========================
+
+No significant changes since 1.12.1rc1.
+
+
 Synapse 1.12.1rc1 (2020-03-31)
 ==============================

diff --git a/debian/changelog b/debian/changelog
index 39ec9da7ab..8e14e59c0d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.12.1) stable; urgency=medium
+
+  * New synapse release 1.12.1.
+
+ -- Synapse Packaging team  Thu, 02 Apr 2020 11:30:47 +0000
+
 matrix-synapse-py3 (1.12.0) stable; urgency=medium

   * New synapse release 1.12.0.

diff --git a/synapse/__init__.py b/synapse/__init__.py
index c3c5b20f11..5df7d51ab1 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.12.1rc1"
+__version__ = "1.12.1"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From af47264b78c33698f6a70ce1ce3d32774d65de72 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 2 Apr 2020 12:04:55 +0100
Subject: [PATCH 366/382] review comment

---
 synapse/storage/background_updates.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 0e430356cd..510963eb7f 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -132,7 +132,7 @@ class BackgroundUpdater(object):
         if self._all_done:
             return True

-        # obviously, if we have things in our queue, we're not done.
+        # obviously, if we are currently processing an update, we're not done.
         if self._current_background_update:
             return False

From b730480abbb31ce0e9c9957ef30dfcd0c646a1b1 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 2 Apr 2020 11:30:49 +0100
Subject: [PATCH 367/382] 1.12.1

---
 CHANGES.md          | 6 ++++++
 debian/changelog    | 6 ++++++
 synapse/__init__.py | 2 +-
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index a77768de58..e5608baa11 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,9 @@
+Synapse 1.12.1 (2020-04-02)
+===========================
+
+No significant changes since 1.12.1rc1.
+
+
 Synapse 1.12.1rc1 (2020-03-31)
 ==============================

diff --git a/debian/changelog b/debian/changelog
index 39ec9da7ab..8e14e59c0d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.12.1) stable; urgency=medium
+
+  * New synapse release 1.12.1.
+
+ -- Synapse Packaging team  Thu, 02 Apr 2020 11:30:47 +0000
+
 matrix-synapse-py3 (1.12.0) stable; urgency=medium

   * New synapse release 1.12.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index c3c5b20f11..5df7d51ab1 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.12.1rc1"
+__version__ = "1.12.1"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From ec56620ff66aa06953b2d675e71df177ef7f375d Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 2 Apr 2020 18:08:03 +0100
Subject: [PATCH 368/382] Pin Pillow>=4.3.0,<7.1.0 to fix dep issue

---
 synapse/python_dependencies.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 8de8cb2c12..3274eb9863 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -61,7 +61,9 @@ REQUIREMENTS = [
     "pyasn1-modules>=0.0.7",
     "daemonize>=2.3.1",
     "bcrypt>=3.1.0",
-    "pillow>=4.3.0",
+    # Pillow 7.1.0 causes the following issue on debian buster:
+    # https://github.com/python-pillow/Pillow/issues/2377
+    "pillow>=4.3.0,<7.1.0",
     "sortedcontainers>=1.4.4",
     "pymacaroons>=0.13.0",
     "msgpack>=0.5.2",

From 08edefe694a59294aae1f24df31ec09979987a95 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 2 Apr 2020 19:02:45 +0100
Subject: [PATCH 369/382] 1.12.2

---
 CHANGES.md          | 10 ++++++++++
 debian/changelog    |  6 ++++++
 synapse/__init__.py |  2 +-
 3 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index e5608baa11..1ac3c60043 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+Synapse 1.12.2 (2020-04-02)
+===========================
+
+This release fixes [an
+issue](https://github.com/matrix-org/synapse/issues/7208) with building the
+debian packages.
+
+No other significant changes since 1.12.1.
+
+
 Synapse 1.12.1 (2020-04-02)
 ===========================

diff --git a/debian/changelog b/debian/changelog
index 8e14e59c0d..03b30cd12f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.12.2) stable; urgency=medium
+
+  * New synapse release 1.12.2.
+
+ -- Synapse Packaging team  Thu, 02 Apr 2020 19:02:17 +0000
+
 matrix-synapse-py3 (1.12.1) stable; urgency=medium

   * New synapse release 1.12.1.

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 5df7d51ab1..bdad75113d 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.12.1"
+__version__ = "1.12.2"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From f7d6e849b35bef390c5234d1309a301dd04f8330 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 2 Apr 2020 19:08:06 +0100
Subject: [PATCH 370/382] Fix changelog wording

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 1ac3c60043..5cec3d817d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,7 +1,7 @@
 Synapse 1.12.2 (2020-04-02)
 ===========================

-This release fixes [an
+This release works around [an
 issue](https://github.com/matrix-org/synapse/issues/7208) with building the
 debian packages.

 No other significant changes since 1.12.1.

From 6d7cec7a57ca258bcf28e7eb174d970670f7a652 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 3 Apr 2020 10:23:36 +0100
Subject: [PATCH 371/382] Fix the debian build in a better way.
(#7212) --- changelog.d/7212.misc | 1 + debian/changelog | 7 +++++++ debian/rules | 33 +++++++++++++++++++++++++++------ synapse/python_dependencies.py | 4 +--- 4 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 changelog.d/7212.misc diff --git a/changelog.d/7212.misc b/changelog.d/7212.misc new file mode 100644 index 0000000000..b57fc5f288 --- /dev/null +++ b/changelog.d/7212.misc @@ -0,0 +1 @@ +Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. diff --git a/debian/changelog b/debian/changelog index 03b30cd12f..6bafe468d9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +matrix-synapse-py3 (1.12.2ubuntu1) UNRELEASED; urgency=medium + + * Update the Debian build scripts to handle the new installation paths + for the support libraries introduced by Pillow 7.1.1. + + -- Richard van der Hoff Thu, 02 Apr 2020 23:18:52 +0100 + matrix-synapse-py3 (1.12.2) stable; urgency=medium * New synapse release 1.12.2. diff --git a/debian/rules b/debian/rules index a4d2ce2ba4..c744060a57 100755 --- a/debian/rules +++ b/debian/rules @@ -15,17 +15,38 @@ override_dh_installinit: # we don't really want to strip the symbols from our object files. override_dh_strip: +# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files +# (executables and shared libs) in the package, and looks for the shared +# libraries that they depend on. It then adds a dependency on the package that +# contains that library to the package. +# +# We make two modifications to that process... +# override_dh_shlibdeps: - # make the postgres package's dependencies a recommendation - # rather than a hard dependency. + # Firstly, postgres is not a hard dependency for us, so we want to make + # the things that psycopg2 depends on (such as libpq) be + # recommendations rather than hard dependencies. We do so by + # running dpkg-shlibdeps manually on psycopg2's libs. + # find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \ xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \ -pshlibs1 -dRecommends - # all the other dependencies can be normal 'Depends' requirements, - # except for PIL's, which is self-contained and which confuses - # dpkg-shlibdeps. - dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2 + # secondly, we exclude PIL's libraries from the process. They are known + # to be self-contained, but they have interdependencies and + # dpkg-shlibdeps doesn't know how to resolve them. + # + # As of Pillow 7.1.0, these libraries are in + # site-packages/Pillow.libs. Previously, they were in + # site-packages/PIL/.libs. + # + # (we also need to exclude psycopg2, of course, since we've already + # dealt with that.) 
+ # + dh_shlibdeps \ + -X site-packages/PIL/.libs \ + -X site-packages/Pillow.libs \ + -X site-packages/psycopg2 override_dh_virtualenv: ./debian/build_virtualenv diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 3274eb9863..8de8cb2c12 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -61,9 +61,7 @@ REQUIREMENTS = [ "pyasn1-modules>=0.0.7", "daemonize>=2.3.1", "bcrypt>=3.1.0", - # Pillow 7.1.0 causes the following issue on debian buster: - # https://github.com/python-pillow/Pillow/issues/2377 - "pillow>=4.3.0,<7.1.0", + "pillow>=4.3.0", "sortedcontainers>=1.4.4", "pymacaroons>=0.13.0", "msgpack>=0.5.2", From daa1ac89a0be4dd3cc941da4caeb2ddcbd701eff Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 10:40:22 +0100 Subject: [PATCH 372/382] Fix device list update stream ids going backward (#7158) Occasionally we could get a federation device list update transaction which looked like: ``` [ {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D2', 'prev_id': [], 'stream_id': 12, 'deleted': True}}, {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D1', 'prev_id': [12], 'stream_id': 11, 'deleted': True}}, {'edu_type': 'm.device_list_update', 'content': {'user_id': '@user:test', 'device_id': 'D3', 'prev_id': [11], 'stream_id': 13, 'deleted': True}} ] ``` Having `stream_ids` which are lower than `prev_ids` looks odd. It might work (I'm not actually sure), but in any case it doesn't seem like a reasonable thing to expect other implementations to support. --- changelog.d/7158.misc | 1 + synapse/storage/data_stores/main/devices.py | 10 ++++++++-- tests/federation/test_federation_sender.py | 6 ++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 changelog.d/7158.misc diff --git a/changelog.d/7158.misc b/changelog.d/7158.misc new file mode 100644 index 0000000000..269b8daeb0 --- /dev/null +++ b/changelog.d/7158.misc @@ -0,0 +1 @@ +Fix device list update stream ids going backward. 
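The ordering problem is easiest to see in isolation. A standalone sketch of the
fix (the `(user_id, device_id) -> (stream_id, context)` shape mirrors
`query_map` in the diff below, but the values here are invented):

```python
user_id = "@user:test"
user_devices = {"D2": {}, "D1": {}, "D3": {}}

# maps (user_id, device_id) -> (stream_id, opentracing_context), as in the patch
query_map = {
    ("@user:test", "D1"): (11, None),
    ("@user:test", "D2"): (12, None),
    ("@user:test", "D3"): (13, None),
}

# Plain dict iteration can yield D2 (stream_id 12) before D1 (stream_id 11);
# sorting the device ids by their stream_id sends the EDUs in stream order.
device_ids = sorted(user_devices.keys(), key=lambda i: query_map[(user_id, i)][0])

assert [query_map[(user_id, d)][0] for d in device_ids] == [11, 12, 13]
```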
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 20995e1b78..dd3561e9b2 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -165,7 +165,6 @@ class DeviceWorkerStore(SQLBaseStore): # the max stream_id across each set of duplicate entries # # maps (user_id, device_id) -> (stream_id, opentracing_context) - # as long as their stream_id does not match that of the last row # # opentracing_context contains the opentracing metadata for the request # that created the poke @@ -270,7 +269,14 @@ class DeviceWorkerStore(SQLBaseStore): prev_id = yield self._get_last_device_update_for_remote_user( destination, user_id, from_stream_id ) - for device_id, device in iteritems(user_devices): + + # make sure we go through the devices in stream order + device_ids = sorted( + user_devices.keys(), key=lambda i: query_map[(user_id, i)][0], + ) + + for device_id in device_ids: + device = user_devices[device_id] stream_id, opentracing_context = query_map[(user_id, device_id)] result = { "user_id": user_id, diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index a5fe5c6880..33105576af 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -297,6 +297,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): c = edu["content"] if stream_id is not None: self.assertEqual(c["prev_id"], [stream_id]) + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2"}, devices) @@ -330,6 +331,7 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): c.items(), {"user_id": u1, "prev_id": [stream_id], "deleted": True}.items(), ) + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) @@ -366,6 +368,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): self.assertEqual(edu["edu_type"], "m.device_list_update") c = edu["content"] self.assertEqual(c["prev_id"], [stream_id] if stream_id is not None else []) + if stream_id is not None: + self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) @@ -482,6 +486,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): } self.assertLessEqual(expected.items(), content.items()) + if prev_stream_id is not None: + self.assertGreaterEqual(content["stream_id"], prev_stream_id) return content["stream_id"] def check_signing_key_update_txn(self, txn: JsonDict,) -> None: From fcc2de7a0c0c7a02b935c4b35394f228a5d5a304 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 Apr 2020 10:51:32 +0100 Subject: [PATCH 373/382] Update docstring per review comments --- synapse/storage/background_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 510963eb7f..59f3394b0a 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -179,7 +179,7 @@ class BackgroundUpdater(object): desired_duration_ms(float): How long we want to spend updating. 
Returns:
-            True if there is no more work to do, otherwise False
+            True if we have finished running all the background updates, otherwise False
         """

         def get_background_updates_txn(txn):

From 29ce90358c06ca2452b2ecb55670103de3557109 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 3 Apr 2020 10:57:07 +0100
Subject: [PATCH 374/382] 1.12.3

---
 CHANGES.md            | 9 +++++++++
 changelog.d/7212.misc | 1 -
 debian/changelog      | 8 ++++++--
 synapse/__init__.py   | 2 +-
 4 files changed, 16 insertions(+), 4 deletions(-)
 delete mode 100644 changelog.d/7212.misc

diff --git a/CHANGES.md b/CHANGES.md
index 5cec3d817d..e9ca767644 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 1.12.3 (2020-04-03)
+===========================
+
+Internal Changes
+----------------
+
+- Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. ([\#7212](https://github.com/matrix-org/synapse/issues/7212))
+
+
 Synapse 1.12.2 (2020-04-02)
 ===========================

diff --git a/changelog.d/7212.misc b/changelog.d/7212.misc
deleted file mode 100644
index b57fc5f288..0000000000
--- a/changelog.d/7212.misc
+++ /dev/null
@@ -1 +0,0 @@
-Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2.
diff --git a/debian/changelog b/debian/changelog
index 6bafe468d9..642115fc5a 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,13 @@
-matrix-synapse-py3 (1.12.2ubuntu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.12.3) stable; urgency=medium

+  [ Richard van der Hoff ]
   * Update the Debian build scripts to handle the new installation paths
     for the support libraries introduced by Pillow 7.1.1.

- -- Richard van der Hoff  Thu, 02 Apr 2020 23:18:52 +0100
+  [ Synapse Packaging team ]
+  * New synapse release 1.12.3.
+
+ -- Synapse Packaging team  Fri, 03 Apr 2020 10:55:03 +0100

 matrix-synapse-py3 (1.12.2) stable; urgency=medium

diff --git a/synapse/__init__.py b/synapse/__init__.py
index bdad75113d..3bf2d02450 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.12.2"
+__version__ = "1.12.3"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From 553c8a9b6b23c6aa79f70e61e9490fd6f97def2e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 3 Apr 2020 11:00:57 +0100
Subject: [PATCH 375/382] tweak changelog

---
 CHANGES.md | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index e9ca767644..e78c8e48e7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,11 +1,8 @@
 Synapse 1.12.3 (2020-04-03)
 ===========================

-Internal Changes
-----------------
-
-- Roll back the pin to Pillow 7.0 which was introduced in Synapse 1.12.2. ([\#7212](https://github.com/matrix-org/synapse/issues/7212))
-
+- Remove the pin to Pillow 7.0 which was introduced in Synapse 1.12.2, and
+correctly fix the issue with building the Debian packages. ([\#7212](https://github.com/matrix-org/synapse/issues/7212))

 Synapse 1.12.2 (2020-04-02)
 ===========================

From 8d4cbdeaa9765ae0d87ec82b053f12ed8162f6f5 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 27 Mar 2020 20:18:56 +0000
Subject: [PATCH 376/382] Revert "Improve the UX of the login fallback when
 using SSO (#7152)"

This was incorrectly merged to `master` instead of develop.

This reverts commit 90246344e340bce3417fb330da6be9338a701c5c.
--- changelog.d/7152.feature | 1 - synapse/static/client/login/index.html | 2 +- synapse/static/client/login/js/login.js | 51 ++++++++++--------------- 3 files changed, 22 insertions(+), 32 deletions(-) delete mode 100644 changelog.d/7152.feature diff --git a/changelog.d/7152.feature b/changelog.d/7152.feature deleted file mode 100644 index fafa79c7e7..0000000000 --- a/changelog.d/7152.feature +++ /dev/null @@ -1 +0,0 @@ -Improve the support for SSO authentication on the login fallback page. diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html index 712b0e3980..bcb6bc6bb7 100644 --- a/synapse/static/client/login/index.html +++ b/synapse/static/client/login/index.html @@ -9,7 +9,7 @@

-        <h1 id="title"></h1>
+        <h1>Log in with one of the following methods</h1>

diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index debe464371..276c271bbe 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -1,41 +1,37 @@ window.matrixLogin = { endpoint: location.origin + "/_matrix/client/r0/login", serverAcceptsPassword: false, + serverAcceptsCas: false, serverAcceptsSso: false, }; -var title_pre_auth = "Log in with one of the following methods"; -var title_post_auth = "Logging in..."; - var submitPassword = function(user, pwd) { console.log("Logging in with password..."); - set_title(title_post_auth); var data = { type: "m.login.password", user: user, password: pwd, }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { + show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var submitToken = function(loginToken) { console.log("Logging in with login token..."); - set_title(title_post_auth); var data = { type: "m.login.token", token: loginToken }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { + show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var errorFunc = function(err) { - // We want to show the error to the user rather than redirecting immediately to the - // SSO portal (if SSO is the only login option), so we inhibit the redirect. - show_login(true); + show_login(); if (err.responseJSON && err.responseJSON.error) { setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); @@ -49,33 +45,26 @@ var setFeedbackString = function(text) { $("#feedback").text(text); }; -var show_login = function(inhibit_redirect) { +var show_login = function() { + $("#loading").hide(); + var this_page = window.location.origin + window.location.pathname; $("#sso_redirect_url").val(this_page); - // If inhibit_redirect is false, and SSO is the only supported login method, we can - // redirect straight to the SSO page - if (matrixLogin.serverAcceptsSso) { - if (!inhibit_redirect && !matrixLogin.serverAcceptsPassword) { - $("#sso_form").submit(); - return; - } - - // Otherwise, show the SSO form - $("#sso_form").show(); - } - if (matrixLogin.serverAcceptsPassword) { $("#password_flow").show(); } - if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) { - $("#no_login_types").show(); + if (matrixLogin.serverAcceptsSso) { + $("#sso_flow").show(); + } else if (matrixLogin.serverAcceptsCas) { + $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect"); + $("#sso_flow").show(); } - set_title(title_pre_auth); - - $("#loading").hide(); + if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) { + $("#no_login_types").show(); + } }; var show_spinner = function() { @@ -85,15 +74,17 @@ var show_spinner = function() { $("#loading").show(); }; -var set_title = function(title) { - $("#title").text(title); -}; var fetch_info = function(cb) { $.get(matrixLogin.endpoint, function(response) { var serverAcceptsPassword = false; + var serverAcceptsCas = false; for (var i=0; i Date: Fri, 27 Mar 2020 20:21:09 +0000 Subject: [PATCH 377/382] Revert "Merge pull request #7153 from matrix-org/babolivier/sso_whitelist_login_fallback" This was incorrectly merged to master. This reverts commit 319c41f573eb14a966367b60b2e6e93bf6b028d9, reversing changes made to 229eb81498b0fe1da81e9b5b333a0285acde9446. 
--- changelog.d/7153.feature | 1 - docs/sample_config.yaml | 4 ---- synapse/config/sso.py | 15 --------------- tests/rest/client/v1/test_login.py | 9 +-------- 4 files changed, 1 insertion(+), 28 deletions(-) delete mode 100644 changelog.d/7153.feature diff --git a/changelog.d/7153.feature b/changelog.d/7153.feature deleted file mode 100644 index 414ebe1f69..0000000000 --- a/changelog.d/7153.feature +++ /dev/null @@ -1 +0,0 @@ -Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 07e922dc27..2ff0dd05a2 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1392,10 +1392,6 @@ sso: # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # - # If public_baseurl is set, then the login fallback page (used by clients - # that don't natively support the required login flows) is whitelisted in - # addition to any URLs in this list. - # # By default, this list is empty. # #client_whitelist: diff --git a/synapse/config/sso.py b/synapse/config/sso.py index ec3dca9efc..95762689bc 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -39,17 +39,6 @@ class SSOConfig(Config): self.sso_client_whitelist = sso_config.get("client_whitelist") or [] - # Attempt to also whitelist the server's login fallback, since that fallback sets - # the redirect URL to itself (so it can process the login token then return - # gracefully to the client). This would make it pointless to ask the user for - # confirmation, since the URL the confirmation page would be showing wouldn't be - # the client's. - # public_baseurl is an optional setting, so we only add the fallback's URL to the - # list if it's provided (because we can't figure out what that URL is otherwise). - if self.public_baseurl: - login_fallback_url = self.public_baseurl + "_matrix/static/client/login" - self.sso_client_whitelist.append(login_fallback_url) - def generate_config_section(self, **kwargs): return """\ # Additional settings to use with single-sign on systems such as SAML2 and CAS. @@ -65,10 +54,6 @@ class SSOConfig(Config): # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # - # If public_baseurl is set, then the login fallback page (used by clients - # that don't natively support the required login flows) is whitelisted in - # addition to any URLs in this list. - # # By default, this list is empty. 
# #client_whitelist: diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index aed8853d6e..da2c9bfa1e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -350,14 +350,7 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): def test_cas_redirect_whitelisted(self): """Tests that the SSO login flow serves a redirect to a whitelisted url """ - self._test_redirect("https://legit-site.com/") - - @override_config({"public_baseurl": "https://example.com"}) - def test_cas_redirect_login_fallback(self): - self._test_redirect("https://example.com/_matrix/static/client/login") - - def _test_redirect(self, redirect_url): - """Tests that the SSO login flow serves a redirect for the given redirect URL.""" + redirect_url = "https://legit-site.com/" cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" % (urllib.parse.quote(redirect_url)) From 14a8e7129793be3c7d27ea810b26efe4fba09bc9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 Apr 2020 11:28:43 +0100 Subject: [PATCH 378/382] Revert "Revert "Improve the UX of the login fallback when using SSO (#7152)"" This reverts commit 8d4cbdeaa9765ae0d87ec82b053f12ed8162f6f5. --- changelog.d/7152.feature | 1 + synapse/static/client/login/index.html | 2 +- synapse/static/client/login/js/login.js | 51 +++++++++++++++---------- 3 files changed, 32 insertions(+), 22 deletions(-) create mode 100644 changelog.d/7152.feature diff --git a/changelog.d/7152.feature b/changelog.d/7152.feature new file mode 100644 index 0000000000..fafa79c7e7 --- /dev/null +++ b/changelog.d/7152.feature @@ -0,0 +1 @@ +Improve the support for SSO authentication on the login fallback page. diff --git a/synapse/static/client/login/index.html b/synapse/static/client/login/index.html index bcb6bc6bb7..712b0e3980 100644 --- a/synapse/static/client/login/index.html +++ b/synapse/static/client/login/index.html @@ -9,7 +9,7 @@

-        <h1>Log in with one of the following methods</h1>
+        <h1 id="title"></h1>

diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js index 276c271bbe..debe464371 100644 --- a/synapse/static/client/login/js/login.js +++ b/synapse/static/client/login/js/login.js @@ -1,37 +1,41 @@ window.matrixLogin = { endpoint: location.origin + "/_matrix/client/r0/login", serverAcceptsPassword: false, - serverAcceptsCas: false, serverAcceptsSso: false, }; +var title_pre_auth = "Log in with one of the following methods"; +var title_post_auth = "Logging in..."; + var submitPassword = function(user, pwd) { console.log("Logging in with password..."); + set_title(title_post_auth); var data = { type: "m.login.password", user: user, password: pwd, }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var submitToken = function(loginToken) { console.log("Logging in with login token..."); + set_title(title_post_auth); var data = { type: "m.login.token", token: loginToken }; $.post(matrixLogin.endpoint, JSON.stringify(data), function(response) { - show_login(); matrixLogin.onLogin(response); }).error(errorFunc); }; var errorFunc = function(err) { - show_login(); + // We want to show the error to the user rather than redirecting immediately to the + // SSO portal (if SSO is the only login option), so we inhibit the redirect. + show_login(true); if (err.responseJSON && err.responseJSON.error) { setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); @@ -45,26 +49,33 @@ var setFeedbackString = function(text) { $("#feedback").text(text); }; -var show_login = function() { - $("#loading").hide(); - +var show_login = function(inhibit_redirect) { var this_page = window.location.origin + window.location.pathname; $("#sso_redirect_url").val(this_page); + // If inhibit_redirect is false, and SSO is the only supported login method, we can + // redirect straight to the SSO page + if (matrixLogin.serverAcceptsSso) { + if (!inhibit_redirect && !matrixLogin.serverAcceptsPassword) { + $("#sso_form").submit(); + return; + } + + // Otherwise, show the SSO form + $("#sso_form").show(); + } + if (matrixLogin.serverAcceptsPassword) { $("#password_flow").show(); } - if (matrixLogin.serverAcceptsSso) { - $("#sso_flow").show(); - } else if (matrixLogin.serverAcceptsCas) { - $("#sso_form").attr("action", "/_matrix/client/r0/login/cas/redirect"); - $("#sso_flow").show(); - } - - if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) { + if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsSso) { $("#no_login_types").show(); } + + set_title(title_pre_auth); + + $("#loading").hide(); }; var show_spinner = function() { @@ -74,17 +85,15 @@ var show_spinner = function() { $("#loading").show(); }; +var set_title = function(title) { + $("#title").text(title); +}; var fetch_info = function(cb) { $.get(matrixLogin.endpoint, function(response) { var serverAcceptsPassword = false; - var serverAcceptsCas = false; for (var i=0; i Date: Fri, 3 Apr 2020 11:28:49 +0100 Subject: [PATCH 379/382] Revert "Revert "Merge pull request #7153 from matrix-org/babolivier/sso_whitelist_login_fallback"" This reverts commit 0122ef1037c8bfe826ea09d9fc7cd63fb9c59fd1. 
--- changelog.d/7153.feature | 1 + docs/sample_config.yaml | 4 ++++ synapse/config/sso.py | 15 +++++++++++++++ tests/rest/client/v1/test_login.py | 9 ++++++++- 4 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7153.feature diff --git a/changelog.d/7153.feature b/changelog.d/7153.feature new file mode 100644 index 0000000000..414ebe1f69 --- /dev/null +++ b/changelog.d/7153.feature @@ -0,0 +1 @@ +Always whitelist the login fallback in the SSO configuration if `public_baseurl` is set. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2ff0dd05a2..07e922dc27 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1392,6 +1392,10 @@ sso: # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # + # If public_baseurl is set, then the login fallback page (used by clients + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. + # # By default, this list is empty. # #client_whitelist: diff --git a/synapse/config/sso.py b/synapse/config/sso.py index 95762689bc..ec3dca9efc 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -39,6 +39,17 @@ class SSOConfig(Config): self.sso_client_whitelist = sso_config.get("client_whitelist") or [] + # Attempt to also whitelist the server's login fallback, since that fallback sets + # the redirect URL to itself (so it can process the login token then return + # gracefully to the client). This would make it pointless to ask the user for + # confirmation, since the URL the confirmation page would be showing wouldn't be + # the client's. + # public_baseurl is an optional setting, so we only add the fallback's URL to the + # list if it's provided (because we can't figure out what that URL is otherwise). + if self.public_baseurl: + login_fallback_url = self.public_baseurl + "_matrix/static/client/login" + self.sso_client_whitelist.append(login_fallback_url) + def generate_config_section(self, **kwargs): return """\ # Additional settings to use with single-sign on systems such as SAML2 and CAS. @@ -54,6 +65,10 @@ class SSOConfig(Config): # phishing attacks from evil.site. To avoid this, include a slash after the # hostname: "https://my.client/". # + # If public_baseurl is set, then the login fallback page (used by clients + # that don't natively support the required login flows) is whitelisted in + # addition to any URLs in this list. + # # By default, this list is empty. 
# #client_whitelist: diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index da2c9bfa1e..aed8853d6e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -350,7 +350,14 @@ class CASRedirectConfirmTestCase(unittest.HomeserverTestCase): def test_cas_redirect_whitelisted(self): """Tests that the SSO login flow serves a redirect to a whitelisted url """ - redirect_url = "https://legit-site.com/" + self._test_redirect("https://legit-site.com/") + + @override_config({"public_baseurl": "https://example.com"}) + def test_cas_redirect_login_fallback(self): + self._test_redirect("https://example.com/_matrix/static/client/login") + + def _test_redirect(self, redirect_url): + """Tests that the SSO login flow serves a redirect for the given redirect URL.""" cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" % (urllib.parse.quote(redirect_url)) From bae32740daa5551b6613cafafb5d5bc1a73141ec Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 12:29:30 +0100 Subject: [PATCH 380/382] Remove some `run_in_background` calls in replication code (#7203) By running this stuff with `run_in_background`, it won't be correctly reported against the relevant CPU usage stats. Fixes #7202 --- changelog.d/7203.bugfix | 1 + synapse/app/generic_worker.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) create mode 100644 changelog.d/7203.bugfix diff --git a/changelog.d/7203.bugfix b/changelog.d/7203.bugfix new file mode 100644 index 0000000000..8b383952e5 --- /dev/null +++ b/changelog.d/7203.bugfix @@ -0,0 +1 @@ +Fix some worker-mode replication handling not being correctly recorded in CPU usage stats. 
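The mechanics of the fix: `run_in_background` detaches the work from the
calling logcontext, so any CPU it burns is attributed elsewhere, whereas
awaiting the coroutine keeps it inside the current context. A contrived
sketch of the two shapes (`process` is an invented stand-in for
`process_and_notify`; `run_in_background` is the real helper from
synapse.logging.context):

```python
from synapse.logging.context import run_in_background

async def process(stream_name, token, rows):
    ...  # stand-in for the real replication handling

async def on_rdata_detached(stream_name, token, rows):
    # Before: fire-and-forget, so process()'s CPU time is not recorded
    # against the logcontext handling the replication traffic.
    run_in_background(process, stream_name, token, rows)

async def on_rdata_attached(stream_name, token, rows):
    # After: awaited, so the work stays in the current logcontext and is
    # correctly reflected in its CPU usage stats.
    await process(stream_name, token, rows)
```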
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1ee266f7c5..174bef360f 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -42,7 +42,7 @@ from synapse.handlers.presence import PresenceHandler, get_interested_parties from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite -from synapse.logging.context import LoggingContext, run_in_background +from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ @@ -635,7 +635,7 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): await super(GenericWorkerReplicationHandler, self).on_rdata( stream_name, token, rows ) - run_in_background(self.process_and_notify, stream_name, token, rows) + await self.process_and_notify(stream_name, token, rows) def get_streams_to_replicate(self): args = super(GenericWorkerReplicationHandler, self).get_streams_to_replicate() @@ -650,7 +650,9 @@ class GenericWorkerReplicationHandler(ReplicationClientHandler): async def process_and_notify(self, stream_name, token, rows): try: if self.send_handler: - self.send_handler.process_replication_rows(stream_name, token, rows) + await self.send_handler.process_replication_rows( + stream_name, token, rows + ) if stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so @@ -782,12 +784,12 @@ class FederationSenderHandler(object): def stream_positions(self): return {"federation": self.federation_position} - def process_replication_rows(self, stream_name, token, rows): + async def process_replication_rows(self, stream_name, token, rows): # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": send_queue.process_rows_for_federation(self.federation_sender, rows) - run_in_background(self.update_token, token) + await self.update_token(token) # We also need to poke the federation sender when new events happen elif stream_name == "events": @@ -795,9 +797,7 @@ class FederationSenderHandler(object): # ... and when new receipts happen elif stream_name == ReceiptsStream.NAME: - run_as_background_process( - "process_receipts_for_federation", self._on_new_receipts, rows - ) + await self._on_new_receipts(rows) # ... as well as device updates and messages elif stream_name == DeviceListsStream.NAME: From 0f05fd15304f1931ef167351de63cc8ffa1d3a98 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 3 Apr 2020 13:21:30 +0100 Subject: [PATCH 381/382] Reduce the number of calls to `resource.getrusage` (#7183) Let's just call `getrusage` once on each logcontext change, rather than twice. --- changelog.d/7183.misc | 1 + synapse/logging/context.py | 102 +++++++++++++++++++++++-------------- 2 files changed, 64 insertions(+), 39 deletions(-) create mode 100644 changelog.d/7183.misc diff --git a/changelog.d/7183.misc b/changelog.d/7183.misc new file mode 100644 index 0000000000..731f4dcb52 --- /dev/null +++ b/changelog.d/7183.misc @@ -0,0 +1 @@ +Clean up some LoggingContext code. 
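The shape of the optimisation is that each context switch takes one `getrusage`
snapshot and hands it to both the outgoing context's `stop()` and the incoming
context's `start()`, instead of each of them calling `getrusage` themselves. A
minimal standalone sketch of that pattern, with the logcontext machinery
reduced to stubs (the real code uses RUSAGE_THREAD where available):

```python
import resource

def get_thread_resource_usage():
    # RUSAGE_SELF keeps the sketch portable; synapse uses RUSAGE_THREAD
    return resource.getrusage(resource.RUSAGE_SELF)

class Context:
    def __init__(self):
        self.usage_start = None
        self.cpu_seconds = 0.0

    def start(self, rusage):
        self.usage_start = rusage

    def stop(self, rusage):
        if self.usage_start is not None:
            self.cpu_seconds += rusage.ru_utime - self.usage_start.ru_utime
        self.usage_start = None

def switch_context(old, new):
    # One getrusage call per switch, shared by stop() and start().
    rusage = get_thread_resource_usage()
    old.stop(rusage)
    new.start(rusage)
```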
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 3254d6a8df..a8f674d13d 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -51,7 +51,7 @@ try:

     is_thread_resource_usage_supported = True

-    def get_thread_resource_usage():
+    def get_thread_resource_usage() -> "Optional[resource._RUsage]":
         return resource.getrusage(RUSAGE_THREAD)


@@ -60,7 +60,7 @@ except Exception:
     # won't track resource usage.
     is_thread_resource_usage_supported = False

-    def get_thread_resource_usage():
+    def get_thread_resource_usage() -> "Optional[resource._RUsage]":
         return None


@@ -201,10 +201,10 @@ class _Sentinel(object):
         record["request"] = None
         record["scope"] = None

-    def start(self):
+    def start(self, rusage: "Optional[resource._RUsage]"):
         pass

-    def stop(self):
+    def stop(self, rusage: "Optional[resource._RUsage]"):
         pass

     def add_database_transaction(self, duration_sec):
@@ -261,7 +261,7 @@ class LoggingContext(object):

         # The thread resource usage when the logcontext became active. None
         # if the context is not currently active.
-        self.usage_start = None
+        self.usage_start = None  # type: Optional[resource._RUsage]

         self.main_thread = get_thread_id()
         self.request = None
@@ -336,7 +336,17 @@ class LoggingContext(object):
         record["request"] = self.request
         record["scope"] = self.scope

-    def start(self) -> None:
+    def start(self, rusage: "Optional[resource._RUsage]") -> None:
+        """
+        Record that this logcontext is currently running.
+
+        This should not be called directly: use set_current_context
+
+        Args:
+            rusage: the resources used by the current thread, at the point of
+                switching to this logcontext. May be None if this platform doesn't
+                support getrusage.
+        """
         if get_thread_id() != self.main_thread:
             logger.warning("Started logcontext %s on different thread", self)
             return
@@ -349,36 +359,48 @@ class LoggingContext(object):
         if self.usage_start:
             logger.warning("Re-starting already-active log context %s", self)
         else:
-            self.usage_start = get_thread_resource_usage()
+            self.usage_start = rusage

-    def stop(self) -> None:
-        if get_thread_id() != self.main_thread:
-            logger.warning("Stopped logcontext %s on different thread", self)
-            return
+    def stop(self, rusage: "Optional[resource._RUsage]") -> None:
+        """
+        Record that this logcontext is no longer running.

-        # When we stop, let's record the cpu used since we started
-        if not self.usage_start:
-            # Log a warning on platforms that support thread usage tracking
-            if is_thread_resource_usage_supported:
+        This should not be called directly: use set_current_context
+
+        Args:
+            rusage: the resources used by the current thread, at the point of
+                switching away from this logcontext. May be None if this platform
+                doesn't support getrusage.
+        """
+
+        try:
+            if get_thread_id() != self.main_thread:
+                logger.warning("Stopped logcontext %s on different thread", self)
+                return
+
+            if not rusage:
+                return
+
+            # Record the cpu used since we started
+            if not self.usage_start:
                 logger.warning(
-                    "Called stop on logcontext %s without calling start", self
+                    "Called stop on logcontext %s without recording a start rusage",
+                    self,
                 )
-            return
+                return

-        utime_delta, stime_delta = self._get_cputime()
-        self._resource_usage.ru_utime += utime_delta
-        self._resource_usage.ru_stime += stime_delta
+            utime_delta, stime_delta = self._get_cputime(rusage)
+            self._resource_usage.ru_utime += utime_delta
+            self._resource_usage.ru_stime += stime_delta

-        self.usage_start = None
+            # if we have a parent, pass our CPU usage stats on
+            if self.parent_context:
+                self.parent_context._resource_usage += self._resource_usage

-        # if we have a parent, pass our CPU usage stats on
-        if self.parent_context is not None and hasattr(
-            self.parent_context, "_resource_usage"
-        ):
-            self.parent_context._resource_usage += self._resource_usage
-
-        # reset them in case we get entered again
-        self._resource_usage.reset()
+            # reset them in case we get entered again
+            self._resource_usage.reset()
+        finally:
+            self.usage_start = None

     def get_resource_usage(self) -> ContextResourceUsage:
         """Get resources used by this logcontext so far.
@@ -394,24 +416,24 @@
         # can include resource usage so far.
         is_main_thread = get_thread_id() == self.main_thread
         if self.usage_start and is_main_thread:
-            utime_delta, stime_delta = self._get_cputime()
+            rusage = get_thread_resource_usage()
+            assert rusage is not None
+            utime_delta, stime_delta = self._get_cputime(rusage)
             res.ru_utime += utime_delta
             res.ru_stime += stime_delta

         return res

-    def _get_cputime(self) -> Tuple[float, float]:
-        """Get the cpu usage time so far
+    def _get_cputime(self, current: "resource._RUsage") -> Tuple[float, float]:
+        """Get the cpu usage time between start() and the given rusage
+
+        Args:
+            current: the current resource usage

         Returns:
             Tuple[float, float]: seconds in user mode, seconds in system mode
         """
         assert self.usage_start is not None

-        current = get_thread_resource_usage()
-
-        # Indicate to mypy that we know that self.usage_start is None.
-        assert self.usage_start is not None
-
         utime_delta = current.ru_utime - self.usage_start.ru_utime
         stime_delta = current.ru_stime - self.usage_start.ru_stime

@@ -547,9 +569,11 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe
     current = current_context()

     if current is not context:
-        current.stop()
+        rusage = get_thread_resource_usage()
+        current.stop(rusage)
         _thread_local.current_context = context
-        context.start()
+        context.start(rusage)
+
     return current

From 07b88c546de1b24f5cbc9b4cb6da98400a8155af Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 3 Apr 2020 14:26:07 +0100
Subject: [PATCH 382/382] Convert http.HTTPStatus objects to their int
 equivalent (#7188)

---
 changelog.d/7188.misc | 1 +
 synapse/api/errors.py | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7188.misc

diff --git a/changelog.d/7188.misc b/changelog.d/7188.misc
new file mode 100644
index 0000000000..f72955b95b
--- /dev/null
+++ b/changelog.d/7188.misc
@@ -0,0 +1 @@
+Fix consistency of HTTP status codes reported in log lines.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 11da016ac5..d54dfb385d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -86,7 +86,14 @@ class CodeMessageException(RuntimeError): def __init__(self, code, msg): super(CodeMessageException, self).__init__("%d: %s" % (code, msg)) - self.code = code + + # Some calls to this method pass instances of http.HTTPStatus for `code`. + # While HTTPStatus is a subclass of int, it has magic __str__ methods + # which emit `HTTPStatus.FORBIDDEN` when converted to a str, instead of `403`. + # This causes inconsistency in our log lines. + # + # To eliminate this behaviour, we convert them to their integer equivalents here. + self.code = int(code) self.msg = msg
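The behaviour being normalised is easy to reproduce in a REPL (output shown
for the Python 3 versions current at the time; Python 3.11 later changed
`IntEnum.__str__` to return the bare number):

```python
from http import HTTPStatus

code = HTTPStatus.FORBIDDEN
print("%d: %s" % (code, "Forbidden"))  # "403: Forbidden" - %d forces the int form
print("%s" % code)                     # "HTTPStatus.FORBIDDEN" - what leaked into logs
print("%s" % int(code))                # "403" - normalising up front makes %s safe too
```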