From 5336acd46fae8f16846970f2cddbeebc2a2b0e81 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 16 Sep 2016 19:02:42 +0100
Subject: [PATCH 01/10] Make public room search case insensitive

---
 synapse/handlers/room_list.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index f15987b265..9a6bbeb603 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -361,12 +361,12 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
 
 def _matches_room_entry(room_entry, search_filter):
     if search_filter and search_filter.get("generic_search_term", None):
-        generic_search_term = search_filter["generic_search_term"]
-        if generic_search_term in room_entry.get("name", ""):
+        generic_search_term = search_filter["generic_search_term"].upper()
+        if generic_search_term in room_entry.get("name", "").upper():
             return True
-        elif generic_search_term in room_entry.get("topic", ""):
+        elif generic_search_term in room_entry.get("topic", "").upper():
             return True
-        elif generic_search_term in room_entry.get("canonical_alias", ""):
+        elif generic_search_term in room_entry.get("canonical_alias", "").upper():
             return True
     else:
         return True

From 883df2e9831a98d3f2598e52e6369e40c6f14ec5 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Sat, 17 Sep 2016 14:12:04 +0100
Subject: [PATCH 02/10] fix logger for client_reader worker

---
 synapse/app/client_reader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 9aaa3bace2..9fccc73db3 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -51,7 +51,7 @@ import sys
 import logging
 import gc
 
-logger = logging.getLogger("synapse.app.federation_reader")
+logger = logging.getLogger("synapse.app.client_reader")
 
 
 class ClientReaderSlavedStore(

From 64527f94cc804edbc41f925e498edfc48221767f Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Sat, 17 Sep 2016 14:15:10 +0100
Subject: [PATCH 03/10] mention client_reader worker

---
 docs/workers.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/workers.rst b/docs/workers.rst
index 4eb05b0e59..65b6e690f7 100644
--- a/docs/workers.rst
+++ b/docs/workers.rst
@@ -42,6 +42,7 @@ The current available worker applications are:
  * synapse.app.appservice - handles output traffic to Application Services
  * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
  * synapse.app.media_repository - handles the media repository.
+ * synapse.app.client_reader - handles client API endpoints like /publicRooms
 
 Each worker configuration file inherits the configuration of the main homeserver
 configuration file. You can then override configuration specific to that worker,
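Aside on PATCH 01 above: the fix makes the public room search case insensitive by upper-casing both the search term and the name, topic and canonical alias it is compared against. A minimal standalone sketch of that behaviour follows; it is an illustration only, with an invented helper name and a collapsed any() form rather than the committed if/elif chain:

def matches_room_entry(room_entry, search_filter):
    # Upper-case both sides so that "Synapse" matches a search for "synapse".
    term = (search_filter or {}).get("generic_search_term")
    if not term:
        # No search term: every room matches, mirroring the else branch above.
        return True
    term = term.upper()
    return any(
        term in room_entry.get(field, "").upper()
        for field in ("name", "topic", "canonical_alias")
    )


print(matches_room_entry({"name": "Synapse Admins"}, {"generic_search_term": "synapse"}))  # True
print(matches_room_entry({"topic": "general chat"}, {"generic_search_term": "SYNAPSE"}))   # False

Upper-casing both sides is enough for the mostly ASCII names and aliases involved; a locale-aware comparison would need more care, but the committed change keeps to str.upper().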
From 71edaae9812671bed7ffb7f08347251612cef71f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 17 Sep 2016 14:40:28 +0100
Subject: [PATCH 04/10] Fix and clean up publicRooms pagination

---
 synapse/handlers/room_list.py | 233 ++++++++++++++++++----------------
 1 file changed, 123 insertions(+), 110 deletions(-)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index f15987b265..a33b644d0f 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -115,134 +115,63 @@ class RoomListHandler(BaseHandler):
         sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
         sorted_rooms = [room_id for room_id, _ in sorted_entries]
 
+        # `sorted_rooms` should now be a list of all public room ids that is
+        # stable across pagination. Therefore, we can use indices into this
+        # list as our pagination tokens.
+
+        # Filter out rooms that we don't want to return
+        rooms_to_scan = [
+            r for r in sorted_rooms
+            if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0
+        ]
+
         if since_token:
+            # Filter out rooms we've already returned previously
+            # `since_token.current_limit` is the index of the last room we
+            # sent down, so we exclude it and everything before/after it.
             if since_token.direction_is_forward:
-                sorted_rooms = sorted_rooms[since_token.current_limit:]
+                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
             else:
-                sorted_rooms = sorted_rooms[:since_token.current_limit]
-                sorted_rooms.reverse()
+                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
+                rooms_to_scan.reverse()
 
-        rooms_to_scan = sorted_rooms
+        # If there's not search filter just limit the range since we'll
+        # return the vast majority of things.
         if limit and not search_filter:
-            rooms_to_scan = sorted_rooms[:limit + 1]
+            rooms_to_scan = rooms_to_scan[:limit + 1]
 
+        # Actually generate the entries. _generate_room_entry will append to
+        # chunk but will stop if len(chunk) > limit
         chunk = []
-
-        @defer.inlineCallbacks
-        def handle_room(room_id):
-            if limit and len(chunk) > limit + 1:
-                # We've already got enough, so lets just drop it.
-                return
-
-            num_joined_users = rooms_to_num_joined[room_id]
-            if num_joined_users == 0:
-                return
-
-            if room_id in newly_unpublished:
-                return
-
-            result = {
-                "room_id": room_id,
-                "num_joined_members": num_joined_users,
-            }
-
-            current_state_ids = yield self.state_handler.get_current_state_ids(room_id)
-
-            event_map = yield self.store.get_events([
-                event_id for key, event_id in current_state_ids.items()
-                if key[0] in (
-                    EventTypes.JoinRules,
-                    EventTypes.Name,
-                    EventTypes.Topic,
-                    EventTypes.CanonicalAlias,
-                    EventTypes.RoomHistoryVisibility,
-                    EventTypes.GuestAccess,
-                    "m.room.avatar",
-                )
-            ])
-
-            current_state = {
-                (ev.type, ev.state_key): ev
-                for ev in event_map.values()
-            }
-
-            # Double check that this is actually a public room.
-            join_rules_event = current_state.get((EventTypes.JoinRules, ""))
-            if join_rules_event:
-                join_rule = join_rules_event.content.get("join_rule", None)
-                if join_rule and join_rule != JoinRules.PUBLIC:
-                    defer.returnValue(None)
-
-            aliases = yield self.store.get_aliases_for_room(room_id)
-            if aliases:
-                result["aliases"] = aliases
-
-            name_event = yield current_state.get((EventTypes.Name, ""))
-            if name_event:
-                name = name_event.content.get("name", None)
-                if name:
-                    result["name"] = name
-
-            topic_event = current_state.get((EventTypes.Topic, ""))
-            if topic_event:
-                topic = topic_event.content.get("topic", None)
-                if topic:
-                    result["topic"] = topic
-
-            canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
-            if canonical_event:
-                canonical_alias = canonical_event.content.get("alias", None)
-                if canonical_alias:
-                    result["canonical_alias"] = canonical_alias
-
-            visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
-            visibility = None
-            if visibility_event:
-                visibility = visibility_event.content.get("history_visibility", None)
-            result["world_readable"] = visibility == "world_readable"
-
-            guest_event = current_state.get((EventTypes.GuestAccess, ""))
-            guest = None
-            if guest_event:
-                guest = guest_event.content.get("guest_access", None)
-            result["guest_can_join"] = guest == "can_join"
-
-            avatar_event = current_state.get(("m.room.avatar", ""))
-            if avatar_event:
-                avatar_url = avatar_event.content.get("url", None)
-                if avatar_url:
-                    result["avatar_url"] = avatar_url
-
-            if _matches_room_entry(result, search_filter):
-                chunk.append(result)
-
-        yield concurrently_execute(handle_room, rooms_to_scan, 10)
+        yield concurrently_execute(
+            lambda r: self._generate_room_entry(
+                r, rooms_to_num_joined[r],
+                chunk, limit, search_filter
+            ),
+            rooms_to_scan, 10
+        )
 
         chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
 
         # Work out the new limit of the batch for pagination, or None if we
         # know there are no more results that would be returned.
+        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
+        # we've returned (or the reverse if we paginated backwards)
+        # We tried to pull out limit + 1 rooms above, so if we have <= limit
+        # then we know there are no more results to return
         new_limit = None
         if chunk and (not limit or len(chunk) > limit):
-            if limit:
-                chunk = chunk[:limit]
-
-            addition = 1
-            if since_token:
-                addition += since_token.current_limit
 
             if not since_token or since_token.direction_is_forward:
+                if limit:
+                    chunk = chunk[:limit]
                 last_room_id = chunk[-1]["room_id"]
             else:
+                if limit:
+                    chunk = chunk[-limit:]
                 last_room_id = chunk[0]["room_id"]
-                addition *= -1
 
-            try:
-                new_limit = sorted_rooms.index(last_room_id) + addition
-                if new_limit >= len(sorted_rooms):
-                    new_limit = None
-            except ValueError:
-                pass
+            new_limit = sorted_rooms.index(last_room_id)
 
         results = {
             "chunk": chunk,
@@ -252,7 +181,7 @@ class RoomListHandler(BaseHandler):
             results["new_rooms"] = bool(newly_visible)
 
         if not since_token or since_token.direction_is_forward:
-            if new_limit:
+            if new_limit is not None:
                 results["next_batch"] = RoomListNextBatch(
                     stream_ordering=stream_token,
                     public_room_stream_id=public_room_stream_id,
@@ -263,9 +192,10 @@ class RoomListHandler(BaseHandler):
             if since_token:
                 results["prev_batch"] = since_token.copy_and_replace(
                     direction_is_forward=False,
+                    current_limit=since_token.current_limit + 1,
                 ).to_token()
         else:
-            if new_limit:
+            if new_limit is not None:
                 results["prev_batch"] = RoomListNextBatch(
                     stream_ordering=stream_token,
                     public_room_stream_id=public_room_stream_id,
@@ -276,10 +206,93 @@ class RoomListHandler(BaseHandler):
             if since_token:
                 results["next_batch"] = since_token.copy_and_replace(
                     direction_is_forward=True,
+                    current_limit=since_token.current_limit - 1,
                 ).to_token()
 
         defer.returnValue(results)
 
+    @defer.inlineCallbacks
+    def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
+                             search_filter):
+        if limit and len(chunk) > limit + 1:
+            # We've already got enough, so lets just drop it.
+            return
+
+        result = {
+            "room_id": room_id,
+            "num_joined_members": num_joined_users,
+        }
+
+        current_state_ids = yield self.state_handler.get_current_state_ids(room_id)
+
+        event_map = yield self.store.get_events([
+            event_id for key, event_id in current_state_ids.items()
+            if key[0] in (
+                EventTypes.JoinRules,
+                EventTypes.Name,
+                EventTypes.Topic,
+                EventTypes.CanonicalAlias,
+                EventTypes.RoomHistoryVisibility,
+                EventTypes.GuestAccess,
+                "m.room.avatar",
+            )
+        ])
+
+        current_state = {
+            (ev.type, ev.state_key): ev
+            for ev in event_map.values()
+        }
+
+        # Double check that this is actually a public room.
+        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
+        if join_rules_event:
+            join_rule = join_rules_event.content.get("join_rule", None)
+            if join_rule and join_rule != JoinRules.PUBLIC:
+                defer.returnValue(None)
+
+        aliases = yield self.store.get_aliases_for_room(room_id)
+        if aliases:
+            result["aliases"] = aliases
+
+        name_event = yield current_state.get((EventTypes.Name, ""))
+        if name_event:
+            name = name_event.content.get("name", None)
+            if name:
+                result["name"] = name
+
+        topic_event = current_state.get((EventTypes.Topic, ""))
+        if topic_event:
+            topic = topic_event.content.get("topic", None)
+            if topic:
+                result["topic"] = topic
+
+        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_event:
+            canonical_alias = canonical_event.content.get("alias", None)
+            if canonical_alias:
+                result["canonical_alias"] = canonical_alias
+
+        visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
+        visibility = None
+        if visibility_event:
+            visibility = visibility_event.content.get("history_visibility", None)
+        result["world_readable"] = visibility == "world_readable"
+
+        guest_event = current_state.get((EventTypes.GuestAccess, ""))
+        guest = None
+        if guest_event:
+            guest = guest_event.content.get("guest_access", None)
+        result["guest_can_join"] = guest == "can_join"
+
+        avatar_event = current_state.get(("m.room.avatar", ""))
+        if avatar_event:
+            avatar_url = avatar_event.content.get("url", None)
+            if avatar_url:
+                result["avatar_url"] = avatar_url
+
+        if _matches_room_entry(result, search_filter):
+            chunk.append(result)
+
     @defer.inlineCallbacks
     def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                     search_filter=None):
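Aside on PATCH 04 above: the rework relies on `sorted_rooms` being a stable ordering of all public rooms, so a batch token only has to record the index of the last room returned (`current_limit`) plus the direction of travel, and the handler pulls `limit + 1` entries to learn whether another page exists. The sketch below is a simplified, Twisted-free illustration of that scheme; only the field names `current_limit` and `direction_is_forward` come from the patch, the function name and room ids are invented, and backwards pagination tokens are left out:

from collections import namedtuple

# Token mirrors the two fields the patch relies on; the rest is illustrative.
Token = namedtuple("Token", ["current_limit", "direction_is_forward"])


def paginate(sorted_rooms, limit, since_token=None):
    rooms = list(sorted_rooms)
    if since_token:
        if since_token.direction_is_forward:
            # current_limit is the index of the last room already sent, so
            # skip it and everything before it.
            rooms = rooms[since_token.current_limit + 1:]
        else:
            rooms = rooms[:since_token.current_limit]
            rooms.reverse()

    # Pull one extra entry so we can tell whether another page exists.
    chunk = rooms[:limit + 1]
    has_more = len(chunk) > limit
    chunk = chunk[:limit]

    next_token = None
    if has_more and chunk:
        # The token is just the index of the last room returned, taken
        # against the stable overall ordering.
        next_token = Token(
            current_limit=sorted_rooms.index(chunk[-1]),
            direction_is_forward=True,
        )
    return chunk, next_token


rooms = ["!a:hs", "!b:hs", "!c:hs", "!d:hs", "!e:hs"]
page, token = paginate(rooms, limit=2)                     # ['!a:hs', '!b:hs']
page, token = paginate(rooms, limit=2, since_token=token)  # ['!c:hs', '!d:hs']
page, token = paginate(rooms, limit=2, since_token=token)  # ['!e:hs'], token is None

Recording a plain list index is also why the later check becomes `if new_limit is not None:` rather than a truthiness test: an index of 0 is falsy but still a valid place to resume from.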
From a298331de42fd0931e9a4d068925ac8ff1c52f46 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 17 Sep 2016 14:59:40 +0100
Subject: [PATCH 05/10] Spelling

---
 synapse/handlers/room_list.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index a33b644d0f..ff7a7a06b4 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -135,7 +135,7 @@ class RoomListHandler(BaseHandler):
                 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                 rooms_to_scan.reverse()
 
-        # If there's not search filter just limit the range since we'll
+        # If there's no search filter just limit the range since we'll
         # return the vast majority of things.
         if limit and not search_filter:
             rooms_to_scan = rooms_to_scan[:limit + 1]

From ddc89df89d26fab5c26bf4a67d4a8470cc13593d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 17 Sep 2016 15:55:24 +0100
Subject: [PATCH 06/10] Enable guest access to POST /publicRooms

---
 synapse/rest/client/v1/room.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 5584bfbfc0..45287bf05b 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -307,7 +307,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
         server = parse_string(request, "server", default=None)
 
         try:
-            yield self.auth.get_user_by_req(request)
+            yield self.auth.get_user_by_req(request, allow_guest=True)
         except AuthError as e:
             # We allow people to not be authed if they're just looking at our
             # room list, but require auth when we proxy the request.
@@ -339,7 +339,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
 
     @defer.inlineCallbacks
     def on_POST(self, request):
-        yield self.auth.get_user_by_req(request)
+        yield self.auth.get_user_by_req(request, allow_guest=True)
 
         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)

From 81570abfb2eb90b7e5d8dfa319935150d4e78751 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 17 Sep 2016 18:01:54 +0100
Subject: [PATCH 07/10] Handle fact that _generate_room_entry may not return a room entry

---
 synapse/handlers/room_list.py | 37 +++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index ff7a7a06b4..45a689ef2c 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -135,21 +135,34 @@ class RoomListHandler(BaseHandler):
                 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                 rooms_to_scan.reverse()
 
-        # If there's no search filter just limit the range since we'll
-        # return the vast majority of things.
-        if limit and not search_filter:
-            rooms_to_scan = rooms_to_scan[:limit + 1]
-
         # Actually generate the entries. _generate_room_entry will append to
         # chunk but will stop if len(chunk) > limit
         chunk = []
-        yield concurrently_execute(
-            lambda r: self._generate_room_entry(
-                r, rooms_to_num_joined[r],
-                chunk, limit, search_filter
-            ),
-            rooms_to_scan, 10
-        )
+        if limit and not search_filter:
+            step = limit + 1
+            for i in xrange(0 , len(rooms_to_scan), step):
+                # We iterate here because the vast majority of cases we'll stop
+                # at first iteration, but occaisonally _generate_room_entry
+                # won't append to the chunk and so we need to loop again.
+                # We don't want to scan over the entire range either as that
+                # would potentially waste a lot of work.
+                yield concurrently_execute(
+                    lambda r: self._generate_room_entry(
+                        r, rooms_to_num_joined[r],
+                        chunk, limit, search_filter
+                    ),
+                    rooms_to_scan[i:i + step], 10
+                )
+                if len(chunk) >= limit + 1:
+                    break
+        else:
+            yield concurrently_execute(
+                lambda r: self._generate_room_entry(
+                    r, rooms_to_num_joined[r],
+                    chunk, limit, search_filter
+                ),
+                rooms_to_scan, 5
+            )
 
         chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
 

From 4d49e0bdfd01fcd4f3cad580e3ca1184c432c9ff Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Sat, 17 Sep 2016 18:09:22 +0100
Subject: [PATCH 08/10] PEP8

---
 synapse/handlers/room_list.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 45a689ef2c..a9558e6314 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -140,7 +140,7 @@ class RoomListHandler(BaseHandler):
         chunk = []
         if limit and not search_filter:
             step = limit + 1
-            for i in xrange(0 , len(rooms_to_scan), step):
+            for i in xrange(0, len(rooms_to_scan), step):
                 # We iterate here because the vast majority of cases we'll stop
                 # at first iteration, but occaisonally _generate_room_entry
                 # won't append to the chunk and so we need to loop again.
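Aside on PATCHES 07-08 above: `_generate_room_entry` may decline to add an entry (the room can turn out not to be listable after all), so taking exactly `limit + 1` candidates is no longer guaranteed to fill a page. The loop introduced here scans candidates in batches of `limit + 1` and stops as soon as the chunk is full, which in the common case means a single batch. Below is a toy, Twisted-free sketch of that strategy; `generate_entry`, the room ids and the sequential inner loop (the real code uses concurrently_execute) are all illustrative:

def generate_entry(room_id):
    # Stand-in for _generate_room_entry: rooms can drop out at this stage,
    # which is exactly why a single batch of limit + 1 candidates may not
    # be enough.
    if room_id.endswith("-private"):
        return None
    return {"room_id": room_id}


def scan(candidates, limit):
    chunk = []
    step = limit + 1  # one spare entry lets the caller detect a further page
    for i in range(0, len(candidates), step):
        for room_id in candidates[i:i + step]:
            entry = generate_entry(room_id)
            if entry is not None:
                chunk.append(entry)
        if len(chunk) >= limit + 1:
            # The common case: the first batch already fills the chunk.
            break
    return chunk


rooms = ["!a", "!b-private", "!c", "!d", "!e"]
print([e["room_id"] for e in scan(rooms, limit=2)])  # ['!a', '!c', '!d', '!e']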
From 3f6ec271ba84699f2dda48c2b2dc35135c658c21 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Sat, 17 Sep 2016 22:05:06 +0100
Subject: [PATCH 09/10] proposal for notifying on e2e events

---
 synapse/push/baserules.py | 49 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index edb00ed206..6454fe7388 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -263,6 +263,8 @@ BASE_APPEND_UNDERRIDE_RULES = [
             }
         ]
     },
+    # XXX: once m.direct is standardised everywhere, we should use it to detect
+    # a DM from the user's perspective rather than this heuristic.
     {
         'rule_id': 'global/underride/.m.rule.room_one_to_one',
         'conditions': [
@@ -289,6 +291,34 @@ BASE_APPEND_UNDERRIDE_RULES = [
             }
         ]
     },
+    # XXX: this is going to fire for events which aren't m.room.messages
+    # but are encrypted (e.g. m.call.*)...
+    {
+        'rule_id': 'global/underride/.m.rule.encrypted_room_one_to_one',
+        'conditions': [
+            {
+                'kind': 'room_member_count',
+                'is': '2',
+                '_id': 'member_count',
+            },
+            {
+                'kind': 'event_match',
+                'key': 'type',
+                'pattern': 'm.room.encrypted',
+                '_id': '_message',
+            }
+        ],
+        'actions': [
+            'notify',
+            {
+                'set_tweak': 'sound',
+                'value': 'default'
+            }, {
+                'set_tweak': 'highlight',
+                'value': False
+            }
+        ]
+    },
     {
         'rule_id': 'global/underride/.m.rule.message',
         'conditions': [
@@ -305,6 +335,25 @@ BASE_APPEND_UNDERRIDE_RULES = [
                 'value': False
             }
         ]
+    },
+    # XXX: this is going to fire for events which aren't m.room.messages
+    # but are encrypted (e.g. m.call.*)...
+    {
+        'rule_id': 'global/underride/.m.rule.encrypted',
+        'conditions': [
+            {
+                'kind': 'event_match',
+                'key': 'type',
+                'pattern': 'm.room.encrypted',
+                '_id': '_message',
+            }
+        ],
+        'actions': [
+            'notify', {
+                'set_tweak': 'highlight',
+                'value': False
+            }
+        ]
     }
 ]
 

From 49cf205dc715608e45b9a47a250e7921d1836912 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 19 Sep 2016 10:34:01 +0100
Subject: [PATCH 10/10] _id field must uniquely identify different conditions

---
 synapse/push/baserules.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 6454fe7388..85effdfa46 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -305,7 +305,7 @@ BASE_APPEND_UNDERRIDE_RULES = [
                 'kind': 'event_match',
                 'key': 'type',
                 'pattern': 'm.room.encrypted',
-                '_id': '_message',
+                '_id': '_encrypted',
             }
         ],
         'actions': [
@@ -345,7 +345,7 @@ BASE_APPEND_UNDERRIDE_RULES = [
                 'kind': 'event_match',
                 'key': 'type',
                 'pattern': 'm.room.encrypted',
-                '_id': '_message',
+                '_id': '_encrypted',
             }
         ],
         'actions': [
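Aside on PATCHES 09-10 above: the new underride rules notify on `m.room.encrypted` events, and the follow-up gives each condition a distinct `_id`. The `_id` is used as a key for caching a condition's result while an event is being evaluated, so two different conditions sharing `'_message'` would wrongly share one cached answer. A minimal sketch of that failure mode follows; the evaluator below is invented for illustration and is not Synapse's actual implementation:

def check_condition(event, condition):
    # Only enough of the real condition kinds to make the point.
    if condition["kind"] == "event_match":
        return event.get(condition["key"]) == condition["pattern"]
    return False


def rule_matches(event, rule, cache):
    # Each condition's result is memoised under its '_id', shared across all
    # rules evaluated for this event, so '_id' must be unique per condition.
    for condition in rule["conditions"]:
        key = condition.get("_id")
        if key is not None and key in cache:
            result = cache[key]
        else:
            result = check_condition(event, condition)
            if key is not None:
                cache[key] = result
        if not result:
            return False
    return True


event = {"type": "m.room.message"}
cache = {}

message_rule = {
    "conditions": [{
        "kind": "event_match", "key": "type",
        "pattern": "m.room.message", "_id": "_message",
    }],
}
encrypted_rule = {
    "conditions": [{
        # With '_id': '_message' (the pre-fix value) this condition would hit
        # the cached True from message_rule and wrongly match a plaintext event.
        "kind": "event_match", "key": "type",
        "pattern": "m.room.encrypted", "_id": "_encrypted",
    }],
}

print(rule_matches(event, message_rule, cache))    # True
print(rule_matches(event, encrypted_rule, cache))  # False, thanks to the unique '_id'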