From e83f8c0aa57cdc084edb19044c60a69c504f8cef Mon Sep 17 00:00:00 2001 From: Oleg Girko Date: Thu, 14 Apr 2016 14:46:18 +0100 Subject: [PATCH 001/414] Add environment file to systemd unit configuration. Now there is at least one environment variable that controls synapse server's behaviour: SYNAPSE_CACHE_FACTOR. So, it makes sense to make systemd unit file to use environment configuration file that can set this variable's value. Signed-off-by: Oleg Girko --- contrib/systemd/synapse.service | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/systemd/synapse.service b/contrib/systemd/synapse.service index 2e8cd21c9e..967a4debfd 100644 --- a/contrib/systemd/synapse.service +++ b/contrib/systemd/synapse.service @@ -9,6 +9,7 @@ Description=Synapse Matrix homeserver Type=simple User=synapse Group=synapse +EnvironmentFile=-/etc/sysconfig/synapse WorkingDirectory=/var/lib/synapse ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml --log-config=/etc/synapse/log_config.yaml From 5164ccc3e5cf82a6b47757f02ec994421bd54ada Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Apr 2016 11:20:08 +0100 Subject: [PATCH 002/414] Bump changelog and version --- CHANGES.rst | 42 ++++++++++++++++++++++++++++++++++++++---- synapse/__init__.py | 2 +- 2 files changed, 39 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 8c180750ad..b027fb970c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,37 @@ +Changes in synapse v0.15.0-rc1 (2016-04-26) +=========================================== + +Features: + +* Add login support for Javascript Web Tokens, thanks to Niklas Riekenbrauck + (PR #671,#687) +* Add URL previewing support (PR #688) +* Add login support for LDAP, thanks to Christoph Witzany (PR #701) +* Add GET endpoint for pushers (PR #716) + +Changes: + +* Never notify for member events (PR #667) +* Deduplicate identical ``/sync`` requests (PR #668) +* Require user to have left room to forget room (PR #673) +* Use DNS cache if within TTL (PR #677) +* Let users see their own leave events (PR #699) +* Deduplicate membership changes (PR #700) +* Increase performance of pusher code (PR #705) +* Respond with error status 504 if failed to talk to remote server (PR #731) +* Increase search performance on postgres (PR #745) + +Bug fixes: + +* Fix bug where disabling all notifications still resulted in push (PR #678) +* Fix bug where users couldn't reject remote invites if remote refused (PR #691) +* Fix bug where synapse attempted to backfill from itself (PR #693) +* Fix bug where profile information was not correctly added when joining remote + rooms (PR #703) +* Fix bug where register API required incorrect key name for AS registration + (PR #727) + + Changes in synapse v0.14.0 (2016-03-30) ======================================= @@ -511,7 +545,7 @@ Configuration: * Add support for changing the bind host of the metrics listener via the ``metrics_bind_host`` option. - + Changes in synapse v0.9.0-r5 (2015-05-21) ========================================= @@ -853,7 +887,7 @@ See UPGRADE for information about changes to the client server API, including breaking backwards compatibility with VoIP calls and registration API. Homeserver: - * When a user changes their displayname or avatar the server will now update + * When a user changes their displayname or avatar the server will now update all their join states to reflect this. * The server now adds "age" key to events to indicate how old they are. 
This is clock independent, so at no point does any server or webclient have to @@ -911,7 +945,7 @@ Changes in synapse 0.2.2 (2014-09-06) ===================================== Homeserver: - * When the server returns state events it now also includes the previous + * When the server returns state events it now also includes the previous content. * Add support for inviting people when creating a new room. * Make the homeserver inform the room via `m.room.aliases` when a new alias @@ -923,7 +957,7 @@ Webclient: * Handle `m.room.aliases` events. * Asynchronously send messages and show a local echo. * Inform the UI when a message failed to send. - * Only autoscroll on receiving a new message if the user was already at the + * Only autoscroll on receiving a new message if the user was already at the bottom of the screen. * Add support for ban/kick reasons. diff --git a/synapse/__init__.py b/synapse/__init__.py index 7de51fbe8d..988318f5ea 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.14.0" +__version__ = "0.15.0-rc1" From 1b45e6a9bce2d46d956c0923bf02bfd170839550 Mon Sep 17 00:00:00 2001 From: David Date: Fri, 6 May 2016 02:07:59 +0800 Subject: [PATCH 003/414] Fix Typo in README.rst s/Halp/Help/ --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 285fc5aa8a..95e7257115 100644 --- a/README.rst +++ b/README.rst @@ -587,7 +587,7 @@ Building internal API documentation:: -Halp!! Synapse eats all my RAM! +Help!! Synapse eats all my RAM! =============================== Synapse's architecture is quite RAM hungry currently - we deliberately From 1f1dee94f6025ce0a6e414cd6098cb766567bdd8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 May 2016 10:13:25 +0100 Subject: [PATCH 004/414] Manually run GC on reactor tick. This also adds a metric for amount of time spent in GC. --- synapse/metrics/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 5664d5a381..17be491b93 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -22,6 +22,7 @@ import functools import os import stat import time +import gc from twisted.internet import reactor @@ -155,6 +156,7 @@ get_metrics_for("process").register_callback("fds", _process_fds, labels=["type" reactor_metrics = get_metrics_for("reactor") tick_time = reactor_metrics.register_distribution("tick_time") pending_calls_metric = reactor_metrics.register_distribution("pending_calls") +gc_time = reactor_metrics.register_distribution("gc_time") def runUntilCurrentTimer(func): @@ -182,6 +184,18 @@ def runUntilCurrentTimer(func): end = time.time() * 1000 tick_time.inc_by(end - start) pending_calls_metric.inc_by(num_pending) + + threshold = gc.get_threshold() + counts = gc.get_count() + + start = time.time() * 1000 + for i in [2, 1, 0]: + if threshold[i] < counts[i]: + logger.info("Collecting gc %d", i) + gc.collect(i) + end = time.time() * 1000 + gc_time.inc_by(end - start) + return ret return f @@ -196,5 +210,6 @@ try: # runUntilCurrent is called when we have pending calls. It is called once # per iteratation after fd polling. 
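# An aside on the pattern in the patch above, as a self-contained sketch:
# wrap a callable, then compare the collector's counters against its
# thresholds and collect by hand, oldest generation first. `record_ms` is a
# hypothetical stand-in for the distribution metric's `inc_by`, not
# Synapse's actual API.

import gc
import time


def wrap_with_manual_gc(func, record_ms):
    def wrapped(*args, **kwargs):
        ret = func(*args, **kwargs)
        threshold = gc.get_threshold()
        counts = gc.get_count()
        # Oldest generation first; collecting gen 2 also sweeps gens 0 and 1.
        for gen in (2, 1, 0):
            if counts[gen] > threshold[gen]:
                start = time.time() * 1000
                gc.collect(gen)
                record_ms(gen, time.time() * 1000 - start)
        return ret
    return wrapped
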
reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent) + gc.disable() except AttributeError: pass From 7d6e89ed22aab4bae9ce033c2e4757a595257942 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 May 2016 16:31:08 +0100 Subject: [PATCH 005/414] Add a comment --- synapse/metrics/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 17be491b93..c82685a524 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -185,6 +185,8 @@ def runUntilCurrentTimer(func): tick_time.inc_by(end - start) pending_calls_metric.inc_by(num_pending) + # Check if we need to do a manual GC (since its been disabled), and do + # one if necessary. threshold = gc.get_threshold() counts = gc.get_count() @@ -210,6 +212,9 @@ try: # runUntilCurrent is called when we have pending calls. It is called once # per iteratation after fd polling. reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent) + + # We manually run the GC each reactor tick so that we can get some metrics + # about time spent doing GC, gc.disable() except AttributeError: pass From 60d53f9e9596520472954831ce8fea251a462d46 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 16 May 2016 09:32:29 +0100 Subject: [PATCH 006/414] Count number of GC collects --- synapse/metrics/__init__.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index c82685a524..bba2657075 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -156,7 +156,12 @@ get_metrics_for("process").register_callback("fds", _process_fds, labels=["type" reactor_metrics = get_metrics_for("reactor") tick_time = reactor_metrics.register_distribution("tick_time") pending_calls_metric = reactor_metrics.register_distribution("pending_calls") -gc_time = reactor_metrics.register_distribution("gc_time") + +gc_time = ( + reactor_metrics.register_distribution("gc_time_gen0"), + reactor_metrics.register_distribution("gc_time_gen2"), + reactor_metrics.register_distribution("gc_time_gen2"), +) def runUntilCurrentTimer(func): @@ -189,14 +194,15 @@ def runUntilCurrentTimer(func): # one if necessary. threshold = gc.get_threshold() counts = gc.get_count() - - start = time.time() * 1000 for i in [2, 1, 0]: if threshold[i] < counts[i]: logger.info("Collecting gc %d", i) + + start = time.time() * 1000 gc.collect(i) - end = time.time() * 1000 - gc_time.inc_by(end - start) + end = time.time() * 1000 + + gc_time[i].inc_by(end - start) return ret From 09804c98625187d289c0123908da2fc8eecec346 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 23 May 2016 16:29:38 +0100 Subject: [PATCH 007/414] Fix link to A-S spec --- docs/application_services.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/application_services.rst b/docs/application_services.rst index 7e87ac9ad6..fbc0c7e960 100644 --- a/docs/application_services.rst +++ b/docs/application_services.rst @@ -32,5 +32,4 @@ The format of the AS configuration file is as follows: See the spec_ for further details on how application services work. -.. _spec: https://github.com/matrix-org/matrix-doc/blob/master/specification/25_application_service_api.rst#application-service-api - +.. 
_spec: https://matrix.org/docs/spec/application_service/unstable.html From b5605dfecc1f277e03165b374bd6ce81638ccb36 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 May 2016 17:37:01 +0100 Subject: [PATCH 008/414] Refactor SyncHandler --- synapse/handlers/sync.py | 978 +++++++++++++++++++-------------------- 1 file changed, 481 insertions(+), 497 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9ebfccc8bf..80eccf19ae 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -194,157 +194,7 @@ class SyncHandler(object): Returns: A Deferred SyncResult. """ - if since_token is None or full_state: - return self.full_state_sync(sync_config, since_token) - else: - return self.incremental_sync_with_gap(sync_config, since_token) - - @defer.inlineCallbacks - def full_state_sync(self, sync_config, timeline_since_token): - """Get a sync for a client which is starting without any state. - - If a 'message_since_token' is given, only timeline events which have - happened since that token will be returned. - - Returns: - A Deferred SyncResult. - """ - now_token = yield self.event_sources.get_current_token() - - now_token, ephemeral_by_room = yield self.ephemeral_by_room( - sync_config, now_token - ) - - presence_stream = self.event_sources.sources["presence"] - # TODO (mjark): This looks wrong, shouldn't we be getting the presence - # UP to the present rather than after the present? - pagination_config = PaginationConfig(from_token=now_token) - presence, _ = yield presence_stream.get_pagination_rows( - user=sync_config.user, - pagination_config=pagination_config.get_source_config("presence"), - key=None - ) - - membership_list = ( - Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN - ) - - room_list = yield self.store.get_rooms_for_user_where_membership_is( - user_id=sync_config.user.to_string(), - membership_list=membership_list - ) - - account_data, account_data_by_room = ( - yield self.store.get_account_data_for_user( - sync_config.user.to_string() - ) - ) - - account_data['m.push_rules'] = yield self.push_rules_for_user( - sync_config.user - ) - - tags_by_room = yield self.store.get_tags_for_user( - sync_config.user.to_string() - ) - - ignored_users = account_data.get( - "m.ignored_user_list", {} - ).get("ignored_users", {}).keys() - - joined = [] - invited = [] - archived = [] - - user_id = sync_config.user.to_string() - - @defer.inlineCallbacks - def _generate_room_entry(event): - if event.membership == Membership.JOIN: - room_result = yield self.full_state_sync_for_joined_room( - room_id=event.room_id, - sync_config=sync_config, - now_token=now_token, - timeline_since_token=timeline_since_token, - ephemeral_by_room=ephemeral_by_room, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) - joined.append(room_result) - elif event.membership == Membership.INVITE: - if event.sender in ignored_users: - return - invite = yield self.store.get_event(event.event_id) - invited.append(InvitedSyncResult( - room_id=event.room_id, - invite=invite, - )) - elif event.membership in (Membership.LEAVE, Membership.BAN): - # Always send down rooms we were banned or kicked from. 
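# The leave_token built just below pins the room stream at the leave event.
# A minimal sketch of that token behaviour (an illustrative namedtuple, not
# Synapse's real StreamToken class): a sync token is an immutable bag of
# per-stream positions, and copy_and_replace returns a new token with one
# stream moved.

from collections import namedtuple


class SketchToken(namedtuple("SketchToken", ["room_key", "presence_key"])):
    def copy_and_replace(self, key, new_value):
        # _replace returns a fresh namedtuple; the original is untouched.
        return self._replace(**{key: new_value})


now_token_sketch = SketchToken(room_key="s3050", presence_key="1200")
leave_token_sketch = now_token_sketch.copy_and_replace("room_key", "s%d" % 2980)
assert leave_token_sketch.room_key == "s2980"
assert now_token_sketch.room_key == "s3050"
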
- if not sync_config.filter_collection.include_leave: - if event.membership == Membership.LEAVE: - if user_id == event.sender: - return - - leave_token = now_token.copy_and_replace( - "room_key", "s%d" % (event.stream_ordering,) - ) - room_result = yield self.full_state_sync_for_archived_room( - sync_config=sync_config, - room_id=event.room_id, - leave_event_id=event.event_id, - leave_token=leave_token, - timeline_since_token=timeline_since_token, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - ) - archived.append(room_result) - - yield concurrently_execute(_generate_room_entry, room_list, 10) - - account_data_for_user = sync_config.filter_collection.filter_account_data( - self.account_data_for_user(account_data) - ) - - presence = sync_config.filter_collection.filter_presence( - presence - ) - - defer.returnValue(SyncResult( - presence=presence, - account_data=account_data_for_user, - joined=joined, - invited=invited, - archived=archived, - next_batch=now_token, - )) - - @defer.inlineCallbacks - def full_state_sync_for_joined_room(self, room_id, sync_config, - now_token, timeline_since_token, - ephemeral_by_room, tags_by_room, - account_data_by_room): - """Sync a room for a client which is starting without any state - Returns: - A Deferred JoinedSyncResult. - """ - - batch = yield self.load_filtered_recents( - room_id, sync_config, now_token, since_token=timeline_since_token - ) - - room_sync = yield self.incremental_sync_with_gap_for_room( - room_id, sync_config, - now_token=now_token, - since_token=timeline_since_token, - ephemeral_by_room=ephemeral_by_room, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - batch=batch, - full_state=True, - ) - - defer.returnValue(room_sync) + return self.generate_sync_result(sync_config, since_token, full_state) @defer.inlineCallbacks def push_rules_for_user(self, user): @@ -365,24 +215,6 @@ class SyncHandler(object): return account_data_events - def account_data_for_room(self, room_id, tags_by_room, account_data_by_room): - account_data_events = [] - tags = tags_by_room.get(room_id) - if tags is not None: - account_data_events.append({ - "type": "m.tag", - "content": {"tags": tags}, - }) - - account_data = account_data_by_room.get(room_id, {}) - for account_data_type, content in account_data.items(): - account_data_events.append({ - "type": account_data_type, - "content": content, - }) - - return account_data_events - @defer.inlineCallbacks def ephemeral_by_room(self, sync_config, now_token, since_token=None): """Get the ephemeral events for each room the user is in @@ -445,237 +277,6 @@ class SyncHandler(object): defer.returnValue((now_token, ephemeral_by_room)) - def full_state_sync_for_archived_room(self, room_id, sync_config, - leave_event_id, leave_token, - timeline_since_token, tags_by_room, - account_data_by_room): - """Sync a room for a client which is starting without any state - Returns: - A Deferred ArchivedSyncResult. - """ - - return self.incremental_sync_for_archived_room( - sync_config, room_id, leave_event_id, timeline_since_token, tags_by_room, - account_data_by_room, full_state=True, leave_token=leave_token, - ) - - @defer.inlineCallbacks - def incremental_sync_with_gap(self, sync_config, since_token): - """ Get the incremental delta needed to bring the client up to - date with the server. - Returns: - A Deferred SyncResult. 
- """ - now_token = yield self.event_sources.get_current_token() - - rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string()) - room_ids = [room.room_id for room in rooms] - - presence_source = self.event_sources.sources["presence"] - presence, presence_key = yield presence_source.get_new_events( - user=sync_config.user, - from_key=since_token.presence_key, - limit=sync_config.filter_collection.presence_limit(), - room_ids=room_ids, - is_guest=sync_config.is_guest, - ) - now_token = now_token.copy_and_replace("presence_key", presence_key) - - now_token, ephemeral_by_room = yield self.ephemeral_by_room( - sync_config, now_token, since_token - ) - - app_service = yield self.store.get_app_service_by_user_id( - sync_config.user.to_string() - ) - if app_service: - rooms = yield self.store.get_app_service_rooms(app_service) - joined_room_ids = set(r.room_id for r in rooms) - else: - rooms = yield self.store.get_rooms_for_user( - sync_config.user.to_string() - ) - joined_room_ids = set(r.room_id for r in rooms) - - user_id = sync_config.user.to_string() - - timeline_limit = sync_config.filter_collection.timeline_limit() - - tags_by_room = yield self.store.get_updated_tags( - user_id, - since_token.account_data_key, - ) - - account_data, account_data_by_room = ( - yield self.store.get_updated_account_data_for_user( - user_id, - since_token.account_data_key, - ) - ) - - push_rules_changed = yield self.store.have_push_rules_changed_for_user( - user_id, int(since_token.push_rules_key) - ) - - if push_rules_changed: - account_data["m.push_rules"] = yield self.push_rules_for_user( - sync_config.user - ) - - ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( - "m.ignored_user_list", user_id=user_id, - ) - - if ignored_account_data: - ignored_users = ignored_account_data.get("ignored_users", {}).keys() - else: - ignored_users = frozenset() - - # Get a list of membership change events that have happened. - rooms_changed = yield self.store.get_membership_changes_for_user( - user_id, since_token.room_key, now_token.room_key - ) - - mem_change_events_by_room_id = {} - for event in rooms_changed: - mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) - - newly_joined_rooms = [] - archived = [] - invited = [] - for room_id, events in mem_change_events_by_room_id.items(): - non_joins = [e for e in events if e.membership != Membership.JOIN] - has_join = len(non_joins) != len(events) - - # We want to figure out if we joined the room at some point since - # the last sync (even if we have since left). This is to make sure - # we do send down the room, and with full state, where necessary - if room_id in joined_room_ids or has_join: - old_state = yield self.get_state_at(room_id, since_token) - old_mem_ev = old_state.get((EventTypes.Member, user_id), None) - if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: - newly_joined_rooms.append(room_id) - - if room_id in joined_room_ids: - continue - - if not non_joins: - continue - - # Only bother if we're still currently invited - should_invite = non_joins[-1].membership == Membership.INVITE - if should_invite: - if event.sender not in ignored_users: - room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) - if room_sync: - invited.append(room_sync) - - # Always include leave/ban events. Just take the last one. - # TODO: How do we handle ban -> leave in same batch? 
- leave_events = [ - e for e in non_joins - if e.membership in (Membership.LEAVE, Membership.BAN) - ] - - if leave_events: - leave_event = leave_events[-1] - room_sync = yield self.incremental_sync_for_archived_room( - sync_config, room_id, leave_event.event_id, since_token, - tags_by_room, account_data_by_room, - full_state=room_id in newly_joined_rooms - ) - if room_sync: - archived.append(room_sync) - - # Get all events for rooms we're currently joined to. - room_to_events = yield self.store.get_room_events_stream_for_rooms( - room_ids=joined_room_ids, - from_key=since_token.room_key, - to_key=now_token.room_key, - limit=timeline_limit + 1, - ) - - joined = [] - # We loop through all room ids, even if there are no new events, in case - # there are non room events taht we need to notify about. - for room_id in joined_room_ids: - room_entry = room_to_events.get(room_id, None) - - if room_entry: - events, start_key = room_entry - - prev_batch_token = now_token.copy_and_replace("room_key", start_key) - - newly_joined_room = room_id in newly_joined_rooms - full_state = newly_joined_room - - batch = yield self.load_filtered_recents( - room_id, sync_config, prev_batch_token, - since_token=since_token, - recents=events, - newly_joined_room=newly_joined_room, - ) - else: - batch = TimelineBatch( - events=[], - prev_batch=since_token, - limited=False, - ) - full_state = False - - room_sync = yield self.incremental_sync_with_gap_for_room( - room_id=room_id, - sync_config=sync_config, - since_token=since_token, - now_token=now_token, - ephemeral_by_room=ephemeral_by_room, - tags_by_room=tags_by_room, - account_data_by_room=account_data_by_room, - batch=batch, - full_state=full_state, - ) - if room_sync: - joined.append(room_sync) - - # For each newly joined room, we want to send down presence of - # existing users. - presence_handler = self.presence_handler - extra_presence_users = set() - for room_id in newly_joined_rooms: - users = yield self.store.get_users_in_room(event.room_id) - extra_presence_users.update(users) - - # For each new member, send down presence. 
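# The loop below scans both the timeline and the state block of each joined
# room result for join memberships, so presence can be sent for new members.
# The same scan in isolation, with dicts standing in for events:

import itertools


def collect_joined_user_ids(joined_results, own_user_id):
    users = set()
    for room in joined_results:
        for ev in itertools.chain(room["timeline"], room["state"]):
            if ev["type"] == "m.room.member" and ev["membership"] == "join":
                users.add(ev["state_key"])
    users.discard(own_user_id)  # a user never needs their own presence here
    return users
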
- for joined_sync in joined: - it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values()) - for event in it: - if event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - extra_presence_users.add(event.state_key) - - states = yield presence_handler.get_states( - [u for u in extra_presence_users if u != user_id], - as_event=True, - ) - presence.extend(states) - - account_data_for_user = sync_config.filter_collection.filter_account_data( - self.account_data_for_user(account_data) - ) - - presence = sync_config.filter_collection.filter_presence( - presence - ) - - defer.returnValue(SyncResult( - presence=presence, - account_data=account_data_for_user, - joined=joined, - invited=invited, - archived=archived, - next_batch=now_token, - )) - @defer.inlineCallbacks def load_filtered_recents(self, room_id, sync_config, now_token, since_token=None, recents=None, newly_joined_room=False): @@ -696,6 +297,10 @@ class SyncHandler(object): else: limited = False + if since_token: + if not now_token.is_after(since_token): + limited = False + if recents is not None: recents = sync_config.filter_collection.filter_room_timeline(recents) recents = yield filter_events_for_client( @@ -748,103 +353,6 @@ class SyncHandler(object): limited=limited or newly_joined_room )) - @defer.inlineCallbacks - def incremental_sync_with_gap_for_room(self, room_id, sync_config, - since_token, now_token, - ephemeral_by_room, tags_by_room, - account_data_by_room, - batch, full_state=False): - state = yield self.compute_state_delta( - room_id, batch, sync_config, since_token, now_token, - full_state=full_state - ) - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) - - unread_notifications = {} - room_sync = JoinedSyncResult( - room_id=room_id, - timeline=batch, - state=state, - ephemeral=ephemeral, - account_data=account_data, - unread_notifications=unread_notifications, - ) - - if room_sync: - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config - ) - - if notifs is not None: - unread_notifications["notification_count"] = notifs["notify_count"] - unread_notifications["highlight_count"] = notifs["highlight_count"] - - logger.debug("Room sync: %r", room_sync) - - defer.returnValue(room_sync) - - @defer.inlineCallbacks - def incremental_sync_for_archived_room(self, sync_config, room_id, leave_event_id, - since_token, tags_by_room, - account_data_by_room, full_state, - leave_token=None): - """ Get the incremental delta needed to bring the client up to date for - the archived room. 
- Returns: - A Deferred ArchivedSyncResult - """ - - if not leave_token: - stream_token = yield self.store.get_stream_token_for_event( - leave_event_id - ) - - leave_token = since_token.copy_and_replace("room_key", stream_token) - - if since_token and since_token.is_after(leave_token): - defer.returnValue(None) - - batch = yield self.load_filtered_recents( - room_id, sync_config, leave_token, since_token, - ) - - logger.debug("Recents %r", batch) - - state_events_delta = yield self.compute_state_delta( - room_id, batch, sync_config, since_token, leave_token, - full_state=full_state - ) - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - room_sync = ArchivedSyncResult( - room_id=room_id, - timeline=batch, - state=state_events_delta, - account_data=account_data, - ) - - logger.debug("Room sync: %r", room_sync) - - defer.returnValue(room_sync) - @defer.inlineCallbacks def get_state_after_event(self, event): """ @@ -1010,6 +518,457 @@ class SyncHandler(object): # count is whatever it was last time. defer.returnValue(None) + @defer.inlineCallbacks + def generate_sync_result(self, sync_config, since_token=None, full_state=False): + now_token = yield self.event_sources.get_current_token() + + sync_result_builer = SyncResultBuilder( + sync_config, full_state, + since_token=since_token, + now_token=now_token, + ) + + account_data_by_room = yield self.generate_sync_entry_for_account_data( + sync_result_builer + ) + + newly_joined_rooms, newly_joined_users = yield self.generate_sync_entry_for_rooms( + sync_result_builer, account_data_by_room + ) + + yield self.generate_sync_entry_for_presence( + sync_result_builer, newly_joined_rooms, newly_joined_users + ) + + defer.returnValue(SyncResult( + presence=sync_result_builer.presence, + account_data=sync_result_builer.account_data, + joined=sync_result_builer.joined, + invited=sync_result_builer.invited, + archived=sync_result_builer.archived, + next_batch=sync_result_builer.now_token, + )) + + @defer.inlineCallbacks + def generate_sync_entry_for_account_data(self, sync_result_builer): + sync_config = sync_result_builer.sync_config + user_id = sync_result_builer.sync_config.user.to_string() + since_token = sync_result_builer.since_token + + if since_token and not sync_result_builer.full_state: + account_data, account_data_by_room = ( + yield self.store.get_updated_account_data_for_user( + user_id, + since_token.account_data_key, + ) + ) + + push_rules_changed = yield self.store.have_push_rules_changed_for_user( + user_id, int(since_token.push_rules_key) + ) + + if push_rules_changed: + account_data["m.push_rules"] = yield self.push_rules_for_user( + sync_config.user + ) + else: + account_data, account_data_by_room = ( + yield self.store.get_account_data_for_user( + sync_config.user.to_string() + ) + ) + + account_data['m.push_rules'] = yield self.push_rules_for_user( + sync_config.user + ) + + account_data_for_user = sync_config.filter_collection.filter_account_data( + self.account_data_for_user(account_data) + ) + + sync_result_builer.account_data = account_data_for_user + + defer.returnValue(account_data_by_room) + + @defer.inlineCallbacks + def generate_sync_entry_for_presence(self, sync_result_builer, newly_joined_rooms, + newly_joined_users): + now_token = sync_result_builer.now_token + sync_config = sync_result_builer.sync_config + user = sync_result_builer.sync_config.user + + presence_source = 
self.event_sources.sources["presence"] + + since_token = sync_result_builer.since_token + if since_token and not sync_result_builer.full_state: + presence_key = since_token.presence_key + else: + presence_key = None + + presence, presence_key = yield presence_source.get_new_events( + user=user, + from_key=presence_key, + is_guest=sync_config.is_guest, + ) + sync_result_builer.now_token = now_token.copy_and_replace( + "presence_key", presence_key + ) + + extra_users_ids = set(newly_joined_users) + for room_id in newly_joined_rooms: + users = yield self.store.get_users_in_room(room_id) + extra_users_ids.update(users) + extra_users_ids.discard(user.to_string()) + + states = yield self.presence_handler.get_states( + extra_users_ids, + as_event=True, + ) + presence.extend(states) + + presence = sync_config.filter_collection.filter_presence( + presence + ) + + sync_result_builer.presence = presence + + @defer.inlineCallbacks + def generate_sync_entry_for_rooms(self, sync_result_builer, account_data_by_room): + user_id = sync_result_builer.sync_config.user.to_string() + + now_token, ephemeral_by_room = yield self.ephemeral_by_room( + sync_result_builer.sync_config, sync_result_builer.now_token + ) + sync_result_builer.now_token = now_token + + ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( + "m.ignored_user_list", user_id=user_id, + ) + + if ignored_account_data: + ignored_users = ignored_account_data.get("ignored_users", {}).keys() + else: + ignored_users = frozenset() + + if sync_result_builer.since_token: + res = yield self._get_rooms_changed(sync_result_builer, ignored_users) + joined, invited, archived, newly_joined_rooms = res + + tags_by_room = yield self.store.get_updated_tags( + user_id, + sync_result_builer.since_token.account_data_key, + ) + else: + res = yield self._get_all_rooms(sync_result_builer, ignored_users) + joined, invited, archived, newly_joined_rooms = res + + tags_by_room = yield self.store.get_tags_for_user(user_id) + + for room_entry in joined: + yield self._generate_room_entry( + "joined", + sync_result_builer, + ignored_users, + room_entry, + ephemeral=ephemeral_by_room.get(room_entry.room_id, []), + tags=tags_by_room.get(room_entry.room_id), + account_data=account_data_by_room.get(room_entry.room_id, {}), + always_include=sync_result_builer.full_state, + ) + for room_entry in archived: + yield self._generate_room_entry( + "archived", + sync_result_builer, + ignored_users, + room_entry, + ephemeral=ephemeral_by_room.get(room_entry.room_id, []), + tags=tags_by_room.get(room_entry.room_id), + account_data=account_data_by_room.get(room_entry.room_id, {}), + always_include=sync_result_builer.full_state, + ) + + sync_result_builer.invited.extend(invited) + + # Now we want to get any newly joined users + newly_joined_users = set() + for joined_sync in sync_result_builer.joined: + it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values()) + for event in it: + if event.type == EventTypes.Member: + if event.membership == Membership.JOIN: + newly_joined_users.add(event.state_key) + + defer.returnValue((newly_joined_rooms, newly_joined_users)) + + @defer.inlineCallbacks + def _get_rooms_changed(self, sync_result_builer, ignored_users): + user_id = sync_result_builer.sync_config.user.to_string() + since_token = sync_result_builer.since_token + now_token = sync_result_builer.now_token + sync_config = sync_result_builer.sync_config + + assert since_token + + app_service = yield self.store.get_app_service_by_user_id(user_id) + if 
app_service: + rooms = yield self.store.get_app_service_rooms(app_service) + joined_room_ids = set(r.room_id for r in rooms) + else: + rooms = yield self.store.get_rooms_for_user(user_id) + joined_room_ids = set(r.room_id for r in rooms) + + # Get a list of membership change events that have happened. + rooms_changed = yield self.store.get_membership_changes_for_user( + user_id, since_token.room_key, now_token.room_key + ) + + mem_change_events_by_room_id = {} + for event in rooms_changed: + mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) + + newly_joined_rooms = [] + archived = [] + invited = [] + for room_id, events in mem_change_events_by_room_id.items(): + non_joins = [e for e in events if e.membership != Membership.JOIN] + has_join = len(non_joins) != len(events) + + # We want to figure out if we joined the room at some point since + # the last sync (even if we have since left). This is to make sure + # we do send down the room, and with full state, where necessary + if room_id in joined_room_ids or has_join: + old_state = yield self.get_state_at(room_id, since_token) + old_mem_ev = old_state.get((EventTypes.Member, user_id), None) + if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: + newly_joined_rooms.append(room_id) + + if room_id in joined_room_ids: + continue + + if not non_joins: + continue + + # Only bother if we're still currently invited + should_invite = non_joins[-1].membership == Membership.INVITE + if should_invite: + if event.sender not in ignored_users: + room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) + if room_sync: + invited.append(room_sync) + + # Always include leave/ban events. Just take the last one. + # TODO: How do we handle ban -> leave in same batch? + leave_events = [ + e for e in non_joins + if e.membership in (Membership.LEAVE, Membership.BAN) + ] + + if leave_events: + leave_event = leave_events[-1] + leave_stream_token = yield self.store.get_stream_token_for_event( + leave_event.event_id + ) + leave_token = since_token.copy_and_replace( + "room_key", leave_stream_token + ) + + if since_token and since_token.is_after(leave_token): + continue + + archived.append(RoomSyncResultBuilder( + room_id=room_id, + events=None, + newly_joined=room_id in newly_joined_rooms, + full_state=False, + since_token=since_token, + upto_token=leave_token, + )) + + timeline_limit = sync_config.filter_collection.timeline_limit() + + # Get all events for rooms we're currently joined to. + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=joined_room_ids, + from_key=since_token.room_key, + to_key=now_token.room_key, + limit=timeline_limit + 1, + ) + + joined = [] + # We loop through all room ids, even if there are no new events, in case + # there are non room events taht we need to notify about. 
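# A detail worth noting about the query above: it asks for
# timeline_limit + 1 events per room, so one fetch can both fill the
# timeline and reveal whether it was truncated. The idea in isolation (a
# sketch, not the real helper):

def clamp_timeline(events, timeline_limit):
    # events: oldest-first list fetched with limit=timeline_limit + 1
    limited = len(events) > timeline_limit
    return events[-timeline_limit:], limited
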
+ for room_id in joined_room_ids: + room_entry = room_to_events.get(room_id, None) + + if room_entry: + events, start_key = room_entry + + prev_batch_token = now_token.copy_and_replace("room_key", start_key) + + joined.append(RoomSyncResultBuilder( + room_id=room_id, + events=events, + newly_joined=room_id in newly_joined_rooms, + full_state=False, + since_token=None if room_id in newly_joined_rooms else since_token, + upto_token=prev_batch_token, + )) + else: + joined.append(RoomSyncResultBuilder( + room_id=room_id, + events=[], + newly_joined=room_id in newly_joined_rooms, + full_state=False, + since_token=since_token, + upto_token=since_token, + )) + + defer.returnValue((joined, invited, archived, newly_joined_rooms)) + + @defer.inlineCallbacks + def _get_all_rooms(self, sync_result_builer, ignored_users): + user_id = sync_result_builer.sync_config.user.to_string() + since_token = sync_result_builer.since_token + now_token = sync_result_builer.now_token + sync_config = sync_result_builer.sync_config + + membership_list = ( + Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN + ) + + room_list = yield self.store.get_rooms_for_user_where_membership_is( + user_id=user_id, + membership_list=membership_list + ) + + joined = [] + invited = [] + archived = [] + + for event in room_list: + if event.membership == Membership.JOIN: + joined.append(RoomSyncResultBuilder( + room_id=event.room_id, + events=None, + newly_joined=False, + full_state=True, + since_token=since_token, + upto_token=now_token, + )) + elif event.membership == Membership.INVITE: + if event.sender in ignored_users: + continue + invite = yield self.store.get_event(event.event_id) + invited.append(InvitedSyncResult( + room_id=event.room_id, + invite=invite, + )) + elif event.membership in (Membership.LEAVE, Membership.BAN): + # Always send down rooms we were banned or kicked from. 
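# The nested guard below encodes one rule; stated positively as a sketch
# (event here is a dict stand-in for the membership event):

def should_send_archived_room(event, user_id, include_leave):
    if include_leave:
        return True  # the filter asked for left rooms: send them all
    if event["membership"] != "leave":
        return True  # bans are always sent down
    # Self-initiated leaves are skipped; kicks (a leave sent by someone
    # else) are still sent.
    return event["sender"] != user_id
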
+ if not sync_config.filter_collection.include_leave: + if event.membership == Membership.LEAVE: + if user_id == event.sender: + continue + + leave_token = now_token.copy_and_replace( + "room_key", "s%d" % (event.stream_ordering,) + ) + archived.append(RoomSyncResultBuilder( + room_id=event.room_id, + events=None, + newly_joined=False, + full_state=True, + since_token=since_token, + upto_token=leave_token, + )) + + defer.returnValue((joined, invited, archived, [])) + + @defer.inlineCallbacks + def _generate_room_entry(self, room_type, sync_result_builer, ignored_users, + room_builder, ephemeral, tags, account_data, + always_include=False): + since_token = sync_result_builer.since_token + now_token = sync_result_builer.now_token + sync_config = sync_result_builer.sync_config + + room_id = room_builder.room_id + events = room_builder.events + newly_joined = room_builder.newly_joined + full_state = ( + room_builder.full_state + or newly_joined + or sync_result_builer.full_state + ) + since_token = room_builder.since_token + upto_token = room_builder.upto_token + + batch = yield self.load_filtered_recents( + room_id, sync_config, + now_token=upto_token, + since_token=since_token, + recents=events, + newly_joined_room=newly_joined, # FIXME + ) + + account_data_events = [] + if tags is not None: + account_data_events.append({ + "type": "m.tag", + "content": {"tags": tags}, + }) + + for account_data_type, content in account_data.items(): + account_data_events.append({ + "type": account_data_type, + "content": content, + }) + + account_data = sync_config.filter_collection.filter_room_account_data( + account_data_events + ) + + ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral) + + if not (always_include or batch or account_data or ephemeral or full_state): + return + + state = yield self.compute_state_delta( + room_id, batch, sync_config, since_token, now_token, + full_state=full_state + ) + + if room_type == "joined": + unread_notifications = {} + room_sync = JoinedSyncResult( + room_id=room_id, + timeline=batch, + state=state, + ephemeral=ephemeral, + account_data=account_data_events, + unread_notifications=unread_notifications, + ) + + if room_sync or always_include: + notifs = yield self.unread_notifs_for_room_id( + room_id, sync_config + ) + + if notifs is not None: + unread_notifications["notification_count"] = notifs["notify_count"] + unread_notifications["highlight_count"] = notifs["highlight_count"] + + sync_result_builer.joined.append(room_sync) + elif room_type == "archived": + room_sync = ArchivedSyncResult( + room_id=room_id, + timeline=batch, + state=state, + account_data=account_data, + ) + if room_sync or always_include: + sync_result_builer.archived.append(room_sync) + def _action_has_highlight(actions): for action in actions: @@ -1057,3 +1016,28 @@ def _calculate_state(timeline_contains, timeline_start, previous, current): (e.type, e.state_key): e for e in evs } + + +class SyncResultBuilder(object): + def __init__(self, sync_config, full_state, since_token, now_token): + self.sync_config = sync_config + self.full_state = full_state + self.since_token = since_token + self.now_token = now_token + + self.presence = [] + self.account_data = [] + self.joined = [] + self.invited = [] + self.archived = [] + + +class RoomSyncResultBuilder(object): + def __init__(self, room_id, events, newly_joined, full_state, since_token, + upto_token): + self.room_id = room_id + self.events = events + self.newly_joined = newly_joined + self.full_state = full_state + 
self.since_token = since_token + self.upto_token = upto_token From c0c79ef444ca0f21e8324abc8a813026aaf6cf17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 May 2016 18:21:27 +0100 Subject: [PATCH 009/414] Add back concurrently_execute --- synapse/handlers/sync.py | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 80eccf19ae..bc6d6af133 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.streams.config import PaginationConfig from synapse.api.constants import Membership, EventTypes from synapse.util.async import concurrently_execute from synapse.util.logcontext import LoggingContext @@ -478,26 +477,6 @@ class SyncHandler(object): for e in sync_config.filter_collection.filter_room_state(state.values()) }) - def check_joined_room(self, sync_config, state_delta): - """ - Check if the user has just joined the given room (so should - be given the full state) - - Args: - sync_config(synapse.handlers.sync.SyncConfig): - state_delta(dict[(str,str), synapse.events.FrozenEvent]): the - difference in state since the last sync - - Returns: - A deferred Tuple (state_delta, limited) - """ - join_event = state_delta.get(( - EventTypes.Member, sync_config.user.to_string()), None) - if join_event is not None: - if join_event.content["membership"] == Membership.JOIN: - return True - return False - @defer.inlineCallbacks def unread_notifs_for_room_id(self, room_id, sync_config): with Measure(self.clock, "unread_notifs_for_room_id"): @@ -664,8 +643,8 @@ class SyncHandler(object): tags_by_room = yield self.store.get_tags_for_user(user_id) - for room_entry in joined: - yield self._generate_room_entry( + def handle_joined(room_entry): + return self._generate_room_entry( "joined", sync_result_builer, ignored_users, @@ -675,8 +654,11 @@ class SyncHandler(object): account_data=account_data_by_room.get(room_entry.room_id, {}), always_include=sync_result_builer.full_state, ) - for room_entry in archived: - yield self._generate_room_entry( + + yield concurrently_execute(handle_joined, joined, 10) + + def handle_archived(room_entry): + return self._generate_room_entry( "archived", sync_result_builer, ignored_users, @@ -687,6 +669,8 @@ class SyncHandler(object): always_include=sync_result_builer.full_state, ) + yield concurrently_execute(handle_archived, archived, 10) + sync_result_builer.invited.extend(invited) # Now we want to get any newly joined users From 6fe04ffef2fd44a3af61fe262266b25158c46db7 Mon Sep 17 00:00:00 2001 From: Negi Fazeli Date: Mon, 23 May 2016 11:14:11 +0200 Subject: [PATCH 010/414] Fix set profile error with Requester. 
Replace flush_user with delete access token due to function removal Add a new test case for if the user is already registered --- synapse/handlers/register.py | 9 +++++---- tests/handlers/test_register.py | 34 ++++++++++++++++++++++++--------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 5883b9111e..16f33f8371 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -16,7 +16,7 @@ """Contains functions for registering clients.""" from twisted.internet import defer -from synapse.types import UserID +from synapse.types import UserID, Requester from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) @@ -360,7 +360,8 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def get_or_create_user(self, localpart, displayname, duration_seconds): - """Creates a new user or returns an access token for an existing one + """Creates a new user if the user does not exist, + else revokes all previous access tokens and generates a new one. Args: localpart : The local part of the user ID to register. If None, @@ -399,14 +400,14 @@ class RegistrationHandler(BaseHandler): yield registered_user(self.distributor, user) else: - yield self.store.flush_user(user_id=user_id) + yield self.store.user_delete_access_tokens(user_id=user_id) yield self.store.add_access_token_to_user(user_id=user_id, token=token) if displayname is not None: logger.info("setting user display name: %s -> %s", user_id, displayname) profile_handler = self.hs.get_handlers().profile_handler yield profile_handler.set_displayname( - user, user, displayname + user, Requester(user, token, False), displayname ) defer.returnValue((user_id, token)) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 8b7be96bd9..9d5c653b45 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -17,6 +17,7 @@ from twisted.internet import defer from .. 
import unittest from synapse.handlers.register import RegistrationHandler +from synapse.types import UserID from tests.utils import setup_test_homeserver @@ -36,25 +37,21 @@ class RegistrationTestCase(unittest.TestCase): self.mock_distributor = Mock() self.mock_distributor.declare("registered_user") self.mock_captcha_client = Mock() - hs = yield setup_test_homeserver( + self.hs = yield setup_test_homeserver( handlers=None, http_client=None, expire_access_token=True) - hs.handlers = RegistrationHandlers(hs) - self.handler = hs.get_handlers().registration_handler - hs.get_handlers().profile_handler = Mock() + self.hs.handlers = RegistrationHandlers(self.hs) + self.handler = self.hs.get_handlers().registration_handler + self.hs.get_handlers().profile_handler = Mock() self.mock_handler = Mock(spec=[ "generate_short_term_login_token", ]) - hs.get_handlers().auth_handler = self.mock_handler + self.hs.get_handlers().auth_handler = self.mock_handler @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): - """ - Returns: - The user doess not exist in this case so it will register and log it in - """ duration_ms = 200 local_part = "someone" display_name = "someone" @@ -65,3 +62,22 @@ class RegistrationTestCase(unittest.TestCase): local_part, display_name, duration_ms) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') + + @defer.inlineCallbacks + def test_if_user_exists(self): + store = self.hs.get_datastore() + frank = UserID.from_string("@frank:test") + yield store.register( + user_id=frank.to_string(), + token="jkv;g498752-43gj['eamb!-5", + password_hash=None) + duration_ms = 200 + local_part = "frank" + display_name = "Frank" + user_id = "@frank:test" + mock_token = self.mock_handler.generate_short_term_login_token + mock_token.return_value = 'secret' + result_user_id, result_token = yield self.handler.get_or_create_user( + local_part, display_name, duration_ms) + self.assertEquals(result_user_id, user_id) + self.assertEquals(result_token, 'secret') From 989bdc9e569e6ca414369e90e7b88e2f1d99c753 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 23 May 2016 19:24:11 +0100 Subject: [PATCH 011/414] Tune email notifs to make them quieter: * After initial 10 minute window, only alert every 24h for room notifs * Reset room state after 6h of idleness * Synchronise throttles for messages sent in the same notif, so the 24 hourly notifs 'line up' * Fix the email subjects to say what triggered the notification * Order the rooms in reverse activity order in the email, so the 'reason' room should always come first --- synapse/push/emailpusher.py | 26 +++++++++++++------- synapse/push/mailer.py | 48 +++++++++++++++++++++++++++++-------- 2 files changed, 56 insertions(+), 18 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index b4b728adc5..a72cba8306 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -32,12 +32,19 @@ DELAY_BEFORE_MAIL_MS = 10 * 60 * 1000 # Each room maintains its own throttle counter, but each new mail notification # sends the pending notifications for all rooms. THROTTLE_START_MS = 10 * 60 * 1000 -THROTTLE_MAX_MS = 24 * 60 * 60 * 1000 # (2 * 60 * 1000) * (2 ** 11) # ~3 days -THROTTLE_MULTIPLIER = 6 # 10 mins, 1 hour, 6 hours, 24 hours +THROTTLE_MAX_MS = 24 * 60 * 60 * 1000 # 24h +# THROTTLE_MULTIPLIER = 6 # 10 mins, 1 hour, 6 hours, 24 hours +THROTTLE_MULTIPLIER = 144 # 10 mins, 24 hours - i.e. 
jump straight to 1 day # If no event triggers a notification for this long after the previous, # the throttle is released. -THROTTLE_RESET_AFTER_MS = (2 * 60 * 1000) * (2 ** 11) # ~3 days +# 12 hours - a gap of 12 hours in conversation is surely enough to merit a new +# notification when things get going again... +THROTTLE_RESET_AFTER_MS = (12 * 60 * 60 * 1000) + +# does each email include all unread notifs, or just the ones which have happened +# since the last mail? +INCLUDE_ALL_UNREAD_NOTIFS = True class EmailPusher(object): @@ -126,8 +133,9 @@ class EmailPusher(object): up logging, measures and guards against multiple instances of it being run. """ + start = 0 if INCLUDE_ALL_UNREAD_NOTIFS else self.last_stream_ordering unprocessed = yield self.store.get_unread_push_actions_for_user_in_range( - self.user_id, self.last_stream_ordering, self.max_stream_ordering + self.user_id, start, self.max_stream_ordering ) soonest_due_at = None @@ -150,7 +158,6 @@ class EmailPusher(object): # we then consider all previously outstanding notifications # to be delivered. - # debugging: reason = { 'room_id': push_action['room_id'], 'now': self.clock.time_msec(), @@ -165,9 +172,12 @@ class EmailPusher(object): yield self.save_last_stream_ordering_and_success(max([ ea['stream_ordering'] for ea in unprocessed ])) - yield self.sent_notif_update_throttle( - push_action['room_id'], push_action - ) + + # we update the throttle on all the possible unprocessed push actions + for ea in unprocessed: + yield self.sent_notif_update_throttle( + ea['room_id'], ea + ) break else: if soonest_due_at is None or should_notify_at < soonest_due_at: diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index c2c2ca3fa7..8a9e9895ba 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -45,7 +45,10 @@ MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \ MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..." MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..." MESSAGES_IN_ROOM = "There are some messages on %(app)s for you in the %(room)s room..." -MESSAGES_IN_ROOMS = "Here are some messages on %(app)s you may have missed..." +MESSAGES_IN_ROOM_AND_OTHERS = \ + "You have messages on %(app)s in the %(room)s room and others..." +MESSAGES_FROM_PERSON_AND_OTHERS = \ + "You have messages on %(app)s from %(person)s and others..." INVITE_FROM_PERSON_TO_ROOM = "%(person)s has invited you to join the " \ "%(room)s room on %(app)s..." INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..." @@ -128,9 +131,14 @@ class Mailer(object): state_by_room[room_id] = room_state # Run at most 3 of these at once: sync does 10 at a time but email - # notifs are much realtime than sync so we can afford to wait a bit. + # notifs are much less realtime than sync so we can afford to wait a bit. 
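# concurrently_execute(f, args, limit), used below, runs f over args with at
# most `limit` invocations in flight. A rough stand-in with the same shape,
# written with concurrent.futures rather than Twisted deferreds
# (illustrative only):

from concurrent.futures import ThreadPoolExecutor


def concurrently_execute_sketch(func, args, limit):
    with ThreadPoolExecutor(max_workers=limit) as pool:
        # list() drains the iterator, so this blocks until every call is
        # done and re-raises the first exception, loosely like the Twisted
        # version.
        list(pool.map(func, args))
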
yield concurrently_execute(_fetch_room_state, rooms_in_order, 3) + # actually sort our so-called rooms_in_order list, most recent room first + rooms_in_order = rooms_in_order.sort( + key=lambda r: -notifs_by_room[r]['received_ts'] + ) + rooms = [] for r in rooms_in_order: @@ -139,12 +147,12 @@ class Mailer(object): ) rooms.append(roomvars) - summary_text = self.make_summary_text( - notifs_by_room, state_by_room, notif_events, user_id + reason['room_name'] = calculate_room_name( + state_by_room[reason['room_id']], user_id, fallback_to_members=True ) - reason['room_name'] = calculate_room_name( - state_by_room[reason['room_id']], user_id, fallback_to_members=False + summary_text = self.make_summary_text( + notifs_by_room, state_by_room, notif_events, user_id, reason ) template_vars = { @@ -296,7 +304,8 @@ class Mailer(object): return messagevars - def make_summary_text(self, notifs_by_room, state_by_room, notif_events, user_id): + def make_summary_text(self, notifs_by_room, state_by_room, + notif_events, user_id, reason): if len(notifs_by_room) == 1: # Only one room has new stuff room_id = notifs_by_room.keys()[0] @@ -371,9 +380,28 @@ class Mailer(object): } else: # Stuff's happened in multiple different rooms - return MESSAGES_IN_ROOMS % { - "app": self.app_name, - } + + # ...but we still refer to the 'reason' room which triggered the mail + if reason['room_name'] is not None: + return MESSAGES_IN_ROOM_AND_OTHERS % { + "room": reason['room_name'], + "app": self.app_name, + } + else: + # If the reason room doesn't have a name, say who the messages + # are from explicitly to avoid, "messages in the Bob room" + sender_ids = list(set([ + notif_events[n['event_id']].sender + for n in notifs_by_room[reason['room_id']] + ])) + + return MESSAGES_FROM_PERSON_AND_OTHERS % { + "person": descriptor_from_member_events([ + state_by_room[reason['room_id']][("m.room.member", s)] + for s in sender_ids + ]), + "app": self.app_name, + } def make_room_link(self, room_id): # need /beta for Universal Links to work on iOS From 88ea5ab2c37345c1266db1446afa6d6472b7c9fe Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 23 May 2016 19:33:45 +0100 Subject: [PATCH 012/414] consistency is the better part of valour --- synapse/push/mailer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 8a9e9895ba..766888ca79 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -44,7 +44,7 @@ MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \ "in the %s room..." MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..." MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..." -MESSAGES_IN_ROOM = "There are some messages on %(app)s for you in the %(room)s room..." +MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..." MESSAGES_IN_ROOM_AND_OTHERS = \ "You have messages on %(app)s in the %(room)s room and others..." 
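# These subject templates use named %-interpolation, so callers pass a dict;
# with illustrative values:
#
#   MESSAGES_IN_ROOM % {"app": "Matrix", "room": "synapse dev"}
#   -> "You have messages on Matrix in the synapse dev room..."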
MESSAGES_FROM_PERSON_AND_OTHERS = \ From d4378825811eaee75acaa88ba6c9769af3159eb1 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 23 May 2016 22:54:32 +0100 Subject: [PATCH 013/414] fix debug text --- res/templates/mail.css | 5 +++++ res/templates/notif_mail.html | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/res/templates/mail.css b/res/templates/mail.css index f2b5e84abc..5ab3e1b06d 100644 --- a/res/templates/mail.css +++ b/res/templates/mail.css @@ -145,6 +145,11 @@ pre, code { text-decoration: none; } +.debug { + font-size: 10px; + color: #888; +} + .footer { margin-top: 20px; text-align: center; diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html index dc13398df1..1146ee133b 100644 --- a/res/templates/notif_mail.html +++ b/res/templates/notif_mail.html @@ -30,7 +30,7 @@ {% include 'room.html' with context %} {% endfor %} From cb8a321bdd35278fc1214d2dd2c1a8163ce25871 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 23 May 2016 22:54:56 +0100 Subject: [PATCH 014/414] fix NPE in room ordering --- synapse/push/mailer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 766888ca79..d8a0c35c79 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -135,8 +135,8 @@ class Mailer(object): yield concurrently_execute(_fetch_room_state, rooms_in_order, 3) # actually sort our so-called rooms_in_order list, most recent room first - rooms_in_order = rooms_in_order.sort( - key=lambda r: -notifs_by_room[r]['received_ts'] + rooms_in_order.sort( + key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0) ) rooms = [] From 680f1d9387f48ced9902f4e7d54758ab6a0aa9b0 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 23 May 2016 22:55:11 +0100 Subject: [PATCH 015/414] catch thinko in presentable names --- synapse/util/presentable_names.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/synapse/util/presentable_names.py b/synapse/util/presentable_names.py index 3efa8a8206..a6866f6117 100644 --- a/synapse/util/presentable_names.py +++ b/synapse/util/presentable_names.py @@ -14,6 +14,9 @@ # limitations under the License. import re +import logging + +logger = logging.getLogger(__name__) # intentionally looser than what aliases we allow to be registered since # other HSes may allow aliases that we would not @@ -105,13 +108,21 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True): # or inbound invite, or outbound 3PID invite. if all_members[0].sender == user_id: if "m.room.third_party_invite" in room_state_bytype: - third_party_invites = room_state_bytype["m.room.third_party_invite"] + third_party_invites = ( + room_state_bytype["m.room.third_party_invite"].values() + ) + if len(third_party_invites) > 0: # technically third party invite events are not member # events, but they are close enough - return "Inviting %s" ( - descriptor_from_member_events(third_party_invites) - ) + + # FIXME: no they're not - they look nothing like a member; + # they have a great big encrypted thing as their name to + # prevent leaking the 3PID name... 
+ # return "Inviting %s" % ( + # descriptor_from_member_events(third_party_invites) + # ) + return "Inviting email address" else: return ALL_ALONE else: From 137e6a45577d5850ef6936670791af12c2fe74d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 09:43:35 +0100 Subject: [PATCH 016/414] Shuffle things room --- synapse/handlers/sync.py | 70 +++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bc6d6af133..6b7c6a436e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -499,6 +499,10 @@ class SyncHandler(object): @defer.inlineCallbacks def generate_sync_result(self, sync_config, since_token=None, full_state=False): + # NB: The now_token gets changed by some of the generate_sync_* methods, + # this is due to some of the underlying streams not supporting the ability + # to query up to a given point. + # Always use the `now_token` in `SyncResultBuilder` now_token = yield self.event_sources.get_current_token() sync_result_builer = SyncResultBuilder( @@ -511,9 +515,10 @@ class SyncHandler(object): sync_result_builer ) - newly_joined_rooms, newly_joined_users = yield self.generate_sync_entry_for_rooms( + res = yield self.generate_sync_entry_for_rooms( sync_result_builer, account_data_by_room ) + newly_joined_rooms, newly_joined_users = res yield self.generate_sync_entry_for_presence( sync_result_builer, newly_joined_rooms, newly_joined_users @@ -631,7 +636,7 @@ class SyncHandler(object): if sync_result_builer.since_token: res = yield self._get_rooms_changed(sync_result_builer, ignored_users) - joined, invited, archived, newly_joined_rooms = res + room_entries, invited, newly_joined_rooms = res tags_by_room = yield self.store.get_updated_tags( user_id, @@ -639,13 +644,12 @@ class SyncHandler(object): ) else: res = yield self._get_all_rooms(sync_result_builer, ignored_users) - joined, invited, archived, newly_joined_rooms = res + room_entries, invited, newly_joined_rooms = res tags_by_room = yield self.store.get_tags_for_user(user_id) - def handle_joined(room_entry): + def handle_room_entries(room_entry): return self._generate_room_entry( - "joined", sync_result_builer, ignored_users, room_entry, @@ -655,21 +659,7 @@ class SyncHandler(object): always_include=sync_result_builer.full_state, ) - yield concurrently_execute(handle_joined, joined, 10) - - def handle_archived(room_entry): - return self._generate_room_entry( - "archived", - sync_result_builer, - ignored_users, - room_entry, - ephemeral=ephemeral_by_room.get(room_entry.room_id, []), - tags=tags_by_room.get(room_entry.room_id), - account_data=account_data_by_room.get(room_entry.room_id, {}), - always_include=sync_result_builer.full_state, - ) - - yield concurrently_execute(handle_archived, archived, 10) + yield concurrently_execute(handle_room_entries, room_entries, 10) sync_result_builer.invited.extend(invited) @@ -711,7 +701,7 @@ class SyncHandler(object): mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) newly_joined_rooms = [] - archived = [] + room_entries = [] invited = [] for room_id, events in mem_change_events_by_room_id.items(): non_joins = [e for e in events if e.membership != Membership.JOIN] @@ -759,8 +749,9 @@ class SyncHandler(object): if since_token and since_token.is_after(leave_token): continue - archived.append(RoomSyncResultBuilder( + room_entries.append(RoomSyncResultBuilder( room_id=room_id, + rtype="archived", events=None, newly_joined=room_id in 
newly_joined_rooms, full_state=False, @@ -778,7 +769,6 @@ class SyncHandler(object): limit=timeline_limit + 1, ) - joined = [] # We loop through all room ids, even if there are no new events, in case # there are non room events taht we need to notify about. for room_id in joined_room_ids: @@ -789,8 +779,9 @@ class SyncHandler(object): prev_batch_token = now_token.copy_and_replace("room_key", start_key) - joined.append(RoomSyncResultBuilder( + room_entries.append(RoomSyncResultBuilder( room_id=room_id, + rtype="joined", events=events, newly_joined=room_id in newly_joined_rooms, full_state=False, @@ -798,8 +789,9 @@ class SyncHandler(object): upto_token=prev_batch_token, )) else: - joined.append(RoomSyncResultBuilder( + room_entries.append(RoomSyncResultBuilder( room_id=room_id, + rtype="joined", events=[], newly_joined=room_id in newly_joined_rooms, full_state=False, @@ -807,7 +799,7 @@ class SyncHandler(object): upto_token=since_token, )) - defer.returnValue((joined, invited, archived, newly_joined_rooms)) + defer.returnValue((room_entries, invited, newly_joined_rooms)) @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builer, ignored_users): @@ -825,14 +817,14 @@ class SyncHandler(object): membership_list=membership_list ) - joined = [] + room_entries = [] invited = [] - archived = [] for event in room_list: if event.membership == Membership.JOIN: - joined.append(RoomSyncResultBuilder( + room_entries.append(RoomSyncResultBuilder( room_id=event.room_id, + rtype="joined", events=None, newly_joined=False, full_state=True, @@ -857,8 +849,9 @@ class SyncHandler(object): leave_token = now_token.copy_and_replace( "room_key", "s%d" % (event.stream_ordering,) ) - archived.append(RoomSyncResultBuilder( + room_entries.append(RoomSyncResultBuilder( room_id=event.room_id, + rtype="archived", events=None, newly_joined=False, full_state=True, @@ -866,10 +859,10 @@ class SyncHandler(object): upto_token=leave_token, )) - defer.returnValue((joined, invited, archived, [])) + defer.returnValue((room_entries, invited, [])) @defer.inlineCallbacks - def _generate_room_entry(self, room_type, sync_result_builer, ignored_users, + def _generate_room_entry(self, sync_result_builer, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False): since_token = sync_result_builer.since_token @@ -892,7 +885,7 @@ class SyncHandler(object): now_token=upto_token, since_token=since_token, recents=events, - newly_joined_room=newly_joined, # FIXME + newly_joined_room=newly_joined, ) account_data_events = [] @@ -922,7 +915,7 @@ class SyncHandler(object): full_state=full_state ) - if room_type == "joined": + if room_builder.rtype == "joined": unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, @@ -943,7 +936,7 @@ class SyncHandler(object): unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builer.joined.append(room_sync) - elif room_type == "archived": + elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, @@ -952,6 +945,8 @@ class SyncHandler(object): ) if room_sync or always_include: sync_result_builer.archived.append(room_sync) + else: + raise Exception("Unrecognized rtype: %r", room_builder.rtype) def _action_has_highlight(actions): @@ -1017,9 +1012,10 @@ class SyncResultBuilder(object): class RoomSyncResultBuilder(object): - def __init__(self, room_id, events, newly_joined, full_state, since_token, - upto_token): + def __init__(self, room_id, rtype, events, newly_joined, full_state, + 
since_token, upto_token): self.room_id = room_id + self.rtype = rtype self.events = events self.newly_joined = newly_joined self.full_state = full_state From 84f94e4cbbd8c79d867503159b564aaf233e46d5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 10:14:53 +0100 Subject: [PATCH 017/414] Add comments --- synapse/handlers/sync.py | 112 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 105 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6b7c6a436e..271dd4c147 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -499,6 +499,17 @@ class SyncHandler(object): @defer.inlineCallbacks def generate_sync_result(self, sync_config, since_token=None, full_state=False): + """Generates a sync result. + + Args: + sync_config (SyncConfig) + since_token (StreamToken) + full_state (bool) + + Returns: + Deferred(SyncResult) + """ + # NB: The now_token gets changed by some of the generate_sync_* methods, # this is due to some of the underlying streams not supporting the ability # to query up to a given point. @@ -511,16 +522,16 @@ class SyncHandler(object): now_token=now_token, ) - account_data_by_room = yield self.generate_sync_entry_for_account_data( + account_data_by_room = yield self._generate_sync_entry_for_account_data( sync_result_builer ) - res = yield self.generate_sync_entry_for_rooms( + res = yield self._generate_sync_entry_for_rooms( sync_result_builer, account_data_by_room ) newly_joined_rooms, newly_joined_users = res - yield self.generate_sync_entry_for_presence( + yield self._generate_sync_entry_for_presence( sync_result_builer, newly_joined_rooms, newly_joined_users ) @@ -534,7 +545,16 @@ class SyncHandler(object): )) @defer.inlineCallbacks - def generate_sync_entry_for_account_data(self, sync_result_builer): + def _generate_sync_entry_for_account_data(self, sync_result_builer): + """Generates the account data portion of the sync response. Populates + `sync_result_builer` with the result. + + Args: + sync_result_builer(SyncResultBuilder) + + Returns: + Deferred(dict): A dictionary containing the per room account data. + """ sync_config = sync_result_builer.sync_config user_id = sync_result_builer.sync_config.user.to_string() since_token = sync_result_builer.since_token @@ -575,8 +595,18 @@ class SyncHandler(object): defer.returnValue(account_data_by_room) @defer.inlineCallbacks - def generate_sync_entry_for_presence(self, sync_result_builer, newly_joined_rooms, - newly_joined_users): + def _generate_sync_entry_for_presence(self, sync_result_builer, newly_joined_rooms, + newly_joined_users): + """Generates the presence portion of the sync response. Populates the + `sync_result_builer` with the result. + + Args: + sync_result_builer(SyncResultBuilder) + newly_joined_rooms(list): List of rooms that the user has joined + since the last sync (or empty if an initial sync) + newly_joined_users(list): List of users that have joined rooms + since the last sync (or empty if an initial sync) + """ now_token = sync_result_builer.now_token sync_config = sync_result_builer.sync_config user = sync_result_builer.sync_config.user @@ -617,7 +647,18 @@ class SyncHandler(object): sync_result_builer.presence = presence @defer.inlineCallbacks - def generate_sync_entry_for_rooms(self, sync_result_builer, account_data_by_room): + def _generate_sync_entry_for_rooms(self, sync_result_builer, account_data_by_room): + """Generates the rooms portion of the sync response. Populates the + `sync_result_builer` with the result. 
+ + Args: + sync_result_builer(SyncResultBuilder) + account_data_by_room(dict): Dictionary of per room account data + + Returns: + Deferred(tuple): Returns a 2-tuple of + `(newly_joined_rooms, newly_joined_users)` + """ user_id = sync_result_builer.sync_config.user.to_string() now_token, ephemeral_by_room = yield self.ephemeral_by_room( @@ -676,6 +717,16 @@ class SyncHandler(object): @defer.inlineCallbacks def _get_rooms_changed(self, sync_result_builer, ignored_users): + """Gets the the changes that have happened since the last sync. + + Args: + sync_result_builer(SyncResultBuilder) + ignored_users(set(str)): Set of users ignored by user. + + Returns: + Deferred(tuple): Returns a tuple of the form: + `([RoomSyncResultBuilder], [InvitedSyncResult], newly_joined_rooms)` + """ user_id = sync_result_builer.sync_config.user.to_string() since_token = sync_result_builer.since_token now_token = sync_result_builer.now_token @@ -803,6 +854,17 @@ class SyncHandler(object): @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builer, ignored_users): + """Returns entries for all rooms for the user. + + Args: + sync_result_builer(SyncResultBuilder) + ignored_users(set(str)): Set of users ignored by user. + + Returns: + Deferred(tuple): Returns a tuple of the form: + `([RoomSyncResultBuilder], [InvitedSyncResult], [])` + """ + user_id = sync_result_builer.sync_config.user.to_string() since_token = sync_result_builer.since_token now_token = sync_result_builer.now_token @@ -865,6 +927,20 @@ class SyncHandler(object): def _generate_room_entry(self, sync_result_builer, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False): + """Populates the `joined` and `archived` section of `sync_result_builer` + based on the `room_builder`. + + Args: + sync_result_builer(SyncResultBuilder) + ignored_users(set(str)): Set of users ignored by user. + room_builder(RoomSyncResultBuilder) + ephemeral(list): List of new ephemeral events for room + tags(list): List of *all* tags for room, or None if there has been + no change. + account_data(list): List of new account data for room + always_include(bool): Always include this room in the sync response, + even if empty. + """ since_token = sync_result_builer.since_token now_token = sync_result_builer.now_token sync_config = sync_result_builer.sync_config @@ -998,7 +1074,15 @@ def _calculate_state(timeline_contains, timeline_start, previous, current): class SyncResultBuilder(object): + "Used to help build up a new SyncResult for a user" def __init__(self, sync_config, full_state, since_token, now_token): + """ + Args: + sync_config(SyncConfig) + full_state(bool): The full_state flag as specified by user + since_token(StreamToken): The token supplied by user, or None. + now_token(StreamToken): The token to sync up to. + """ self.sync_config = sync_config self.full_state = full_state self.since_token = since_token @@ -1012,8 +1096,22 @@ class SyncResultBuilder(object): class RoomSyncResultBuilder(object): + """Stores information needed to create either a `JoinedSyncResult` or + `ArchivedSyncResult`. + """ def __init__(self, room_id, rtype, events, newly_joined, full_state, since_token, upto_token): + """ + Args: + room_id(str) + rtype(str): One of `"joined"` or `"archived"` + events(list): List of events to include in the room, (more events + may be added when generating result). 
+ newly_joined(bool): If the user has newly joined the room + full_state(bool): Whether the full state should be sent in result + since_token(StreamToken): Earliest point to return events from, or None + upto_token(StreamToken): Latest point to return events from. + """ self.room_id = room_id self.rtype = rtype self.events = events From 79bea8ab9a4c4cb4a0907784603698731424c00a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 10:22:24 +0100 Subject: [PATCH 018/414] Inline function. Make load_filtered_recents private --- synapse/handlers/sync.py | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 271dd4c147..3aa4432d68 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -203,17 +203,6 @@ class SyncHandler(object): rules = format_push_rules_for_user(user, rawrules, enabled_map) defer.returnValue(rules) - def account_data_for_user(self, account_data): - account_data_events = [] - - for account_data_type, content in account_data.items(): - account_data_events.append({ - "type": account_data_type, - "content": content, - }) - - return account_data_events - @defer.inlineCallbacks def ephemeral_by_room(self, sync_config, now_token, since_token=None): """Get the ephemeral events for each room the user is in @@ -277,8 +266,8 @@ class SyncHandler(object): defer.returnValue((now_token, ephemeral_by_room)) @defer.inlineCallbacks - def load_filtered_recents(self, room_id, sync_config, now_token, - since_token=None, recents=None, newly_joined_room=False): + def _load_filtered_recents(self, room_id, sync_config, now_token, + since_token=None, recents=None, newly_joined_room=False): """ Returns: a Deferred TimelineBatch @@ -586,9 +575,10 @@ class SyncHandler(object): sync_config.user ) - account_data_for_user = sync_config.filter_collection.filter_account_data( - self.account_data_for_user(account_data) - ) + account_data_for_user = sync_config.filter_collection.filter_account_data([ + {"type": account_data_type, "content": content} + for account_data_type, content in account_data.items() + ]) sync_result_builer.account_data = account_data_for_user @@ -956,7 +946,7 @@ class SyncHandler(object): since_token = room_builder.since_token upto_token = room_builder.upto_token - batch = yield self.load_filtered_recents( + batch = yield self._load_filtered_recents( room_id, sync_config, now_token=upto_token, since_token=since_token, From be2c67738640ff88fcf63701c9e3d82afc385e47 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 10:53:03 +0100 Subject: [PATCH 019/414] Spell builder correctly --- synapse/handlers/sync.py | 126 +++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3aa4432d68..12a4cc8b57 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -505,50 +505,50 @@ class SyncHandler(object): # Always use the `now_token` in `SyncResultBuilder` now_token = yield self.event_sources.get_current_token() - sync_result_builer = SyncResultBuilder( + sync_result_builder = SyncResultBuilder( sync_config, full_state, since_token=since_token, now_token=now_token, ) account_data_by_room = yield self._generate_sync_entry_for_account_data( - sync_result_builer + sync_result_builder ) res = yield self._generate_sync_entry_for_rooms( - sync_result_builer, account_data_by_room + sync_result_builder, account_data_by_room ) newly_joined_rooms, 
newly_joined_users = res yield self._generate_sync_entry_for_presence( - sync_result_builer, newly_joined_rooms, newly_joined_users + sync_result_builder, newly_joined_rooms, newly_joined_users ) defer.returnValue(SyncResult( - presence=sync_result_builer.presence, - account_data=sync_result_builer.account_data, - joined=sync_result_builer.joined, - invited=sync_result_builer.invited, - archived=sync_result_builer.archived, - next_batch=sync_result_builer.now_token, + presence=sync_result_builder.presence, + account_data=sync_result_builder.account_data, + joined=sync_result_builder.joined, + invited=sync_result_builder.invited, + archived=sync_result_builder.archived, + next_batch=sync_result_builder.now_token, )) @defer.inlineCallbacks - def _generate_sync_entry_for_account_data(self, sync_result_builer): + def _generate_sync_entry_for_account_data(self, sync_result_builder): """Generates the account data portion of the sync response. Populates - `sync_result_builer` with the result. + `sync_result_builder` with the result. Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) Returns: Deferred(dict): A dictionary containing the per room account data. """ - sync_config = sync_result_builer.sync_config - user_id = sync_result_builer.sync_config.user.to_string() - since_token = sync_result_builer.since_token + sync_config = sync_result_builder.sync_config + user_id = sync_result_builder.sync_config.user.to_string() + since_token = sync_result_builder.since_token - if since_token and not sync_result_builer.full_state: + if since_token and not sync_result_builder.full_state: account_data, account_data_by_room = ( yield self.store.get_updated_account_data_for_user( user_id, @@ -580,31 +580,31 @@ class SyncHandler(object): for account_data_type, content in account_data.items() ]) - sync_result_builer.account_data = account_data_for_user + sync_result_builder.account_data = account_data_for_user defer.returnValue(account_data_by_room) @defer.inlineCallbacks - def _generate_sync_entry_for_presence(self, sync_result_builer, newly_joined_rooms, + def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms, newly_joined_users): """Generates the presence portion of the sync response. Populates the - `sync_result_builer` with the result. + `sync_result_builder` with the result. 
Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) newly_joined_rooms(list): List of rooms that the user has joined since the last sync (or empty if an initial sync) newly_joined_users(list): List of users that have joined rooms since the last sync (or empty if an initial sync) """ - now_token = sync_result_builer.now_token - sync_config = sync_result_builer.sync_config - user = sync_result_builer.sync_config.user + now_token = sync_result_builder.now_token + sync_config = sync_result_builder.sync_config + user = sync_result_builder.sync_config.user presence_source = self.event_sources.sources["presence"] - since_token = sync_result_builer.since_token - if since_token and not sync_result_builer.full_state: + since_token = sync_result_builder.since_token + if since_token and not sync_result_builder.full_state: presence_key = since_token.presence_key else: presence_key = None @@ -614,7 +614,7 @@ class SyncHandler(object): from_key=presence_key, is_guest=sync_config.is_guest, ) - sync_result_builer.now_token = now_token.copy_and_replace( + sync_result_builder.now_token = now_token.copy_and_replace( "presence_key", presence_key ) @@ -634,27 +634,27 @@ class SyncHandler(object): presence ) - sync_result_builer.presence = presence + sync_result_builder.presence = presence @defer.inlineCallbacks - def _generate_sync_entry_for_rooms(self, sync_result_builer, account_data_by_room): + def _generate_sync_entry_for_rooms(self, sync_result_builder, account_data_by_room): """Generates the rooms portion of the sync response. Populates the - `sync_result_builer` with the result. + `sync_result_builder` with the result. Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) account_data_by_room(dict): Dictionary of per room account data Returns: Deferred(tuple): Returns a 2-tuple of `(newly_joined_rooms, newly_joined_users)` """ - user_id = sync_result_builer.sync_config.user.to_string() + user_id = sync_result_builder.sync_config.user.to_string() now_token, ephemeral_by_room = yield self.ephemeral_by_room( - sync_result_builer.sync_config, sync_result_builer.now_token + sync_result_builder.sync_config, sync_result_builder.now_token ) - sync_result_builer.now_token = now_token + sync_result_builder.now_token = now_token ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id, @@ -665,38 +665,38 @@ class SyncHandler(object): else: ignored_users = frozenset() - if sync_result_builer.since_token: - res = yield self._get_rooms_changed(sync_result_builer, ignored_users) + if sync_result_builder.since_token: + res = yield self._get_rooms_changed(sync_result_builder, ignored_users) room_entries, invited, newly_joined_rooms = res tags_by_room = yield self.store.get_updated_tags( user_id, - sync_result_builer.since_token.account_data_key, + sync_result_builder.since_token.account_data_key, ) else: - res = yield self._get_all_rooms(sync_result_builer, ignored_users) + res = yield self._get_all_rooms(sync_result_builder, ignored_users) room_entries, invited, newly_joined_rooms = res tags_by_room = yield self.store.get_tags_for_user(user_id) def handle_room_entries(room_entry): return self._generate_room_entry( - sync_result_builer, + sync_result_builder, ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), account_data=account_data_by_room.get(room_entry.room_id, {}), - 
always_include=sync_result_builer.full_state, + always_include=sync_result_builder.full_state, ) yield concurrently_execute(handle_room_entries, room_entries, 10) - sync_result_builer.invited.extend(invited) + sync_result_builder.invited.extend(invited) # Now we want to get any newly joined users newly_joined_users = set() - for joined_sync in sync_result_builer.joined: + for joined_sync in sync_result_builder.joined: it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values()) for event in it: if event.type == EventTypes.Member: @@ -706,21 +706,21 @@ class SyncHandler(object): defer.returnValue((newly_joined_rooms, newly_joined_users)) @defer.inlineCallbacks - def _get_rooms_changed(self, sync_result_builer, ignored_users): + def _get_rooms_changed(self, sync_result_builder, ignored_users): """Gets the the changes that have happened since the last sync. Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. Returns: Deferred(tuple): Returns a tuple of the form: `([RoomSyncResultBuilder], [InvitedSyncResult], newly_joined_rooms)` """ - user_id = sync_result_builer.sync_config.user.to_string() - since_token = sync_result_builer.since_token - now_token = sync_result_builer.now_token - sync_config = sync_result_builer.sync_config + user_id = sync_result_builder.sync_config.user.to_string() + since_token = sync_result_builder.since_token + now_token = sync_result_builder.now_token + sync_config = sync_result_builder.sync_config assert since_token @@ -843,11 +843,11 @@ class SyncHandler(object): defer.returnValue((room_entries, invited, newly_joined_rooms)) @defer.inlineCallbacks - def _get_all_rooms(self, sync_result_builer, ignored_users): + def _get_all_rooms(self, sync_result_builder, ignored_users): """Returns entries for all rooms for the user. Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. Returns: @@ -855,10 +855,10 @@ class SyncHandler(object): `([RoomSyncResultBuilder], [InvitedSyncResult], [])` """ - user_id = sync_result_builer.sync_config.user.to_string() - since_token = sync_result_builer.since_token - now_token = sync_result_builer.now_token - sync_config = sync_result_builer.sync_config + user_id = sync_result_builder.sync_config.user.to_string() + since_token = sync_result_builder.since_token + now_token = sync_result_builder.now_token + sync_config = sync_result_builder.sync_config membership_list = ( Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN @@ -914,14 +914,14 @@ class SyncHandler(object): defer.returnValue((room_entries, invited, [])) @defer.inlineCallbacks - def _generate_room_entry(self, sync_result_builer, ignored_users, + def _generate_room_entry(self, sync_result_builder, ignored_users, room_builder, ephemeral, tags, account_data, always_include=False): - """Populates the `joined` and `archived` section of `sync_result_builer` + """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: - sync_result_builer(SyncResultBuilder) + sync_result_builder(SyncResultBuilder) ignored_users(set(str)): Set of users ignored by user. room_builder(RoomSyncResultBuilder) ephemeral(list): List of new ephemeral events for room @@ -931,9 +931,9 @@ class SyncHandler(object): always_include(bool): Always include this room in the sync response, even if empty. 
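            (Editor's addition: the method itself returns Deferred(None); its
            real output is the JoinedSyncResult or ArchivedSyncResult that it
            appends to sync_result_builder.joined or .archived.)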
""" - since_token = sync_result_builer.since_token - now_token = sync_result_builer.now_token - sync_config = sync_result_builer.sync_config + since_token = sync_result_builder.since_token + now_token = sync_result_builder.now_token + sync_config = sync_result_builder.sync_config room_id = room_builder.room_id events = room_builder.events @@ -941,7 +941,7 @@ class SyncHandler(object): full_state = ( room_builder.full_state or newly_joined - or sync_result_builer.full_state + or sync_result_builder.full_state ) since_token = room_builder.since_token upto_token = room_builder.upto_token @@ -1001,7 +1001,7 @@ class SyncHandler(object): unread_notifications["notification_count"] = notifs["notify_count"] unread_notifications["highlight_count"] = notifs["highlight_count"] - sync_result_builer.joined.append(room_sync) + sync_result_builder.joined.append(room_sync) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, @@ -1010,7 +1010,7 @@ class SyncHandler(object): account_data=account_data, ) if room_sync or always_include: - sync_result_builer.archived.append(room_sync) + sync_result_builder.archived.append(room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype) From b08ad0389e0e412798f32d9f5656db483238173d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 11:04:35 +0100 Subject: [PATCH 020/414] Only include non-offline presence in initial sync --- synapse/handlers/sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 12a4cc8b57..8143171c11 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -606,13 +606,16 @@ class SyncHandler(object): since_token = sync_result_builder.since_token if since_token and not sync_result_builder.full_state: presence_key = since_token.presence_key + include_offline = True else: presence_key = None + include_offline = False presence, presence_key = yield presence_source.get_new_events( user=user, from_key=presence_key, is_guest=sync_config.is_guest, + include_offline=include_offline, ) sync_result_builder.now_token = now_token.copy_and_replace( "presence_key", presence_key From 1c5ed2a19ba6da4478c54df072e623598c0ea60d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 11:21:34 +0100 Subject: [PATCH 021/414] Only work out newly_joined_users for incremental sync --- synapse/handlers/sync.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 8143171c11..476dcf38e2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -699,12 +699,15 @@ class SyncHandler(object): # Now we want to get any newly joined users newly_joined_users = set() - for joined_sync in sync_result_builder.joined: - it = itertools.chain(joined_sync.timeline.events, joined_sync.state.values()) - for event in it: - if event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - newly_joined_users.add(event.state_key) + if sync_result_builder.since_token: + for joined_sync in sync_result_builder.joined: + it = itertools.chain( + joined_sync.timeline.events, joined_sync.state.values() + ) + for event in it: + if event.type == EventTypes.Member: + if event.membership == Membership.JOIN: + newly_joined_users.add(event.state_key) defer.returnValue((newly_joined_rooms, newly_joined_users)) From 69003039973169097592359f3f34fc32c5bbaeb0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 11:44:55 +0100 
Subject: [PATCH 022/414] Don't send down all ephemeral events --- synapse/handlers/sync.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 476dcf38e2..6f7dd45ef3 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -655,7 +655,9 @@ class SyncHandler(object): user_id = sync_result_builder.sync_config.user.to_string() now_token, ephemeral_by_room = yield self.ephemeral_by_room( - sync_result_builder.sync_config, sync_result_builder.now_token + sync_result_builder.sync_config, + now_token=sync_result_builder.now_token, + since_token=sync_result_builder.since_token, ) sync_result_builder.now_token = now_token From faad233ea61cfff2c377609fa1d3c64d39f8a039 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 24 May 2016 14:00:43 +0100 Subject: [PATCH 023/414] Change short circuit path --- synapse/handlers/sync.py | 42 +++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6f7dd45ef3..3b89582d79 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -273,23 +273,14 @@ class SyncHandler(object): a Deferred TimelineBatch """ with Measure(self.clock, "load_filtered_recents"): - filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = max(timeline_limit * filtering_factor, 10) - max_repeat = 5 # Only try a few times per room, otherwise - room_key = now_token.room_key - end_key = room_key if recents is None or newly_joined_room or timeline_limit < len(recents): limited = True else: limited = False - if since_token: - if not now_token.is_after(since_token): - limited = False - - if recents is not None: + if recents: recents = sync_config.filter_collection.filter_room_timeline(recents) recents = yield filter_events_for_client( self.store, @@ -299,6 +290,19 @@ class SyncHandler(object): else: recents = [] + if not limited: + defer.returnValue(TimelineBatch( + events=recents, + prev_batch=now_token, + limited=False + )) + + filtering_factor = 2 + load_limit = max(timeline_limit * filtering_factor, 10) + max_repeat = 5 # Only try a few times per room, otherwise + room_key = now_token.room_key + end_key = room_key + since_key = None if since_token and not newly_joined_room: since_key = since_token.room_key @@ -939,18 +943,24 @@ class SyncHandler(object): always_include(bool): Always include this room in the sync response, even if empty. """ - since_token = sync_result_builder.since_token - now_token = sync_result_builder.now_token - sync_config = sync_result_builder.sync_config - - room_id = room_builder.room_id - events = room_builder.events newly_joined = room_builder.newly_joined full_state = ( room_builder.full_state or newly_joined or sync_result_builder.full_state ) + events = room_builder.events + + # We want to shortcut out as early as possible. 
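+        # (Editor's note: concretely, if nothing forces this room into
+        # the response (no new account data, no new ephemeral events,
+        # not a full_state request) and there are no timeline events or
+        # tag changes either, we return before the expensive
+        # _load_filtered_recents and state calculation work below.)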
+ if not (always_include or account_data or ephemeral or full_state): + if events == [] and tags is None: + return + + since_token = sync_result_builder.since_token + now_token = sync_result_builder.now_token + sync_config = sync_result_builder.sync_config + + room_id = room_builder.room_id since_token = room_builder.since_token upto_token = room_builder.upto_token From 95b86e6dada4135c2b30932809084510adb3a076 Mon Sep 17 00:00:00 2001 From: Matrix Date: Wed, 18 May 2016 01:07:44 +0100 Subject: [PATCH 024/414] tweak mail notifs --- res/templates/notif_mail.html | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html index 1146ee133b..8aee68b591 100644 --- a/res/templates/notif_mail.html +++ b/res/templates/notif_mail.html @@ -30,18 +30,20 @@ {% include 'room.html' with context %} {% endfor %} From b007ee46065ed25ed1ca248cf47c19ee7f4b56c2 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 24 May 2016 15:11:28 +0100 Subject: [PATCH 025/414] Check for presence of 'avatar_url' key --- synapse/push/mailer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index d8a0c35c79..3ae92d1574 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -259,7 +259,9 @@ class Mailer(object): sender_state_event = room_state[("m.room.member", event.sender)] sender_name = name_from_member_event(sender_state_event) - sender_avatar_url = sender_state_event.content["avatar_url"] + sender_avatar_url = None + if "avatar_url" in sender_state_event.content: + sender_avatar_url = sender_state_event.content["avatar_url"] # 'hash' for deterministically picking default images: use # sender_hash % the number of default images to choose from From cc84f7cb8e7f379ceb23a8bc719465b989199b20 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 May 2016 10:35:15 +0100 Subject: [PATCH 026/414] Send down correct error response if user not found --- synapse/handlers/auth.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 68d0d78fc6..26c865e171 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -18,7 +18,7 @@ from twisted.internet import defer from ._base import BaseHandler from synapse.api.constants import LoginType from synapse.types import UserID -from synapse.api.errors import AuthError, LoginError, Codes +from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError from synapse.util.async import run_on_reactor from twisted.web.client import PartialDownloadError @@ -563,7 +563,12 @@ class AuthHandler(BaseHandler): except_access_token_ids = [requester.access_token_id] if requester else [] - yield self.store.user_set_password_hash(user_id, password_hash) + try: + yield self.store.user_set_password_hash(user_id, password_hash) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Unknown user", Codes.NOT_FOUND) + raise e yield self.store.user_delete_access_tokens( user_id, except_access_token_ids ) From 85b992f621743adc45945f67b1b1bbf4aed198fb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 May 2016 10:44:44 +0100 Subject: [PATCH 027/414] Fix to allow start with postgres --- synapse/storage/__init__.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 49feb77779..8581796b7e 100644 --- a/synapse/storage/__init__.py 
+++ b/synapse/storage/__init__.py @@ -17,7 +17,7 @@ from twisted.internet import defer from .appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore ) -from ._base import Cache +from ._base import Cache, LoggingTransaction from .directory import DirectoryStore from .events import EventsStore from .presence import PresenceStore, UserPresenceState @@ -174,7 +174,12 @@ class DataStore(RoomMemberStore, RoomStore, prefilled_cache=push_rules_prefill, ) - cur = db_conn.cursor() + cur = LoggingTransaction( + db_conn.cursor(), + name="_find_stream_orderings_for_times_txn", + database_engine=self.database_engine, + after_callbacks=[] + ) self._find_stream_orderings_for_times_txn(cur) cur.close() From 8b5dbee47ebafc8243abb25a1771bd1983d48306 Mon Sep 17 00:00:00 2001 From: Jimmy Cuadra Date: Sun, 29 May 2016 02:31:56 -0700 Subject: [PATCH 028/414] Alter phrasing to clarify where info is stored. A user on #matrix:matrix.org was confused by the phrasing of the first sentence in the paragraph and couldn't tell whether it was saying that the homeserver stored the data or the clients did. This change splits it into two sentences to make the subject of each sentence clear. --- README.rst | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.rst b/README.rst index 95e7257115..722ff9f11f 100644 --- a/README.rst +++ b/README.rst @@ -58,12 +58,13 @@ the spec in the context of a codebase and let you run your own homeserver and generally help bootstrap the ecosystem. In Matrix, every user runs one or more Matrix clients, which connect through to -a Matrix homeserver which stores all their personal chat history and user -account information - much as a mail client connects through to an IMAP/SMTP -server. Just like email, you can either run your own Matrix homeserver and -control and own your own communications and history or use one hosted by -someone else (e.g. matrix.org) - there is no single point of control or -mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc. +a Matrix homeserver. The homeserver stores all their personal chat history and +user account information - much as a mail client connects through to an +IMAP/SMTP server. Just like email, you can either run your own Matrix +homeserver and control and own your own communications and history or use one +hosted by someone else (e.g. matrix.org) - there is no single point of control +or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, +etc. Synapse ships with two basic demo Matrix clients: webclient (a basic group chat web client demo implemented in AngularJS) and cmdclient (a basic Python From 887c6e6f052e1dc5e61a0b4bade8e7bd3a63e275 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 31 May 2016 11:05:16 +0100 Subject: [PATCH 029/414] Split out the room list handler So I can use it from federation bits without pulling in all the handlers. 
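
For context, the hs.get_room_list_handler() accessor introduced below follows
the homeserver's lazy builder convention: each name in the dependency list
gets a get_<name>() that calls the matching build_<name>() once and caches
the result. A minimal sketch of that convention (TinyServer and its caching
dict are invented here for illustration; the real HomeServer generates the
accessors rather than writing them by hand):

    class TinyServer(object):
        def __init__(self):
            self._built = {}

        def _get_or_build(self, name):
            # Build on first use, then cache, so every caller shares one
            # instance per homeserver.
            if name not in self._built:
                self._built[name] = getattr(self, "build_" + name)()
            return self._built[name]

        def build_room_list_handler(self):
            from synapse.handlers.room import RoomListHandler
            return RoomListHandler(self)

        def get_room_list_handler(self):
            return self._get_or_build("room_list_handler")

This is why the servlets changed below can take the handler from hs directly
instead of going through hs.get_handlers().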
--- synapse/handlers/__init__.py | 3 +-- synapse/rest/client/v1/room.py | 2 +- synapse/server.py | 5 +++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index 9442ae6f1d..0ac5d3da3a 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -17,7 +17,7 @@ from synapse.appservice.scheduler import AppServiceScheduler from synapse.appservice.api import ApplicationServiceApi from .register import RegistrationHandler from .room import ( - RoomCreationHandler, RoomListHandler, RoomContextHandler, + RoomCreationHandler, RoomContextHandler, ) from .room_member import RoomMemberHandler from .message import MessageHandler @@ -50,7 +50,6 @@ class Handlers(object): self.event_handler = EventHandler(hs) self.federation_handler = FederationHandler(hs) self.profile_handler = ProfileHandler(hs) - self.room_list_handler = RoomListHandler(hs) self.directory_handler = DirectoryHandler(hs) self.admin_handler = AdminHandler(hs) self.receipts_handler = ReceiptsHandler(hs) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 644aa4e513..2d22bbdaa3 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -279,7 +279,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - handler = self.handlers.room_list_handler + handler = self.hs.get_room_list_handler() data = yield handler.get_public_room_list() defer.returnValue((200, data)) diff --git a/synapse/server.py b/synapse/server.py index 01f828819f..bfd5608b7d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -30,6 +30,7 @@ from synapse.handlers import Handlers from synapse.handlers.presence import PresenceHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import TypingHandler +from synapse.handlers.room import RoomListHandler from synapse.state import StateHandler from synapse.storage import DataStore from synapse.util import Clock @@ -84,6 +85,7 @@ class HomeServer(object): 'presence_handler', 'sync_handler', 'typing_handler', + 'room_list_handler', 'notifier', 'distributor', 'client_resource', @@ -179,6 +181,9 @@ class HomeServer(object): def build_sync_handler(self): return SyncHandler(self) + def build_room_list_handler(self): + return RoomListHandler(self) + def build_event_sources(self): return EventSources(self) From e1625d62a8313ff34662aa72ae4d0574e540cc2b Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 31 May 2016 11:55:57 +0100 Subject: [PATCH 030/414] Add federation room list servlet --- synapse/config/server.py | 6 +++ synapse/federation/transport/server.py | 65 +++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index 0b5f462e44..8d554d749d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -29,6 +29,7 @@ class ServerConfig(Config): self.user_agent_suffix = config.get("user_agent_suffix") self.use_frozen_dicts = config.get("use_frozen_dicts", True) self.public_baseurl = config.get("public_baseurl") + self.secondary_directory_servers = config.get("secondary_directory_servers", []) if self.public_baseurl is not None: if self.public_baseurl[-1] != '/': @@ -156,6 +157,11 @@ class ServerConfig(Config): # hard limit. 
soft_file_limit: 0 + # A list of other Home Servers to fetch the public room directory from + # and include in the public room directory of this home server + # secondary_directory_servers: + # - matrix.org + # List of ports that Synapse should listen on, their purpose and their # configuration. listeners: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 5b6c7d11dd..b82f72fd57 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -134,10 +134,11 @@ class Authenticator(object): class BaseFederationServlet(object): - def __init__(self, handler, authenticator, ratelimiter, server_name): + def __init__(self, handler, authenticator, ratelimiter, server_name, room_list_handler): self.handler = handler self.authenticator = authenticator self.ratelimiter = ratelimiter + self.room_list_handler = room_list_handler def _wrap(self, code): authenticator = self.authenticator @@ -491,6 +492,66 @@ class OpenIdUserInfo(BaseFederationServlet): def _wrap(self, code): return code +class PublicRoomList(BaseFederationServlet): + """ + Fetch the public room list for this server. + + This API returns information in the same format as /publicRooms on the + client API, but will only ever include local public rooms and hence is + intended for consumption by other home servers. + + GET /publicRooms HTTP/1.1 + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "chunk": [ + { + "aliases": [ + "#test:localhost" + ], + "guest_can_join": false, + "name": "test room", + "num_joined_members": 3, + "room_id": "!whkydVegtvatLfXmPN:localhost", + "world_readable": false + } + ], + "end": "END", + "start": "START" + } + """ + + PATH = "/publicRooms" + + @defer.inlineCallbacks + def on_GET(self, request): + data = yield self.room_list_handler.get_public_room_list() + defer.returnValue((200, data)) + + token = parse_string(request, "access_token") + if token is None: + defer.returnValue((401, { + "errcode": "M_MISSING_TOKEN", "error": "Access Token required" + })) + return + + user_id = yield self.handler.on_openid_userinfo(token) + + if user_id is None: + defer.returnValue((401, { + "errcode": "M_UNKNOWN_TOKEN", + "error": "Access Token unknown or expired" + })) + + defer.returnValue((200, {"sub": user_id})) + + # Avoid doing remote HS authorization checks which are done by default by + # BaseFederationServlet. 
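+    # (Editor's note: _wrap is where BaseFederationServlet normally adds
+    # signature verification of the calling homeserver; returning the
+    # handler unchanged leaves /publicRooms unauthenticated, which is
+    # reasonable since the directory it serves is public anyway.)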
+ def _wrap(self, code): + return code + SERVLET_CLASSES = ( FederationSendServlet, @@ -513,6 +574,7 @@ SERVLET_CLASSES = ( FederationThirdPartyInviteExchangeServlet, On3pidBindServlet, OpenIdUserInfo, + PublicRoomList, ) @@ -523,4 +585,5 @@ def register_servlets(hs, resource, authenticator, ratelimiter): authenticator=authenticator, ratelimiter=ratelimiter, server_name=hs.hostname, + room_list_handler=hs.get_room_list_handler(), ).register(resource) From 70ecb415f553e5de86833034ce184a8a905b7ed5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 31 May 2016 12:00:54 +0100 Subject: [PATCH 031/414] Fix c+p fail --- synapse/federation/transport/server.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index b82f72fd57..f23c02efde 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -530,23 +530,6 @@ class PublicRoomList(BaseFederationServlet): data = yield self.room_list_handler.get_public_room_list() defer.returnValue((200, data)) - token = parse_string(request, "access_token") - if token is None: - defer.returnValue((401, { - "errcode": "M_MISSING_TOKEN", "error": "Access Token required" - })) - return - - user_id = yield self.handler.on_openid_userinfo(token) - - if user_id is None: - defer.returnValue((401, { - "errcode": "M_UNKNOWN_TOKEN", - "error": "Access Token unknown or expired" - })) - - defer.returnValue((200, {"sub": user_id})) - # Avoid doing remote HS authorization checks which are done by default by # BaseFederationServlet. def _wrap(self, code): From e5b0bbcd3381e581ecf9876760bbe36c66fcb4fe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 May 2016 13:46:58 +0100 Subject: [PATCH 032/414] Add caches to bulk_get_push_rules* --- synapse/push/bulk_push_rule_evaluator.py | 8 +++++--- synapse/storage/push_rule.py | 16 ++++++++++------ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 25e13b3423..25f2fb9da4 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -29,6 +29,7 @@ logger = logging.getLogger(__name__) def decode_rule_json(rule): + rule = dict(rule) rule['conditions'] = json.loads(rule['conditions']) rule['actions'] = json.loads(rule['actions']) return rule @@ -39,6 +40,8 @@ def _get_rules(room_id, user_ids, store): rules_by_user = yield store.bulk_get_push_rules(user_ids) rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids) + rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} + rules_by_user = { uid: list_with_base_rules([ decode_rule_json(rule_list) @@ -51,11 +54,10 @@ def _get_rules(room_id, user_ids, store): # fetch disabled rules, but this won't account for any server default # rules the user has disabled, so we need to do this too. for uid in user_ids: - if uid not in rules_enabled_by_user: + user_enabled_map = rules_enabled_by_user.get(uid) + if not user_enabled_map: continue - user_enabled_map = rules_enabled_by_user[uid] - for i, rule in enumerate(rules_by_user[uid]): rule_id = rule['rule_id'] diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index d2bf7f2aec..f285f59afd 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -14,7 +14,7 @@ # limitations under the License. 
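# (Editor's aside on the decorators used below: @cachedInlineCallbacks
# keeps one cache entry per user_id, while @cachedList(cached_method_name=
# "get_push_rules_for_user", list_name="user_ids") answers a bulk call
# from that same per-key cache, hitting the database only for the keys
# that miss and back-filling the cache with what it loads, e.g.
#
#   rules = yield store.bulk_get_push_rules(["@a:hs", "@b:hs"])
#   # -> {"@a:hs": [...], "@b:hs": [...]}; both entries now also serve
#   # later get_push_rules_for_user() calls.
#
# The user IDs in the example are invented for illustration.)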
from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList from twisted.internet import defer import logging @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) class PushRuleStore(SQLBaseStore): - @cachedInlineCallbacks() + @cachedInlineCallbacks(lru=True) def get_push_rules_for_user(self, user_id): rows = yield self._simple_select_list( table="push_rules", @@ -44,7 +44,7 @@ class PushRuleStore(SQLBaseStore): defer.returnValue(rows) - @cachedInlineCallbacks() + @cachedInlineCallbacks(lru=True) def get_push_rules_enabled_for_user(self, user_id): results = yield self._simple_select_list( table="push_rules_enable", @@ -60,7 +60,8 @@ class PushRuleStore(SQLBaseStore): r['rule_id']: False if r['enabled'] == 0 else True for r in results }) - @defer.inlineCallbacks + @cachedList(cached_method_name="get_push_rules_for_user", + list_name="user_ids", num_args=1, inlineCallbacks=True) def bulk_get_push_rules(self, user_ids): if not user_ids: defer.returnValue({}) @@ -75,13 +76,16 @@ class PushRuleStore(SQLBaseStore): desc="bulk_get_push_rules", ) - rows.sort(key=lambda e: (-e["priority_class"], -e["priority"])) + rows.sort( + key=lambda row: (-int(row["priority_class"]), -int(row["priority"])) + ) for row in rows: results.setdefault(row['user_name'], []).append(row) defer.returnValue(results) - @defer.inlineCallbacks + @cachedList(cached_method_name="get_push_rules_enabled_for_user", + list_name="user_ids", num_args=1, inlineCallbacks=True) def bulk_get_push_rules_enabled(self, user_ids): if not user_ids: defer.returnValue({}) From c626fc576a87f401c594cbb070d5b0000e45b4e1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 31 May 2016 13:53:48 +0100 Subject: [PATCH 033/414] Move the AS handler out of the Handlers object. Access it directly from the homeserver itself. It already wasn't inheriting from BaseHandler storing it on the Handlers object was already somewhat dubious. --- synapse/appservice/scheduler.py | 14 +++++++------- synapse/handlers/__init__.py | 11 ----------- synapse/handlers/appservice.py | 15 +++++---------- synapse/handlers/directory.py | 3 ++- synapse/notifier.py | 10 ++++------ synapse/server.py | 15 +++++++++++++++ tests/handlers/test_appservice.py | 6 +++--- 7 files changed, 36 insertions(+), 38 deletions(-) diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 47a4e9f864..9afc8fd754 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -56,22 +56,22 @@ import logging logger = logging.getLogger(__name__) -class AppServiceScheduler(object): +class ApplicationServiceScheduler(object): """ Public facing API for this module. Does the required DI to tie the components together. This also serves as the "event_pool", which in this case is a simple array. 
""" - def __init__(self, clock, store, as_api): - self.clock = clock - self.store = store - self.as_api = as_api + def __init__(self, hs): + self.clock = hs.get_clock() + self.store = hs.get_datastore() + self.as_api = hs.get_application_service_api() def create_recoverer(service, callback): - return _Recoverer(clock, store, as_api, service, callback) + return _Recoverer(self.clock, self.store, self.as_api, service, callback) self.txn_ctrl = _TransactionController( - clock, store, as_api, create_recoverer + self.clock, self.store, self.as_api, create_recoverer ) self.queuer = _ServiceQueuer(self.txn_ctrl) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index 0ac5d3da3a..c0069e23d6 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.appservice.scheduler import AppServiceScheduler -from synapse.appservice.api import ApplicationServiceApi from .register import RegistrationHandler from .room import ( RoomCreationHandler, RoomContextHandler, @@ -26,7 +24,6 @@ from .federation import FederationHandler from .profile import ProfileHandler from .directory import DirectoryHandler from .admin import AdminHandler -from .appservice import ApplicationServicesHandler from .auth import AuthHandler from .identity import IdentityHandler from .receipts import ReceiptsHandler @@ -53,14 +50,6 @@ class Handlers(object): self.directory_handler = DirectoryHandler(hs) self.admin_handler = AdminHandler(hs) self.receipts_handler = ReceiptsHandler(hs) - asapi = ApplicationServiceApi(hs) - self.appservice_handler = ApplicationServicesHandler( - hs, asapi, AppServiceScheduler( - clock=hs.get_clock(), - store=hs.get_datastore(), - as_api=asapi - ) - ) self.auth_handler = AuthHandler(hs) self.identity_handler = IdentityHandler(hs) self.search_handler = SearchHandler(hs) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 75fc74c797..051ccdb380 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -17,7 +17,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.appservice import ApplicationService -from synapse.types import UserID import logging @@ -35,16 +34,13 @@ def log_failure(failure): ) -# NB: Purposefully not inheriting BaseHandler since that contains way too much -# setup code which this handler does not need or use. This makes testing a lot -# easier. class ApplicationServicesHandler(object): - def __init__(self, hs, appservice_api, appservice_scheduler): + def __init__(self, hs): self.store = hs.get_datastore() - self.hs = hs - self.appservice_api = appservice_api - self.scheduler = appservice_scheduler + self.is_mine_id = hs.is_mine_id + self.appservice_api = hs.get_application_service_api() + self.scheduler = hs.get_application_service_scheduler() self.started_scheduler = False @defer.inlineCallbacks @@ -169,8 +165,7 @@ class ApplicationServicesHandler(object): @defer.inlineCallbacks def _is_unknown_user(self, user_id): - user = UserID.from_string(user_id) - if not self.hs.is_mine(user): + if not self.is_mine_id(user_id): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. 
defer.returnValue(False) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 8eeb225811..4bea7f2b19 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -33,6 +33,7 @@ class DirectoryHandler(BaseHandler): super(DirectoryHandler, self).__init__(hs) self.state = hs.get_state_handler() + self.appservice_handler = hs.get_application_service_handler() self.federation = hs.get_replication_layer() self.federation.register_query_handler( @@ -281,7 +282,7 @@ class DirectoryHandler(BaseHandler): ) if not result: # Query AS to see if it exists - as_handler = self.hs.get_handlers().appservice_handler + as_handler = self.appservice_handler result = yield as_handler.query_room_alias_exists(room_alias) defer.returnValue(result) diff --git a/synapse/notifier.py b/synapse/notifier.py index 33b79c0ec7..cbec4d30ae 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -140,8 +140,6 @@ class Notifier(object): UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def __init__(self, hs): - self.hs = hs - self.user_to_user_stream = {} self.room_to_user_streams = {} self.appservice_to_user_streams = {} @@ -151,6 +149,8 @@ class Notifier(object): self.pending_new_room_events = [] self.clock = hs.get_clock() + self.appservice_handler = hs.get_application_service_handler() + self.state_handler = hs.get_state_handler() hs.get_distributor().observe( "user_joined_room", self._user_joined_room @@ -232,9 +232,7 @@ class Notifier(object): def _on_new_room_event(self, event, room_stream_id, extra_users=[]): """Notify any user streams that are interested in this room event""" # poke any interested application service. - self.hs.get_handlers().appservice_handler.notify_interested_services( - event - ) + self.appservice_handler.notify_interested_services(event) app_streams = set() @@ -449,7 +447,7 @@ class Notifier(object): @defer.inlineCallbacks def _is_world_readable(self, room_id): - state = yield self.hs.get_state_handler().get_current_state( + state = yield self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility ) diff --git a/synapse/server.py b/synapse/server.py index bfd5608b7d..7cf22b1eea 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -22,6 +22,8 @@ from twisted.web.client import BrowserLikePolicyForHTTPS from twisted.enterprise import adbapi +from synapse.appservice.scheduler import ApplicationServiceScheduler +from synapse.appservice.api import ApplicationServiceApi from synapse.federation import initialize_http_replication from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.notifier import Notifier @@ -31,6 +33,7 @@ from synapse.handlers.presence import PresenceHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import TypingHandler from synapse.handlers.room import RoomListHandler +from synapse.handlers.appservice import ApplicationServicesHandler from synapse.state import StateHandler from synapse.storage import DataStore from synapse.util import Clock @@ -86,6 +89,9 @@ class HomeServer(object): 'sync_handler', 'typing_handler', 'room_list_handler', + 'application_service_api', + 'application_service_scheduler', + 'application_service_handler', 'notifier', 'distributor', 'client_resource', @@ -184,6 +190,15 @@ class HomeServer(object): def build_room_list_handler(self): return RoomListHandler(self) + def build_application_service_api(self): + return ApplicationServiceApi(self) + + def build_application_service_scheduler(self): + return 
ApplicationServiceScheduler(self) + + def build_application_service_handler(self): + return ApplicationServicesHandler(self) + def build_event_sources(self): return EventSources(self) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 7ddbbb9b4a..a884c95f8d 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -30,9 +30,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): self.mock_scheduler = Mock() hs = Mock() hs.get_datastore = Mock(return_value=self.mock_store) - self.handler = ApplicationServicesHandler( - hs, self.mock_as_api, self.mock_scheduler - ) + hs.get_application_service_api = Mock(return_value=self.mock_as_api) + hs.get_application_service_scheduler = Mock(return_value=self.mock_scheduler) + self.handler = ApplicationServicesHandler(hs) @defer.inlineCallbacks def test_notify_interested_services(self): From aefd2d1cbc83f2cdf5d45d4e9cd2a176747ac38a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 May 2016 15:32:32 +0100 Subject: [PATCH 034/414] Cache get_event_reference_hashes --- synapse/storage/signatures.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index b10f2a5787..ea6823f18d 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -19,17 +19,24 @@ from ._base import SQLBaseStore from unpaddedbase64 import encode_base64 from synapse.crypto.event_signing import compute_event_reference_hash +from synapse.util.caches.descriptors import cached, cachedList class SignatureStore(SQLBaseStore): """Persistence for event signatures and hashes""" + @cached(lru=True) + def get_event_reference_hash(self, event_id): + return self._get_event_reference_hashes_txn(event_id) + + @cachedList(cached_method_name="get_event_reference_hash", + list_name="event_ids", num_args=1) def get_event_reference_hashes(self, event_ids): def f(txn): - return [ - self._get_event_reference_hashes_txn(txn, ev) - for ev in event_ids - ] + return { + event_id: self._get_event_reference_hashes_txn(txn, event_id) + for event_id in event_ids + } return self.runInteraction( "get_event_reference_hashes", @@ -41,15 +48,15 @@ class SignatureStore(SQLBaseStore): hashes = yield self.get_event_reference_hashes( event_ids ) - hashes = [ - { + hashes = { + e_id: { k: encode_base64(v) for k, v in h.items() if k == "sha256" } - for h in hashes - ] + for e_id, h in hashes.items() + } - defer.returnValue(zip(event_ids, hashes)) + defer.returnValue(hashes.items()) def _get_event_reference_hashes_txn(self, txn, event_id): """Get all the hashes for a given PDU. 
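The @cached/@cachedList pairing in the patch above is worth spelling out: get_event_reference_hash memoises single-event lookups, and the batched get_event_reference_hashes (keyed back onto the single-item cache via cached_method_name) reuses those per-event entries, only hitting the database for the misses. A minimal plain-Python sketch of that cache-sharing idea, as a stand-in illustration rather than synapse's actual descriptors from synapse.util.caches.descriptors, with an invented fake table:

    # Stand-in for the @cached/@cachedList pattern; illustration only.
    class HashStore(object):
        def __init__(self):
            self._cache = {}  # event_id -> hash dict, shared by both paths
            self._db = {"$a:hs": {"sha256": "abc"}, "$b:hs": {"sha256": "def"}}

        def get_event_reference_hash(self, event_id):
            # Single lookup: consult the shared cache, fall back to the "db".
            if event_id not in self._cache:
                self._cache[event_id] = self._db[event_id]
            return self._cache[event_id]

        def get_event_reference_hashes(self, event_ids):
            # Batch lookup: fetch only the cache misses, then backfill the
            # cache so later single lookups are served without a query.
            for event_id in event_ids:
                if event_id not in self._cache:
                    self._cache[event_id] = self._db[event_id]
            return {e: self._cache[e] for e in event_ids}

The real decorators add LRU eviction, per-key deferreds and invalidation on top, but the contract the patch relies on is exactly this sharing between the list method and the single-item cache.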
From 4efa389299bfef694af41b642ba9623a8be5df93 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 May 2016 15:37:53 +0100 Subject: [PATCH 035/414] Fix GET /push_rules --- synapse/rest/client/v1/push_rule.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 02d837ee6a..c135353aab 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -132,6 +132,9 @@ class PushRuleRestServlet(ClientV1RestServlet): enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) + rawrules = {k: v for k, v in rawrules.item() if k is not None} + enabled_map = {k: v for k, v in enabled_map.item() if k is not None} + rules = format_push_rules_for_user(requester.user, rawrules, enabled_map) path = request.postpath[1:] From cca0093fa96cfec0566ff790ef990fad8b6763bb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 May 2016 15:44:08 +0100 Subject: [PATCH 036/414] Change fix --- synapse/rest/client/v1/push_rule.py | 3 --- synapse/storage/push_rule.py | 10 ++++++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c135353aab..02d837ee6a 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -132,9 +132,6 @@ class PushRuleRestServlet(ClientV1RestServlet): enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) - rawrules = {k: v for k, v in rawrules.item() if k is not None} - enabled_map = {k: v for k, v in enabled_map.item() if k is not None} - rules = format_push_rules_for_user(requester.user, rawrules, enabled_map) path = request.postpath[1:] diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f285f59afd..65c20e8900 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -66,7 +66,10 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - results = {} + results = { + user_id: [] + for user_id in user_ids + } rows = yield self._simple_select_many_batch( table="push_rules", @@ -90,7 +93,10 @@ class PushRuleStore(SQLBaseStore): if not user_ids: defer.returnValue({}) - results = {} + results = { + user_id: [] + for user_id in user_ids + } rows = yield self._simple_select_many_batch( table="push_rules_enable", From 1d4ee854e21626f64c945610e467f7e761534424 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 May 2016 15:45:53 +0100 Subject: [PATCH 037/414] Fix typo --- synapse/storage/push_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 65c20e8900..216a8bf69c 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -94,7 +94,7 @@ class PushRuleStore(SQLBaseStore): defer.returnValue({}) results = { - user_id: [] + user_id: {} for user_id in user_ids } From c8c5bf950a27e00e3e9ae57b98f38cab03cdc3c9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 31 May 2016 17:10:40 +0100 Subject: [PATCH 038/414] Fix synapse/storage/schema/delta/30/as_users.py --- synapse/storage/schema/delta/30/as_users.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/schema/delta/30/as_users.py b/synapse/storage/schema/delta/30/as_users.py index b417e3ac08..5b7d8d1ab5 100644 --- a/synapse/storage/schema/delta/30/as_users.py +++ b/synapse/storage/schema/delta/30/as_users.py @@ -12,7 +12,7 @@ # See the License for the specific 
language governing permissions and # limitations under the License. import logging -from synapse.storage.appservice import ApplicationServiceStore +from synapse.config.appservice import load_appservices logger = logging.getLogger(__name__) @@ -38,7 +38,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): logger.warning("Could not get app_service_config_files from config") pass - appservices = ApplicationServiceStore.load_appservices( + appservices = load_appservices( config.server_name, config_files ) From d240796dedcfae1f6929c1501e7e335df417cfaf Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 31 May 2016 17:20:07 +0100 Subject: [PATCH 039/414] Basic, un-cached support for secondary_directory_servers --- synapse/federation/federation_client.py | 21 ++++++++++++++++ synapse/federation/transport/client.py | 12 +++++++++ synapse/federation/transport/server.py | 2 +- synapse/handlers/room.py | 33 ++++++++++++++++++++++++- synapse/rest/client/v1/room.py | 3 ++- 5 files changed, 68 insertions(+), 3 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 37ee469fa2..ba8d71c050 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -24,6 +24,7 @@ from synapse.api.errors import ( CodeMessageException, HttpResponseException, SynapseError, ) from synapse.util import unwrapFirstError +from synapse.util.async import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.logutils import log_function from synapse.events import FrozenEvent @@ -550,6 +551,26 @@ class FederationClient(FederationBase): raise RuntimeError("Failed to send to any server.") + @defer.inlineCallbacks + def get_public_rooms(self, destinations): + results_by_server = {} + + @defer.inlineCallbacks + def _get_result(s): + if s == self.server_name: + defer.returnValue() + + try: + result = yield self.transport_layer.get_public_rooms(s) + results_by_server[s] = result + except: + logger.exception("Error getting room list from server %r", s) + + + yield concurrently_execute(_get_result, destinations, 3) + + defer.returnValue(results_by_server) + @defer.inlineCallbacks def query_auth(self, destination, room_id, event_id, local_auth): """ diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index cd2841c4db..ebb698e278 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -224,6 +224,18 @@ class TransportLayerClient(object): defer.returnValue(response) + @defer.inlineCallbacks + @log_function + def get_public_rooms(self, remote_server): + path = PREFIX + "/publicRooms" + + response = yield self.client.get_json( + destination=remote_server, + path=path, + ) + + defer.returnValue(response) + @defer.inlineCallbacks @log_function def exchange_third_party_invite(self, destination, room_id, event_dict): diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index f23c02efde..da9e7a326d 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -527,7 +527,7 @@ class PublicRoomList(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, request): - data = yield self.room_list_handler.get_public_room_list() + data = yield self.room_list_handler.get_local_public_room_list() defer.returnValue((200, data)) # Avoid doing remote HS authorization checks which are done by default by diff --git 
a/synapse/handlers/room.py b/synapse/handlers/room.py
index 3d63b3c513..b0aa9fb511 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -345,7 +345,7 @@ class RoomListHandler(BaseHandler):
         super(RoomListHandler, self).__init__(hs)
         self.response_cache = ResponseCache()
 
-    def get_public_room_list(self):
+    def get_local_public_room_list(self):
         result = self.response_cache.get(())
         if not result:
             result = self.response_cache.set((), self._get_public_room_list())
@@ -427,6 +427,37 @@ class RoomListHandler(BaseHandler):
         # FIXME (erikj): START is no longer a valid value
         defer.returnValue({"start": "START", "end": "END", "chunk": results})
 
+    @defer.inlineCallbacks
+    def get_aggregated_public_room_list(self):
+        """
+        Get the public room list from this server and the servers
+        specified in the secondary_directory_servers config option.
+        XXX: Pagination...
+        """
+        federated_by_server = yield self.hs.get_replication_layer().get_public_rooms(
+            self.hs.config.secondary_directory_servers
+        )
+        public_rooms = yield self.get_local_public_room_list()
+
+        # keep track of which room IDs we've seen so we can de-dup
+        room_ids = set()
+
+        # tag all the ones in our list with our server name.
+        # Also add them to the de-duping set
+        for room in public_rooms['chunk']:
+            room["server_name"] = self.hs.hostname
+            room_ids.add(room["room_id"])
+
+        # Now add the results from federation
+        for server_name, server_result in federated_by_server.items():
+            for room in server_result["chunk"]:
+                if room["room_id"] not in room_ids:
+                    room["server_name"] = server_name
+                    public_rooms["chunk"].append(room)
+                    room_ids.add(room["room_id"])
+
+        defer.returnValue(public_rooms)
+
 
 class RoomContextHandler(BaseHandler):
     @defer.inlineCallbacks
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 2d22bbdaa3..db52a1fc39 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -280,7 +280,8 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def on_GET(self, request):
         handler = self.hs.get_room_list_handler()
-        data = yield handler.get_public_room_list()
+        data = yield handler.get_aggregated_public_room_list()
+
         defer.returnValue((200, data))
 
From 963e3ed2828f6d1e704678af971ceffff3076115 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 31 May 2016 17:22:53 +0100
Subject: [PATCH 040/414] Apparently I am not permitted to have two blank lines here
---
 synapse/federation/federation_client.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index ba8d71c050..d835c1b038 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -566,7 +566,6 @@ class FederationClient(FederationBase):
             except:
                 logger.exception("Error getting room list from server %r", s)
 
-
         yield concurrently_execute(_get_result, destinations, 3)
 
         defer.returnValue(results_by_server)
From dea9f20f8ce105624933a819d7949d31b69aa141 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 31 May 2016 17:24:30 +0100
Subject: [PATCH 041/414] Force boolean
---
 synapse/storage/push_rule.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 216a8bf69c..ebb97c8474 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -106,7 +106,8 @@ class PushRuleStore(SQLBaseStore):
             desc="bulk_get_push_rules_enabled",
         )
         for row in rows:
-            results.setdefault(row['user_name'], {})[row['rule_id']] = row['enabled']
+            enabled = bool(row['enabled'])
+            results.setdefault(row['user_name'], {})[row['rule_id']] = enabled
         defer.returnValue(results)
 
     @defer.inlineCallbacks
From 6ca4d3ae9add2cf2bab3cf074072d7a7bb0b6553 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 31 May 2016 17:24:50 +0100
Subject: [PATCH 042/414] Add vector.im to default secondary_directory_servers and add comment explaining it's not a permanent solution
---
 synapse/config/server.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/synapse/config/server.py b/synapse/config/server.py
index 8d554d749d..c2d8f8a52f 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -159,8 +159,12 @@ class ServerConfig(Config):
 
         # A list of other Home Servers to fetch the public room directory from
         # and include in the public room directory of this home server
+        # This is a temporary stopgap solution to populate a new server with a
+        # list of rooms until there exists a good solution for a decentralized
+        # room directory.
         # secondary_directory_servers:
         #     - matrix.org
+        #     - vector.im
 
         # List of ports that Synapse should listen on, their purpose and their
         # configuration.
From 2a449fec4d03b7739269e832683905a18459e4c3 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 31 May 2016 18:27:23 +0100
Subject: [PATCH 043/414] Add cache to remote room lists

Poll for updates from remote servers, waiting for the poll if there's no
cache entry.
---
 synapse/handlers/room.py | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b0aa9fb511..77063b021a 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -36,6 +36,8 @@ import string
 
 logger = logging.getLogger(__name__)
 
+REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
+
 id_server_scheme = "https://"
 
 
@@ -344,6 +346,12 @@ class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
         self.response_cache = ResponseCache()
+        self.remote_list_request_cache = ResponseCache()
+        self.remote_list_cache = {}
+        self.fetch_looping_call = hs.get_clock().looping_call(
+            self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL
+        )
+        self.fetch_all_remote_lists()
 
     def get_local_public_room_list(self):
         result = self.response_cache.get(())
@@ -427,6 +435,14 @@ class RoomListHandler(BaseHandler):
         # FIXME (erikj): START is no longer a valid value
         defer.returnValue({"start": "START", "end": "END", "chunk": results})
 
+    @defer.inlineCallbacks
+    def fetch_all_remote_lists(self):
+        deferred = self.hs.get_replication_layer().get_public_rooms(
+            self.hs.config.secondary_directory_servers
+        )
+        self.remote_list_request_cache.set((), deferred)
+        yield deferred
+
     @defer.inlineCallbacks
     def get_aggregated_public_room_list(self):
         """
@@ -434,9 +450,19 @@
         specified in the secondary_directory_servers config option.
         XXX: Pagination...
         """
-        federated_by_server = yield self.hs.get_replication_layer().get_public_rooms(
-            self.hs.config.secondary_directory_servers
-        )
+        # We return the results from our cache which is updated by a looping call,
+        # unless we're missing a cache entry, in which case wait for the result
+        # of the fetch if there's one in progress. If not, omit that server.
+ wait = False + for s in self.hs.config.secondary_directory_servers: + if s not in self.remote_list_cache: + logger.warn("No cached room list from %s: waiting for fetch", s) + wait = True + break + + if wait and self.remote_list_request_cache.get(()): + yield self.remote_list_request_cache.get(()) + public_rooms = yield self.get_local_public_room_list() # keep track of which room IDs we've seen so we can de-dup @@ -449,7 +475,7 @@ class RoomListHandler(BaseHandler): room_ids.add(room["room_id"]) # Now add the results from federation - for server_name, server_result in federated_by_server.items(): + for server_name, server_result in self.remote_list_cache.items(): for room in server_result["chunk"]: if room["room_id"] not in room_ids: room["server_name"] = server_name From 58ee43d020804448fe2ae504072e3c2addae6eb1 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 31 May 2016 20:28:42 +0100 Subject: [PATCH 044/414] handle emotes & notices correctly in email notifs --- res/templates/notif.html | 6 +++++- res/templates/notif.txt | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/res/templates/notif.html b/res/templates/notif.html index 834840861e..88b921ca9c 100644 --- a/res/templates/notif.html +++ b/res/templates/notif.html @@ -17,11 +17,15 @@ {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} -
{{ message.sender_name }}
+{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
 {% endif %}
{% if message.msgtype == "m.text" %} {{ message.body_text_html }} + {% elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} {% elif message.msgtype == "m.image" %} {% elif message.msgtype == "m.file" %} diff --git a/res/templates/notif.txt b/res/templates/notif.txt index a3ddac80ce..a37bee9833 100644 --- a/res/templates/notif.txt +++ b/res/templates/notif.txt @@ -1,7 +1,11 @@ {% for message in notif.messages %} -{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) {% if message.msgtype == "m.text" %} {{ message.body_text_plain }} +{% elif message.msgtype == "m.emote" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.notice" %} +{{ message.body_text_plain }} {% elif message.msgtype == "m.image" %} {{ message.body_text_plain }} {% elif message.msgtype == "m.file" %} From 6ecb2ca4ec3fae8c6f2e837b4ec99cc6929de638 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Jun 2016 09:48:55 +0100 Subject: [PATCH 045/414] pep8 --- synapse/federation/transport/server.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index da9e7a326d..a1a334955f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -134,7 +134,8 @@ class Authenticator(object): class BaseFederationServlet(object): - def __init__(self, handler, authenticator, ratelimiter, server_name, room_list_handler): + def __init__(self, handler, authenticator, ratelimiter, server_name, + room_list_handler): self.handler = handler self.authenticator = authenticator self.ratelimiter = ratelimiter @@ -492,6 +493,7 @@ class OpenIdUserInfo(BaseFederationServlet): def _wrap(self, code): return code + class PublicRoomList(BaseFederationServlet): """ Fetch the public room list for this server. 
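As a quick check of the template branching added to notif.html and notif.txt above: the emote case gains a leading "*" on the sender line, and m.emote and m.notice now fall through to the plain message body rather than being dropped. A condensed, hedged re-creation of the notif.txt logic with Jinja2 (the sample messages are invented, and the real template also renders timestamps and the image/file cases):

    from jinja2 import Template

    # Condensed version of the sender/body branching from notif.txt above.
    notif = Template(
        "{% if message.msgtype == 'm.emote' %}* {% endif %}"
        "{{ message.sender_name }}: {{ message.body_text_plain }}"
    )

    messages = [
        {"msgtype": "m.text", "sender_name": "alice", "body_text_plain": "hi"},
        {"msgtype": "m.emote", "sender_name": "bob", "body_text_plain": "waves"},
        {"msgtype": "m.notice", "sender_name": "a bot", "body_text_plain": "ping"},
    ]
    for message in messages:
        print(notif.render(message=message))
    # alice: hi
    # * bob: waves
    # a bot: ping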
From 43db0d9f6a314679bd25b82354e5c469e7a010b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Jun 2016 10:31:09 +0100 Subject: [PATCH 046/414] Add get_users_with_read_receipts_in_room cache --- synapse/push/bulk_push_rule_evaluator.py | 8 +++---- synapse/storage/receipts.py | 28 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 25f2fb9da4..1e5c4b073c 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -87,13 +87,13 @@ def evaluator_for_event(event, hs, store): all_in_room = yield store.get_users_in_room(room_id) all_in_room = set(all_in_room) - receipts = yield store.get_receipts_for_room(room_id, "m.read") + users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id) # any users with pushers must be ours: they have pushers user_ids = set(users_with_pushers) - for r in receipts: - if hs.is_mine_id(r['user_id']) and r['user_id'] in all_in_room: - user_ids.add(r['user_id']) + for uid in users_with_receipts: + if hs.is_mine_id(uid) and uid in all_in_room: + user_ids.add(uid) # if this event is an invite event, we may need to run rules for the user # who's been invited, otherwise they won't get told they've been invited diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index fdcf28f3e1..964f30dff7 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -34,6 +34,26 @@ class ReceiptsStore(SQLBaseStore): "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token() ) + @cachedInlineCallbacks() + def get_users_with_read_receipts_in_room(self, room_id): + receipts = yield self.get_receipts_for_room(room_id, "m.read") + defer.returnValue(set(r['user_id'] for r in receipts)) + + def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type, + user_id): + if receipt_type != "m.read": + return + + # Returns an ObservableDeferred + res = self.get_users_with_read_receipts_in_room.cache.get((room_id,), None) + + if res and res.called and user_id in res.result: + # We'd only be adding to the set, so no point invalidating if the + # user is already there + return + + self.get_users_with_read_receipts_in_room.invalidate((room_id,)) + @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): return self._simple_select_list( @@ -228,6 +248,10 @@ class ReceiptsStore(SQLBaseStore): txn.call_after( self.get_receipts_for_room.invalidate, (room_id, receipt_type) ) + txn.call_after( + self._invalidate_get_users_with_receipts_in_room, + room_id, receipt_type, user_id, + ) txn.call_after( self.get_receipts_for_user.invalidate, (user_id, receipt_type) ) @@ -373,6 +397,10 @@ class ReceiptsStore(SQLBaseStore): txn.call_after( self.get_receipts_for_room.invalidate, (room_id, receipt_type) ) + txn.call_after( + self._invalidate_get_users_with_receipts_in_room, + room_id, receipt_type, user_id, + ) txn.call_after( self.get_receipts_for_user.invalidate, (user_id, receipt_type) ) From 195254cae80f4748c3fc0ac3b46000047c2e6cc0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Jun 2016 11:14:16 +0100 Subject: [PATCH 047/414] Inject fake room list handler in tests Otherwise it tries to start the remote public room list updating looping call which breaks. 
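The breakage is mechanical: since PATCH 043, the room list handler starts a looping call from its constructor, so merely building a homeserver in a test schedules repeating work against the reactor. A small sketch of that failure mode using twisted's test clock (PollingHandler is a made-up stand-in, not synapse's RoomListHandler):

    from twisted.internet import task

    class PollingHandler(object):
        # Made-up stand-in: starts polling from its constructor, much as
        # the room list handler starts fetch_all_remote_lists.
        def __init__(self, clock):
            self.polls = 0
            loop = task.LoopingCall(self.poll)
            loop.clock = clock          # point the timer at the test clock
            loop.start(60, now=True)    # fires immediately, then every 60s

        def poll(self):
            self.polls += 1

    clock = task.Clock()
    handler = PollingHandler(clock)
    clock.advance(120)
    assert handler.polls == 3  # construction alone scheduled real work

Injecting a plain object() as the room_list_handler, as the diff below does, means that constructor (and its timer) never runs in tests that don't care about the room list.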
--- tests/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/utils.py b/tests/utils.py index 59d985b5f2..006abedbc1 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -67,6 +67,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): version_string="Synapse/tests", database_engine=create_engine(config.database_config), get_db_conn=db_pool.get_db_conn, + room_list_handler=object(), **kargs ) hs.setup() @@ -75,6 +76,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): name, db_pool=None, datastore=datastore, config=config, version_string="Synapse/tests", database_engine=create_engine(config.database_config), + room_list_handler=object(), **kargs ) From d60eed07109f61ebe2120e1eb566e5bb7095fbad Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Jun 2016 11:45:43 +0100 Subject: [PATCH 048/414] Limit number of notifications in an email notification --- synapse/storage/event_push_actions.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 4dae51a172..940e11d7a2 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -119,7 +119,8 @@ class EventPushActionsStore(SQLBaseStore): @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range(self, user_id, min_stream_ordering, - max_stream_ordering=None): + max_stream_ordering=None, + limit=20): def get_after_receipt(txn): sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, " @@ -151,7 +152,8 @@ class EventPushActionsStore(SQLBaseStore): if max_stream_ordering is not None: sql += " AND ep.stream_ordering <= ?" args.append(max_stream_ordering) - sql += " ORDER BY ep.stream_ordering ASC" + sql += " ORDER BY ep.stream_ordering ASC LIMIT ?" 
+            args.append(limit)
             txn.execute(sql, args)
             return txn.fetchall()
         after_read_receipt = yield self.runInteraction(
From c8285564a3772db387fd5c94b1a82329dc320e36 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 11:08:45 +0100
Subject: [PATCH 049/414] Use state to calculate get_users_in_room
---
 synapse/push/action_generator.py         |  2 +-
 synapse/push/bulk_push_rule_evaluator.py | 27 +++++++++------
 synapse/storage/events.py                |  3 --
 synapse/storage/pusher.py                | 42 ++++++++++++++++--------
 synapse/storage/roommember.py            |  3 --
 5 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 9b208668b6..46e768e35c 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -40,7 +40,7 @@ class ActionGenerator:
     def handle_push_actions_for_event(self, event, context):
         with Measure(self.clock, "handle_push_actions_for_event"):
             bulk_evaluator = yield evaluator_for_event(
-                event, self.hs, self.store
+                event, self.hs, self.store, context.current_state
             )
 
             actions_by_user = yield bulk_evaluator.action_for_event_by_user(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 1e5c4b073c..8c59e59e03 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -21,7 +21,7 @@ from twisted.internet import defer
 from .baserules import list_with_base_rules
 from .push_rule_evaluator import PushRuleEvaluatorForEvent
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.visibility import filter_events_for_clients
 
 
@@ -72,20 +72,24 @@
 
 @defer.inlineCallbacks
-def evaluator_for_event(event, hs, store):
+def evaluator_for_event(event, hs, store, current_state):
     room_id = event.room_id
-
-    # users in the room who have pushers need to get push rules run because
-    # that's how their pushers work
-    users_with_pushers = yield store.get_users_with_pushers_in_room(room_id)
-
     # We also will want to generate notifs for other people in the room so
     # their unread counts are correct in the event stream, but to avoid
    # generating them for bot / AS users etc, we only do so for people who've
    # sent a read receipt into the room.
- all_in_room = yield store.get_users_in_room(room_id) - all_in_room = set(all_in_room) + all_in_room = set( + e.state_key for e in current_state.values() + if e.type == EventTypes.Member and e.membership == Membership.JOIN + ) + + # users in the room who have pushers need to get push rules run because + # that's how their pushers work + if_users_with_pushers = yield store.get_if_users_have_pushers(all_in_room) + users_with_pushers = set( + uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher + ) users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id) @@ -143,7 +147,10 @@ class BulkPushRuleEvaluator: self.store, user_tuples, [event], {event.event_id: current_state} ) - room_members = yield self.store.get_users_in_room(self.room_id) + room_members = set( + e.state_key for e in current_state.values() + if e.type == EventTypes.Member and e.membership == Membership.JOIN + ) evaluator = PushRuleEvaluatorForEvent(event, len(room_members)) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 4655669ba0..2b3f79577b 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -342,9 +342,6 @@ class EventsStore(SQLBaseStore): txn.call_after(self._get_current_state_for_key.invalidate_all) txn.call_after(self.get_rooms_for_user.invalidate_all) txn.call_after(self.get_users_in_room.invalidate, (event.room_id,)) - txn.call_after( - self.get_users_with_pushers_in_room.invalidate, (event.room_id,) - ) txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,)) txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,)) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 9e8e2e2964..39d5349eaa 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -18,7 +18,7 @@ from twisted.internet import defer from canonicaljson import encode_canonical_json -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList import logging import simplejson as json @@ -135,19 +135,35 @@ class PusherStore(SQLBaseStore): "get_all_updated_pushers", get_all_updated_pushers_txn ) - @cachedInlineCallbacks(num_args=1) - def get_users_with_pushers_in_room(self, room_id): - users = yield self.get_users_in_room(room_id) - + @cachedInlineCallbacks(lru=True, num_args=1) + def get_if_user_has_pusher(self, user_id): result = yield self._simple_select_many_batch( table='pushers', - column='user_name', - iterable=users, - retcols=['user_name'], - desc='get_users_with_pushers_in_room' + keyvalues={ + 'user_name': 'user_id', + }, + retcol='user_name', + desc='get_if_user_has_pusher', + allow_none=True, ) - defer.returnValue([r['user_name'] for r in result]) + defer.returnValue(bool(result)) + + @cachedList(cached_method_name="get_if_user_has_pusher", + list_name="user_ids", num_args=1, inlineCallbacks=True) + def get_if_users_have_pushers(self, user_ids): + rows = yield self._simple_select_many_batch( + table='pushers', + column='user_name', + iterable=user_ids, + retcols=['user_name'], + desc='get_if_users_have_pushers' + ) + + result = {user_id: False for user_id in user_ids} + result.update({r['user_name']: True for r in rows}) + + defer.returnValue(result) @defer.inlineCallbacks def add_pusher(self, user_id, access_token, kind, app_id, @@ -178,16 +194,16 @@ class PusherStore(SQLBaseStore): }, ) if newly_inserted: - # get_users_with_pushers_in_room only cares if the user has + # get_if_user_has_pusher only cares 
if the user has # at least *one* pusher. - txn.call_after(self.get_users_with_pushers_in_room.invalidate_all) + txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,)) yield self.runInteraction("add_pusher", f) @defer.inlineCallbacks def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id): def delete_pusher_txn(txn, stream_id): - txn.call_after(self.get_users_with_pushers_in_room.invalidate_all) + txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,)) self._simple_delete_one_txn( txn, diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index face685ed2..41b395e07c 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -58,9 +58,6 @@ class RoomMemberStore(SQLBaseStore): txn.call_after(self.get_rooms_for_user.invalidate, (event.state_key,)) txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,)) txn.call_after(self.get_users_in_room.invalidate, (event.room_id,)) - txn.call_after( - self.get_users_with_pushers_in_room.invalidate, (event.room_id,) - ) txn.call_after( self._membership_stream_cache.entity_has_changed, event.state_key, event.internal_metadata.stream_ordering From 991af8b0d6406b633386384d823e5c3a9c2ceb8b Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Jun 2016 17:40:52 +0100 Subject: [PATCH 050/414] WIP on unsubscribing email notifs without logging in --- synapse/api/auth.py | 25 +++++++++------ synapse/rest/client/v1/pusher.py | 55 +++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 2474a1453b..2ece59bb19 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""This module contains classes for authenticating the user.""" from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json, SignatureVerifyException @@ -42,13 +41,20 @@ AuthEventTypes = ( class Auth(object): - + """ + FIXME: This class contains a mix of functions for authenticating users + of our client-server API and authenticating events added to room graphs. + """ def __init__(self, hs): self.hs = hs self.clock = hs.get_clock() self.store = hs.get_datastore() self.state = hs.get_state_handler() self.TOKEN_NOT_FOUND_HTTP_STATUS = 401 + # Docs for these currently lives at + # https://github.com/matrix-org/matrix-doc/blob/master/drafts/macaroons_caveats.rst + # In addition, we have type == delete_pusher which grants access only to + # delete pushers. self._KNOWN_CAVEAT_PREFIXES = set([ "gen = ", "guest = ", @@ -507,7 +513,7 @@ class Auth(object): return default @defer.inlineCallbacks - def get_user_by_req(self, request, allow_guest=False): + def get_user_by_req(self, request, allow_guest=False, rights="access"): """ Get a registered user's ID. Args: @@ -529,7 +535,7 @@ class Auth(object): ) access_token = request.args["access_token"][0] - user_info = yield self.get_user_by_access_token(access_token) + user_info = yield self.get_user_by_access_token(access_token, rights) user = user_info["user"] token_id = user_info["token_id"] is_guest = user_info["is_guest"] @@ -590,7 +596,7 @@ class Auth(object): defer.returnValue(user_id) @defer.inlineCallbacks - def get_user_by_access_token(self, token): + def get_user_by_access_token(self, token, rights="access"): """ Get a registered user's ID. 
Args: @@ -601,7 +607,7 @@ class Auth(object): AuthError if no user by that token exists or the token is invalid. """ try: - ret = yield self.get_user_from_macaroon(token) + ret = yield self.get_user_from_macaroon(token, rights) except AuthError: # TODO(daniel): Remove this fallback when all existing access tokens # have been re-issued as macaroons. @@ -609,11 +615,11 @@ class Auth(object): defer.returnValue(ret) @defer.inlineCallbacks - def get_user_from_macaroon(self, macaroon_str): + def get_user_from_macaroon(self, macaroon_str, rights="access"): try: macaroon = pymacaroons.Macaroon.deserialize(macaroon_str) - self.validate_macaroon(macaroon, "access", self.hs.config.expire_access_token) + self.validate_macaroon(macaroon, rights, self.hs.config.expire_access_token) user_prefix = "user_id = " user = None @@ -667,7 +673,8 @@ class Auth(object): Args: macaroon(pymacaroons.Macaroon): The macaroon to validate - type_string(str): The kind of token this is (e.g. "access", "refresh") + type_string(str): The kind of token required (e.g. "access", "refresh", + "delete_pusher") verify_expiry(bool): Whether to verify whether the macaroon has expired. This should really always be True, but no clients currently implement token refresh, so we can't enforce expiry yet. diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index ab928a16da..fa7a0992dd 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -17,7 +17,11 @@ from twisted.internet import defer from synapse.api.errors import SynapseError, Codes from synapse.push import PusherConfigException -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import ( + parse_json_object_from_request, parse_string, RestServlet +) +from synapse.http.server import finish_request +from synapse.api.errors import StoreError from .base import ClientV1RestServlet, client_path_patterns @@ -136,6 +140,55 @@ class PushersSetRestServlet(ClientV1RestServlet): return 200, {} +class PushersRemoveRestServlet(RestServlet): + """ + To allow pusher to be delete by clicking a link (ie. 
GET request) + """ + PATTERNS = client_path_patterns("/pushers/remove$") + SUCCESS_HTML = "You have been unsubscribed" + + def __init__(self, hs): + super(RestServlet, self).__init__() + self.notifier = hs.get_notifier() + + @defer.inlineCallbacks + def on_GET(self, request): + requester = yield self.auth.get_user_by_req(request, "delete_pusher") + user = requester.user + + app_id = parse_string(request, "app_id", required=True) + pushkey = parse_string(request, "pushkey", required=True) + + pusher_pool = self.hs.get_pusherpool() + + try: + yield pusher_pool.remove_pusher( + app_id=app_id, + pushkey=pushkey, + user_id=user.to_string(), + ) + except StoreError as se: + if se.code != 404: + # This is fine: they're already unsubscribed + raise + + self.notifier.on_new_replication_data() + + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Server", self.hs.version_string) + request.setHeader(b"Content-Length", b"%d" % ( + len(PushersRemoveRestServlet.SUCCESS_HTML), + )) + request.write(PushersRemoveRestServlet.SUCCESS_HTML) + finish_request(request) + defer.returnValue(None) + + def on_OPTIONS(self, _): + return 200, {} + + def register_servlets(hs, http_server): PushersRestServlet(hs).register(http_server) PushersSetRestServlet(hs).register(http_server) + PushersRemoveRestServlet(hs).register(http_server) From e0deeff23eec099ad6bcb6e7170f524dc14982e4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 1 Jun 2016 17:58:58 +0100 Subject: [PATCH 051/414] Fix room list spidering --- synapse/handlers/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 77063b021a..9fd34588dd 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -441,7 +441,7 @@ class RoomListHandler(BaseHandler): self.hs.config.secondary_directory_servers ) self.remote_list_request_cache.set((), deferred) - yield deferred + self.remote_list_cache = yield deferred @defer.inlineCallbacks def get_aggregated_public_room_list(self): From aaa70e26a2eb37fbdf728393148e003dc9866afd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 1 Jun 2016 22:13:47 +0100 Subject: [PATCH 052/414] special case m.room.third_party_invite event auth to match invites, otherwise they get out of sync and you get https://github.com/vector-im/vector-web/issues/1208 --- synapse/api/auth.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 2474a1453b..007a0998a7 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -120,6 +120,24 @@ class Auth(object): return allowed self.check_event_sender_in_room(event, auth_events) + + # Special case to allow m.room.third_party_invite events wherever + # a user is allowed to issue invites. Fixes + # https://github.com/vector-im/vector-web/issues/1208 hopefully + if event.type == EventTypes.ThirdPartyInvite: + user_level = self._get_user_power_level(event.user_id, auth_events) + invite_level = self._get_named_level(auth_events, "invite", 0) + + if user_level < invite_level: + raise AuthError( + 403, ( + "You cannot issue a third party invite for %s." 
% + (event.content.display_name,) + ) + ) + else: + return True + self._can_send_event(event, auth_events) if event.type == EventTypes.PowerLevels: From e793866398ffb3e222a86ebb15b9d24220accbc8 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 2 Jun 2016 09:41:13 +0100 Subject: [PATCH 053/414] Use user_id in email greeting if display name is null --- synapse/push/mailer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 3ae92d1574..fe5d67a03c 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -122,6 +122,8 @@ class Mailer(object): user_display_name = yield self.store.get_profile_displayname( UserID.from_string(user_id).localpart ) + if user_display_name is None: + user_display_name = user_id except StoreError: user_display_name = user_id From a15ad608496fd29fb8bf289152c23adca822beca Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 2 Jun 2016 11:44:15 +0100 Subject: [PATCH 054/414] Email unsubscribing that may in theory, work Were it not for that fact that you can't use the base handler in the pusher because it pulls in the world. Comitting while I fix that on a different branch. --- synapse/handlers/auth.py | 5 +++++ synapse/push/emailpusher.py | 2 +- synapse/push/mailer.py | 21 ++++++++++++++++----- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 26c865e171..200793b5ed 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -529,6 +529,11 @@ class AuthHandler(BaseHandler): macaroon.add_first_party_caveat("time < %d" % (expiry,)) return macaroon.serialize() + def generate_delete_pusher_token(self, user_id): + macaroon = self._generate_base_macaroon(user_id) + macaroon.add_first_party_caveat("type = delete_pusher") + return macaroon.serialize() + def validate_short_term_login_token_and_get_user_id(self, login_token): try: macaroon = pymacaroons.Macaroon.deserialize(login_token) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index a72cba8306..46d7c0434b 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -273,5 +273,5 @@ class EmailPusher(object): logger.info("Sending notif email for user %r", self.user_id) yield self.mailer.send_notification_mail( - self.user_id, self.email, push_actions, reason + self.app_id, self.user_id, self.email, push_actions, reason ) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 3ae92d1574..95250bad7d 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -81,6 +81,7 @@ class Mailer(object): def __init__(self, hs): self.hs = hs self.store = self.hs.get_datastore() + self.handlers = self.hs.get_handlers() self.state_handler = self.hs.get_state_handler() loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir) self.app_name = self.hs.config.email_app_name @@ -95,7 +96,8 @@ class Mailer(object): ) @defer.inlineCallbacks - def send_notification_mail(self, user_id, email_address, push_actions, reason): + def send_notification_mail(self, app_id, user_id, email_address, + push_actions, reason): raw_from = email.utils.parseaddr(self.hs.config.email_notif_from)[1] raw_to = email.utils.parseaddr(email_address)[1] @@ -157,7 +159,7 @@ class Mailer(object): template_vars = { "user_display_name": user_display_name, - "unsubscribe_link": self.make_unsubscribe_link(), + "unsubscribe_link": self.make_unsubscribe_link(app_id, email_address), "summary_text": summary_text, "app_name": self.app_name, "rooms": rooms, @@ 
-423,9 +425,18 @@ class Mailer(object): notif['room_id'], notif['event_id'] ) - def make_unsubscribe_link(self): - # XXX: matrix.to - return "https://vector.im/#/settings" + def make_unsubscribe_link(self, app_id, email_address): + params = { + "access_token": self.handlers.auth.generate_delete_pusher_token(), + "app_id": app_id, + "pushkey": email_address, + } + + # XXX: make r0 once API is stable + return "%s_matrix/client/unstable/pushers/remove?%s" % ( + self.hs.config.public_baseurl, + urllib.urlencode(params), + ) def mxc_to_http_filter(self, value, width, height, resize_method="crop"): if value[0:6] != "mxc://": From f84b89f0c6b2e67897fec8639b79bf1d45c8f2b6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 2 Jun 2016 13:29:48 +0100 Subject: [PATCH 055/414] if an email pusher specifies a brand param, use it --- synapse/push/emailpusher.py | 7 ++++++- synapse/push/mailer.py | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index a72cba8306..e38ed02006 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -72,7 +72,12 @@ class EmailPusher(object): self.processing = False if self.hs.config.email_enable_notifs: - self.mailer = Mailer(self.hs) + if 'data' in pusherdict and 'brand' in pusherdict['data']: + app_name = pusherdict['data']['brand'] + else: + app_name = self.hs.config.email_app_name + + self.mailer = Mailer(self.hs, app_name) else: self.mailer = None diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index fe5d67a03c..0e9d8ccb53 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -78,12 +78,12 @@ ALLOWED_ATTRS = { class Mailer(object): - def __init__(self, hs): + def __init__(self, hs, app_name): self.hs = hs self.store = self.hs.get_datastore() self.state_handler = self.hs.get_state_handler() loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir) - self.app_name = self.hs.config.email_app_name + self.app_name = app_name env = jinja2.Environment(loader=loader) env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = self.mxc_to_http_filter From 4a10510cd5aff790127a185ecefc83b881a717cc Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 2 Jun 2016 13:31:45 +0100 Subject: [PATCH 056/414] Split out the auth handler --- synapse/handlers/__init__.py | 2 -- synapse/handlers/register.py | 2 +- synapse/rest/client/v1/login.py | 11 ++++++----- synapse/rest/client/v2_alpha/account.py | 4 ++-- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/client/v2_alpha/register.py | 2 +- synapse/rest/client/v2_alpha/tokenrefresh.py | 2 +- synapse/server.py | 5 +++++ tests/rest/client/v2_alpha/test_register.py | 2 +- tests/utils.py | 15 +++++---------- 10 files changed, 23 insertions(+), 24 deletions(-) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index c0069e23d6..d28e07f0d9 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -24,7 +24,6 @@ from .federation import FederationHandler from .profile import ProfileHandler from .directory import DirectoryHandler from .admin import AdminHandler -from .auth import AuthHandler from .identity import IdentityHandler from .receipts import ReceiptsHandler from .search import SearchHandler @@ -50,7 +49,6 @@ class Handlers(object): self.directory_handler = DirectoryHandler(hs) self.admin_handler = AdminHandler(hs) self.receipts_handler = ReceiptsHandler(hs) - self.auth_handler = AuthHandler(hs) self.identity_handler = IdentityHandler(hs) 
self.search_handler = SearchHandler(hs) self.room_context_handler = RoomContextHandler(hs) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 16f33f8371..bbc07b045e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -413,7 +413,7 @@ class RegistrationHandler(BaseHandler): defer.returnValue((user_id, token)) def auth_handler(self): - return self.hs.get_handlers().auth_handler + return self.hs.get_auth_handler() @defer.inlineCallbacks def guest_access_token_for(self, medium, address, inviter_user_id): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 3b5544851b..8df9d10efa 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -58,6 +58,7 @@ class LoginRestServlet(ClientV1RestServlet): self.cas_required_attributes = hs.config.cas_required_attributes self.servername = hs.config.server_name self.http_client = hs.get_simple_http_client() + self.auth_handler = self.hs.get_auth_handler() def on_GET(self, request): flows = [] @@ -143,7 +144,7 @@ class LoginRestServlet(ClientV1RestServlet): user_id, self.hs.hostname ).to_string() - auth_handler = self.handlers.auth_handler + auth_handler = self.auth_handler user_id, access_token, refresh_token = yield auth_handler.login_with_password( user_id=user_id, password=login_submission["password"]) @@ -160,7 +161,7 @@ class LoginRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def do_token_login(self, login_submission): token = login_submission['token'] - auth_handler = self.handlers.auth_handler + auth_handler = self.auth_handler user_id = ( yield auth_handler.validate_short_term_login_token_and_get_user_id(token) ) @@ -194,7 +195,7 @@ class LoginRestServlet(ClientV1RestServlet): raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) user_id = UserID.create(user, self.hs.hostname).to_string() - auth_handler = self.handlers.auth_handler + auth_handler = self.auth_handler user_exists = yield auth_handler.does_user_exist(user_id) if user_exists: user_id, access_token, refresh_token = ( @@ -243,7 +244,7 @@ class LoginRestServlet(ClientV1RestServlet): raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) user_id = UserID.create(user, self.hs.hostname).to_string() - auth_handler = self.handlers.auth_handler + auth_handler = self.auth_handler user_exists = yield auth_handler.does_user_exist(user_id) if user_exists: user_id, access_token, refresh_token = ( @@ -412,7 +413,7 @@ class CasTicketServlet(ClientV1RestServlet): raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED) user_id = UserID.create(user, self.hs.hostname).to_string() - auth_handler = self.handlers.auth_handler + auth_handler = self.auth_handler user_exists = yield auth_handler.does_user_exist(user_id) if not user_exists: user_id, _ = ( diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c88c270537..9a84873a5f 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -35,7 +35,7 @@ class PasswordRestServlet(RestServlet): super(PasswordRestServlet, self).__init__() self.hs = hs self.auth = hs.get_auth() - self.auth_handler = hs.get_handlers().auth_handler + self.auth_handler = hs.get_auth_handler() @defer.inlineCallbacks def on_POST(self, request): @@ -97,7 +97,7 @@ class ThreepidRestServlet(RestServlet): self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() - self.auth_handler = 
hs.get_handlers().auth_handler + self.auth_handler = hs.get_auth_handler() @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 78181b7b18..58d3cad6a1 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -104,7 +104,7 @@ class AuthRestServlet(RestServlet): super(AuthRestServlet, self).__init__() self.hs = hs self.auth = hs.get_auth() - self.auth_handler = hs.get_handlers().auth_handler + self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_handlers().registration_handler @defer.inlineCallbacks diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 1ecc02d94d..2088c316d1 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -49,7 +49,7 @@ class RegisterRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() - self.auth_handler = hs.get_handlers().auth_handler + self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_handlers().registration_handler self.identity_handler = hs.get_handlers().identity_handler diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index a158c2209a..8270e8787f 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -38,7 +38,7 @@ class TokenRefreshRestServlet(RestServlet): body = parse_json_object_from_request(request) try: old_refresh_token = body["refresh_token"] - auth_handler = self.hs.get_handlers().auth_handler + auth_handler = self.hs.get_auth_handler() (user_id, new_refresh_token) = yield self.store.exchange_refresh_token( old_refresh_token, auth_handler.generate_refresh_token) new_access_token = yield auth_handler.issue_access_token(user_id) diff --git a/synapse/server.py b/synapse/server.py index 7cf22b1eea..dd4b81c658 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -33,6 +33,7 @@ from synapse.handlers.presence import PresenceHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import TypingHandler from synapse.handlers.room import RoomListHandler +from synapse.handlers.auth import AuthHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.state import StateHandler from synapse.storage import DataStore @@ -89,6 +90,7 @@ class HomeServer(object): 'sync_handler', 'typing_handler', 'room_list_handler', + 'auth_handler', 'application_service_api', 'application_service_scheduler', 'application_service_handler', @@ -190,6 +192,9 @@ class HomeServer(object): def build_room_list_handler(self): return RoomListHandler(self) + def build_auth_handler(self): + return AuthHandler(self) + def build_application_service_api(self): return ApplicationServiceApi(self) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index affd42c015..cda0a2b27c 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -33,7 +33,6 @@ class RegisterRestServletTestCase(unittest.TestCase): # do the dance to hook it up to the hs global self.handlers = Mock( - auth_handler=self.auth_handler, registration_handler=self.registration_handler, identity_handler=self.identity_handler, login_handler=self.login_handler @@ -42,6 +41,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.hs.hostname = 
"superbig~testing~thing.com" self.hs.get_auth = Mock(return_value=self.auth) self.hs.get_handlers = Mock(return_value=self.handlers) + self.hs.get_auth_handler = Mock(return_value=self.auth_handler) self.hs.config.enable_registration = True # init the thing we're testing diff --git a/tests/utils.py b/tests/utils.py index 006abedbc1..e19ae581e0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -81,16 +81,11 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): ) # bcrypt is far too slow to be doing in unit tests - def swap_out_hash_for_testing(old_build_handlers): - def build_handlers(): - handlers = old_build_handlers() - auth_handler = handlers.auth_handler - auth_handler.hash = lambda p: hashlib.md5(p).hexdigest() - auth_handler.validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h - return handlers - return build_handlers - - hs.build_handlers = swap_out_hash_for_testing(hs.build_handlers) + # Need to let the HS build an auth handler and then mess with it + # because AuthHandler's constructor requires the HS, so we can't make one + # beforehand and pass it in to the HS's constructor (chicken / egg) + hs.get_auth_handler().hash = lambda p: hashlib.md5(p).hexdigest() + hs.get_auth_handler().validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h fed = kargs.get("resource_for_federation", None) if fed: From 356f13c0696526032c211c103dad2f57d18473fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 Jun 2016 14:07:38 +0100 Subject: [PATCH 057/414] Disable INCLUDE_ALL_UNREAD_NOTIFS --- synapse/push/emailpusher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index e38ed02006..2c21ed3088 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -44,7 +44,8 @@ THROTTLE_RESET_AFTER_MS = (12 * 60 * 60 * 1000) # does each email include all unread notifs, or just the ones which have happened # since the last mail? -INCLUDE_ALL_UNREAD_NOTIFS = True +# XXX: this is currently broken as it includes ones from parted rooms(!) +INCLUDE_ALL_UNREAD_NOTIFS = False class EmailPusher(object): From 70599ce9252997d32d0bf9f26a4e02c99bbe474d Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 2 Jun 2016 15:20:15 +0100 Subject: [PATCH 058/414] Allow external processes to mark a user as syncing. (#812) * Add infrastructure to the presence handler to track sync requests in external processes * Expire stale entries for dead external processes * Add an http endpoint for making users as syncing Add some docstrings and comments. * Fixes --- synapse/handlers/presence.py | 119 ++++++++++++++++++++--- synapse/replication/presence_resource.py | 59 +++++++++++ synapse/replication/resource.py | 2 + tests/handlers/test_presence.py | 16 ++- 4 files changed, 174 insertions(+), 22 deletions(-) create mode 100644 synapse/replication/presence_resource.py diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 37f57301fb..fc8538b41e 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -68,6 +68,10 @@ FEDERATION_TIMEOUT = 30 * 60 * 1000 # How often to resend presence to remote servers FEDERATION_PING_INTERVAL = 25 * 60 * 1000 +# How long we will wait before assuming that the syncs from an external process +# are dead. 
+EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
+
 assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
 
 
@@ -158,10 +162,21 @@ class PresenceHandler(object):
         self.serial_to_user = {}
         self._next_serial = 1
 
-        # Keeps track of the number of *ongoing* syncs. While this is non zero
-        # a user will never go offline.
+        # Keeps track of the number of *ongoing* syncs on this process. While
+        # this is non zero a user will never go offline.
         self.user_to_num_current_syncs = {}
 
+        # Keeps track of the *ongoing* syncs on other processes.
+        # While any sync is ongoing on another process the user will never
+        # go offline.
+        # Each process has a unique identifier and an update frequency. If
+        # no update is received from that process within the update period then
+        # we assume that all the sync requests on that process have stopped.
+        # Stored as a dict from process_id to set of user_id, and a dict of
+        # process_id to millisecond timestamp last updated.
+        self.external_process_to_current_syncs = {}
+        self.external_process_last_updated_ms = {}
+
         # Start a LoopingCall in 30s that fires every 5s.
         # The initial delay is to allow disconnected clients a chance to
         # reconnect before we treat them as offline.
@@ -272,13 +287,26 @@ class PresenceHandler(object):
         # Fetch the list of users that *may* have timed out. Things may have
         # changed since the timeout was set, so we won't necessarily have to
         # take any action.
-        users_to_check = self.wheel_timer.fetch(now)
+        users_to_check = set(self.wheel_timer.fetch(now))
+
+        # Check whether the lists of syncing processes from an external
+        # process have expired.
+        expired_process_ids = [
+            process_id for process_id, last_update
+            in self.external_process_last_updated_ms.items()
+            if now - last_update > EXTERNAL_PROCESS_EXPIRY
+        ]
+        for process_id in expired_process_ids:
+            users_to_check.update(
+                self.external_process_to_current_syncs.pop(process_id, ())
+            )
+            self.external_process_last_updated_ms.pop(process_id)
 
         states = [
             self.user_to_current_state.get(
                 user_id, UserPresenceState.default(user_id)
             )
-            for user_id in set(users_to_check)
+            for user_id in users_to_check
         ]
 
         timers_fired_counter.inc_by(len(states))
@@ -286,7 +314,7 @@ class PresenceHandler(object):
         changes = handle_timeouts(
             states,
             is_mine_fn=self.is_mine_id,
-            user_to_num_current_syncs=self.user_to_num_current_syncs,
+            syncing_user_ids=self.get_currently_syncing_users(),
             now=now,
         )
 
@@ -363,6 +391,73 @@ class PresenceHandler(object):
 
         defer.returnValue(_user_syncing())
 
+    def get_currently_syncing_users(self):
+        """Get the set of user ids that are currently syncing on this HS.
+        Returns:
+            set(str): A set of user_id strings.
+        """
+        syncing_user_ids = {
+            user_id for user_id, count in self.user_to_num_current_syncs.items()
+            if count
+        }
+        for user_ids in self.external_process_to_current_syncs.values():
+            syncing_user_ids.update(user_ids)
+        return syncing_user_ids
+
+    @defer.inlineCallbacks
+    def update_external_syncs(self, process_id, syncing_user_ids):
+        """Update the syncing users for an external process
+
+        Args:
+            process_id(str): An identifier for the process the users are
+                syncing against. This allows synapse to process updates
+                as users start and stop syncing against a given process.
+            syncing_user_ids(set(str)): The set of user_ids that are
+                currently syncing on that server.
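+
+        Returns:
+            Deferred: resolves once the presence states have been updated.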
+ """ + + # Grab the previous list of user_ids that were syncing on that process + prev_syncing_user_ids = ( + self.external_process_to_current_syncs.get(process_id, set()) + ) + # Grab the current presence state for both the users that are syncing + # now and the users that were syncing before this update. + prev_states = yield self.current_state_for_users( + syncing_user_ids | prev_syncing_user_ids + ) + updates = [] + time_now_ms = self.clock.time_msec() + + # For each new user that is syncing check if we need to mark them as + # being online. + for new_user_id in syncing_user_ids - prev_syncing_user_ids: + prev_state = prev_states[new_user_id] + if prev_state.state == PresenceState.OFFLINE: + updates.append(prev_state.copy_and_replace( + state=PresenceState.ONLINE, + last_active_ts=time_now_ms, + last_user_sync_ts=time_now_ms, + )) + else: + updates.append(prev_state.copy_and_replace( + last_user_sync_ts=time_now_ms, + )) + + # For each user that is still syncing or stopped syncing update the + # last sync time so that we will correctly apply the grace period when + # they stop syncing. + for old_user_id in prev_syncing_user_ids: + prev_state = prev_states[old_user_id] + updates.append(prev_state.copy_and_replace( + last_user_sync_ts=time_now_ms, + )) + + yield self._update_states(updates) + + # Update the last updated time for the process. We expire the entries + # if we don't receive an update in the given timeframe. + self.external_process_last_updated_ms[process_id] = self.clock.time_msec() + self.external_process_to_current_syncs[process_id] = syncing_user_ids + @defer.inlineCallbacks def current_state_for_user(self, user_id): """Get the current presence state for a user. @@ -935,15 +1030,14 @@ class PresenceEventSource(object): return self.get_new_events(user, from_key=None, include_offline=False) -def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now): +def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): """Checks the presence of users that have timed out and updates as appropriate. Args: user_states(list): List of UserPresenceState's to check. is_mine_fn (fn): Function that returns if a user_id is ours - user_to_num_current_syncs (dict): Mapping of user_id to number of currently - active syncs. + syncing_user_ids (set): Set of user_ids with active syncs. now (int): Current time in ms. Returns: @@ -954,21 +1048,20 @@ def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now): for state in user_states: is_mine = is_mine_fn(state.user_id) - new_state = handle_timeout(state, is_mine, user_to_num_current_syncs, now) + new_state = handle_timeout(state, is_mine, syncing_user_ids, now) if new_state: changes[state.user_id] = new_state return changes.values() -def handle_timeout(state, is_mine, user_to_num_current_syncs, now): +def handle_timeout(state, is_mine, syncing_user_ids, now): """Checks the presence of the user to see if any of the timers have elapsed Args: state (UserPresenceState) is_mine (bool): Whether the user is ours - user_to_num_current_syncs (dict): Mapping of user_id to number of currently - active syncs. + syncing_user_ids (set): Set of user_ids with active syncs. now (int): Current time in ms. 
    Returns:
@@ -1002,7 +1095,7 @@ def handle_timeout(state, is_mine, user_to_num_current_syncs, now):
 
     # If there have been no syncs for a while (and none ongoing),
     # set presence to offline
-    if not user_to_num_current_syncs.get(user_id, 0):
+    if user_id not in syncing_user_ids:
         if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT:
             state = state.copy_and_replace(
                 state=PresenceState.OFFLINE,
diff --git a/synapse/replication/presence_resource.py b/synapse/replication/presence_resource.py
new file mode 100644
index 0000000000..fc18130ab4
--- /dev/null
+++ b/synapse/replication/presence_resource.py
@@ -0,0 +1,59 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import respond_with_json_bytes, request_handler
+from synapse.http.servlet import parse_json_object_from_request
+
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+
+class PresenceResource(Resource):
+    """
+    HTTP endpoint for marking users as syncing.
+
+        POST /_synapse/replication/presence HTTP/1.1
+        Content-Type: application/json
+
+        {
+            "process_id": "<process_id>",
+            "syncing_users": ["<user_id>"]
+        }
+    """
+
+    def __init__(self, hs):
+        Resource.__init__(self)  # Resource is old-style, so no super()
+
+        self.version_string = hs.version_string
+        self.clock = hs.get_clock()
+        self.presence_handler = hs.get_presence_handler()
+
+    def render_POST(self, request):
+        self._async_render_POST(request)
+        return NOT_DONE_YET
+
+    @request_handler()
+    @defer.inlineCallbacks
+    def _async_render_POST(self, request):
+        content = parse_json_object_from_request(request)
+
+        process_id = content["process_id"]
+        syncing_user_ids = content["syncing_users"]
+
+        yield self.presence_handler.update_external_syncs(
+            process_id, set(syncing_user_ids)
+        )
+
+        respond_with_json_bytes(request, 200, "{}")
diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 847f212a3d..8c2d487ff4 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -16,6 +16,7 @@
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.http.server import request_handler, finish_request
 from synapse.replication.pusher_resource import PusherResource
+from synapse.replication.presence_resource import PresenceResource
 
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
@@ -115,6 +116,7 @@ class ReplicationResource(Resource):
         self.clock = hs.get_clock()
 
         self.putChild("remove_pushers", PusherResource(hs))
+        self.putChild("syncing_users", PresenceResource(hs))
 
     def render_GET(self, request):
         self._async_render_GET(request)
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 87c795fcfa..b531ba8540 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -264,7 +264,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
         )
 
         new_state = handle_timeout(
-            state, is_mine=True, user_to_num_current_syncs={}, now=now
+
state, is_mine=True, syncing_user_ids=set(), now=now ) self.assertIsNotNone(new_state) @@ -282,7 +282,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, user_to_num_current_syncs={}, now=now + state, is_mine=True, syncing_user_ids=set(), now=now ) self.assertIsNotNone(new_state) @@ -300,9 +300,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, user_to_num_current_syncs={ - user_id: 1, - }, now=now + state, is_mine=True, syncing_user_ids=set([user_id]), now=now ) self.assertIsNotNone(new_state) @@ -321,7 +319,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, user_to_num_current_syncs={}, now=now + state, is_mine=True, syncing_user_ids=set(), now=now ) self.assertIsNotNone(new_state) @@ -340,7 +338,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, user_to_num_current_syncs={}, now=now + state, is_mine=True, syncing_user_ids=set(), now=now ) self.assertIsNone(new_state) @@ -358,7 +356,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=False, user_to_num_current_syncs={}, now=now + state, is_mine=False, syncing_user_ids=set(), now=now ) self.assertIsNotNone(new_state) @@ -377,7 +375,7 @@ class PresenceTimeoutTestCase(unittest.TestCase): ) new_state = handle_timeout( - state, is_mine=True, user_to_num_current_syncs={}, now=now + state, is_mine=True, syncing_user_ids=set(), now=now ) self.assertIsNotNone(new_state) From 661a540dd1de89a3ab3a8f6ca0f780ea7d264176 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 2 Jun 2016 15:20:28 +0100 Subject: [PATCH 059/414] Deduplicate presence entries in sync (#818) --- synapse/handlers/sync.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3b89582d79..5307b62b85 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -637,6 +637,9 @@ class SyncHandler(object): ) presence.extend(states) + # Deduplicate the presence entries so that there's at most one per user + presence = {p["content"]["user_id"]: p for p in presence}.values() + presence = sync_config.filter_collection.filter_presence( presence ) From 80f34d7b574e1a6b8bc922df41bd53b59260fcf2 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 2 Jun 2016 15:23:09 +0100 Subject: [PATCH 060/414] Fix setting the _clock in SQLBaseStore --- synapse/storage/_base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 56a0dd80f3..32c6677d47 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -152,6 +152,7 @@ class SQLBaseStore(object): def __init__(self, hs): self.hs = hs + self._clock = hs.get_clock() self._db_pool = hs.get_db_pool() self._previous_txn_total_time = 0 From 56d15a05306896555ded3025f8a808bda04872fa Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 2 Jun 2016 16:28:54 +0100 Subject: [PATCH 061/414] Store the typing users as user_id strings. (#819) Rather than storing them as UserID objects. 
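
Plain strings survive the JSON round-trip used by the typing EDUs and
the replication stream, and work directly as mapping keys. A rough
sketch of the equivalence this relies on (the ids below are
illustrative, not taken from the codebase):

    from synapse.types import UserID

    user = UserID.from_string("@apple:example.com")
    assert user.to_string() == "@apple:example.com"

    # typing EDUs already carry the string form on the wire:
    content = {
        "room_id": "!abc:example.com",
        "user_id": user.to_string(),
        "typing": True,
    }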
---
 synapse/handlers/typing.py    | 64 ++++++++++++++++++++---------
 tests/handlers/test_typing.py |  4 +--
 2 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index d46f05f426..3c54307bed 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
 
 # A tiny object useful for storing a user's membership in a room, as a mapping
 # key
-RoomMember = namedtuple("RoomMember", ("room_id", "user"))
+RoomMember = namedtuple("RoomMember", ("room_id", "user_id"))
 
 
 class TypingHandler(object):
@@ -38,7 +38,7 @@ class TypingHandler(object):
         self.store = hs.get_datastore()
         self.server_name = hs.config.server_name
         self.auth = hs.get_auth()
-        self.is_mine = hs.is_mine
+        self.is_mine_id = hs.is_mine_id
         self.notifier = hs.get_notifier()
         self.clock = hs.get_clock()
 
@@ -67,20 +67,23 @@ class TypingHandler(object):
 
     @defer.inlineCallbacks
     def started_typing(self, target_user, auth_user, room_id, timeout):
-        if not self.is_mine(target_user):
+        target_user_id = target_user.to_string()
+        auth_user_id = auth_user.to_string()
+
+        if not self.is_mine_id(target_user_id):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
-        if target_user != auth_user:
+        if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")
 
-        yield self.auth.check_joined_room(room_id, target_user.to_string())
+        yield self.auth.check_joined_room(room_id, target_user_id)
 
         logger.debug(
-            "%s has started typing in %s", target_user.to_string(), room_id
+            "%s has started typing in %s", target_user_id, room_id
        )
 
         until = self.clock.time_msec() + timeout
-        member = RoomMember(room_id=room_id, user=target_user)
+        member = RoomMember(room_id=room_id, user_id=target_user_id)
 
         was_present = member in self._member_typing_until
 
@@ -104,25 +107,28 @@ class TypingHandler(object):
 
         yield self._push_update(
             room_id=room_id,
-            user=target_user,
+            user_id=target_user_id,
             typing=True,
         )
 
     @defer.inlineCallbacks
     def stopped_typing(self, target_user, auth_user, room_id):
-        if not self.is_mine(target_user):
+        target_user_id = target_user.to_string()
+        auth_user_id = auth_user.to_string()
+
+        if not self.is_mine_id(target_user_id):
             raise SynapseError(400, "User is not hosted on this Home Server")
 
-        if target_user != auth_user:
+        if target_user_id != auth_user_id:
             raise AuthError(400, "Cannot set another user's typing state")
 
-        yield self.auth.check_joined_room(room_id, target_user.to_string())
+        yield self.auth.check_joined_room(room_id, target_user_id)
 
         logger.debug(
-            "%s has stopped typing in %s", target_user.to_string(), room_id
+            "%s has stopped typing in %s", target_user_id, room_id
         )
 
-        member = RoomMember(room_id=room_id, user=target_user)
+        member = RoomMember(room_id=room_id, user_id=target_user_id)
 
         if member in self._member_typing_timer:
             self.clock.cancel_call_later(self._member_typing_timer[member])
@@ -132,8 +138,9 @@ class TypingHandler(object):
 
     @defer.inlineCallbacks
     def user_left_room(self, user, room_id):
-        if self.is_mine(user):
-            member = RoomMember(room_id=room_id, user=user)
+        user_id = user.to_string()
+        if self.is_mine_id(user_id):
+            member = RoomMember(room_id=room_id, user_id=user_id)
             yield self._stopped_typing(member)
 
     @defer.inlineCallbacks
@@ -144,7 +151,7 @@ class TypingHandler(object):
 
         yield self._push_update(
             room_id=member.room_id,
-            user=member.user,
+            user_id=member.user_id,
             typing=False,
         )
 
@@ -156,7 +163,7 @@ class TypingHandler(object):
             del 
self._member_typing_timer[member] @defer.inlineCallbacks - def _push_update(self, room_id, user, typing): + def _push_update(self, room_id, user_id, typing): domains = yield self.store.get_joined_hosts_for_room(room_id) deferreds = [] @@ -164,7 +171,7 @@ class TypingHandler(object): if domain == self.server_name: self._push_update_local( room_id=room_id, - user=user, + user_id=user_id, typing=typing ) else: @@ -173,7 +180,7 @@ class TypingHandler(object): edu_type="m.typing", content={ "room_id": room_id, - "user_id": user.to_string(), + "user_id": user_id, "typing": typing, }, )) @@ -183,23 +190,26 @@ class TypingHandler(object): @defer.inlineCallbacks def _recv_edu(self, origin, content): room_id = content["room_id"] - user = UserID.from_string(content["user_id"]) + user_id = content["user_id"] + + # Check that the string is a valid user id + UserID.from_string(user_id) domains = yield self.store.get_joined_hosts_for_room(room_id) if self.server_name in domains: self._push_update_local( room_id=room_id, - user=user, + user_id=user_id, typing=content["typing"] ) - def _push_update_local(self, room_id, user, typing): + def _push_update_local(self, room_id, user_id, typing): room_set = self._room_typing.setdefault(room_id, set()) if typing: - room_set.add(user) + room_set.add(user_id) else: - room_set.discard(user) + room_set.discard(user_id) self._latest_room_serial += 1 self._room_serials[room_id] = self._latest_room_serial @@ -215,9 +225,7 @@ class TypingHandler(object): for room_id, serial in self._room_serials.items(): if last_id < serial and serial <= current_id: typing = self._room_typing[room_id] - typing_bytes = json.dumps([ - u.to_string() for u in typing - ], ensure_ascii=False) + typing_bytes = json.dumps(list(typing), ensure_ascii=False) rows.append((serial, room_id, typing_bytes)) rows.sort() return rows @@ -239,7 +247,7 @@ class TypingNotificationEventSource(object): "type": "m.typing", "room_id": room_id, "content": { - "user_ids": [u.to_string() for u in typing], + "user_ids": list(typing), }, } diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index abb739ae52..ab9899b7d5 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -251,12 +251,12 @@ class TypingNotificationsTestCase(unittest.TestCase): # Gut-wrenching from synapse.handlers.typing import RoomMember - member = RoomMember(self.room_id, self.u_apple) + member = RoomMember(self.room_id, self.u_apple.to_string()) self.handler._member_typing_until[member] = 1002000 self.handler._member_typing_timer[member] = ( self.clock.call_later(1002, lambda: 0) ) - self.handler._room_typing[self.room_id] = set((self.u_apple,)) + self.handler._room_typing[self.room_id] = set((self.u_apple.to_string(),)) self.assertEquals(self.event_source.get_current_key(), 0) From 07a555991600137c830eb3b06f90a305c8f1e3d8 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 2 Jun 2016 17:17:16 +0100 Subject: [PATCH 062/414] Fix error in email notification string formatting --- synapse/push/mailer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 0e9d8ccb53..63c7ec18a5 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -41,7 +41,7 @@ logger = logging.getLogger(__name__) MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \ - "in the %s room..." + "in the %(room)s room..." MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..." 
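+# NB: these templates are %-interpolated with a dict of named values, so
+# every placeholder has to be named (the bare %s above was the bug).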
MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
 MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..."

From 2675c1e40ebc3392ce719ac2304b97e98c7fefb4 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Thu, 2 Jun 2016 17:21:12 +0100
Subject: [PATCH 063/414] add some branding debugging

---
 synapse/push/mailer.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 0e9d8ccb53..944f3b481d 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -84,6 +84,7 @@ class Mailer(object):
         self.state_handler = self.hs.get_state_handler()
         loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
         self.app_name = app_name
+        logger.info("Created Mailer for app_name %s" % app_name)
         env = jinja2.Environment(loader=loader)
         env.filters["format_ts"] = format_ts_filter
         env.filters["mxc_to_http"] = self.mxc_to_http_filter

From 1f31cc37f8611f9ae5612ef5be82e63735fbdf34 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 17:21:31 +0100
Subject: [PATCH 064/414] Working unsubscribe links going straight to the HS
 and authed by macaroons that let you delete pushers and nothing else

---
 synapse/api/auth.py              |  7 +++++++
 synapse/app/pusher.py            | 23 ++++++++++++++++++++++-
 synapse/push/mailer.py           |  8 ++++----
 synapse/rest/client/v1/pusher.py |  4 +++-
 4 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 463bd8b692..31e1abb964 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -660,6 +660,13 @@ class Auth(object):
                 "is_guest": True,
                 "token_id": None,
             }
+        elif rights == "delete_pusher":
+            # We don't store these tokens in the database
+            ret = {
+                "user": user,
+                "is_guest": False,
+                "token_id": None,
+            }
         else:
             # This codepath exists so that we can actually return a
             # token ID, because we use token IDs in place of device
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 135dd58c15..f1de1e7ce9 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -21,6 +21,7 @@ from synapse.config._base import ConfigError
 from synapse.config.database import DatabaseConfig
 from synapse.config.logger import LoggingConfig
 from synapse.config.emailconfig import EmailConfig
+from synapse.config.key import KeyConfig
 from synapse.http.site import SynapseSite
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.storage.roommember import RoomMemberStore
@@ -63,6 +64,26 @@ class SlaveConfig(DatabaseConfig):
         self.pid_file = self.abspath(config.get("pid_file"))
         self.public_baseurl = config["public_baseurl"]
 
+        # some things used by the auth handler but not actually used in the
+        # pusher codebase
+        self.bcrypt_rounds = None
+        self.ldap_enabled = None
+        self.ldap_server = None
+        self.ldap_port = None
+        self.ldap_tls = None
+        self.ldap_search_base = None
+        self.ldap_search_property = None
+        self.ldap_email_property = None
+        self.ldap_full_name_property = None
+
+        # We would otherwise try to use the registration shared secret as the
+        # macaroon shared secret if there was no macaroon_shared_secret, but
+        # that means pulling in RegistrationConfig too. We don't need to be
+        # backwards compatible in the pusher codebase so just make people set
+        # macaroon_shared_secret. We set this to None to prevent it referencing
+        # an undefined key.
+ self.registration_shared_secret = None + def default_config(self, server_name, **kwargs): pid_file = self.abspath("pusher.pid") return """\ @@ -95,7 +116,7 @@ class SlaveConfig(DatabaseConfig): """ % locals() -class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig): +class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig): pass diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index e877d8fdad..60d3700afa 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -81,7 +81,7 @@ class Mailer(object): def __init__(self, hs, app_name): self.hs = hs self.store = self.hs.get_datastore() - self.handlers = self.hs.get_handlers() + self.auth_handler = self.hs.get_auth_handler() self.state_handler = self.hs.get_state_handler() loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir) self.app_name = app_name @@ -161,7 +161,7 @@ class Mailer(object): template_vars = { "user_display_name": user_display_name, - "unsubscribe_link": self.make_unsubscribe_link(app_id, email_address), + "unsubscribe_link": self.make_unsubscribe_link(user_id, app_id, email_address), "summary_text": summary_text, "app_name": self.app_name, "rooms": rooms, @@ -427,9 +427,9 @@ class Mailer(object): notif['room_id'], notif['event_id'] ) - def make_unsubscribe_link(self, app_id, email_address): + def make_unsubscribe_link(self, user_id, app_id, email_address): params = { - "access_token": self.handlers.auth.generate_delete_pusher_token(), + "access_token": self.auth_handler.generate_delete_pusher_token(user_id), "app_id": app_id, "pushkey": email_address, } diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index fa7a0992dd..9a2ed6ed88 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -149,11 +149,13 @@ class PushersRemoveRestServlet(RestServlet): def __init__(self, hs): super(RestServlet, self).__init__() + self.hs = hs self.notifier = hs.get_notifier() + self.auth = hs.get_v1auth() @defer.inlineCallbacks def on_GET(self, request): - requester = yield self.auth.get_user_by_req(request, "delete_pusher") + requester = yield self.auth.get_user_by_req(request, rights="delete_pusher") user = requester.user app_id = parse_string(request, "app_id", required=True) From 745ddb4dd04d0346f27f65a1e5508900b58e658a Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 2 Jun 2016 17:38:41 +0100 Subject: [PATCH 065/414] peppate --- synapse/push/mailer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 60d3700afa..3c9a66008d 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -161,7 +161,9 @@ class Mailer(object): template_vars = { "user_display_name": user_display_name, - "unsubscribe_link": self.make_unsubscribe_link(user_id, app_id, email_address), + "unsubscribe_link": self.make_unsubscribe_link( + user_id, app_id, email_address + ), "summary_text": summary_text, "app_name": self.app_name, "rooms": rooms, From 79d1f072f4fc9a6b2e9773e8cb700b26bc2dff51 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 2 Jun 2016 21:34:40 +0100 Subject: [PATCH 066/414] brand the email from header --- synapse/config/emailconfig.py | 2 +- synapse/push/mailer.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 90bdd08f00..a187161272 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -89,7 +89,7 @@ class 
EmailConfig(Config): # enable_notifs: false # smtp_host: "localhost" # smtp_port: 25 - # notif_from: Your Friendly Matrix Home Server + # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix # template_dir: res/templates # notif_template_html: notif_mail.html diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 944f3b481d..c1e9057eb6 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -97,7 +97,14 @@ class Mailer(object): @defer.inlineCallbacks def send_notification_mail(self, user_id, email_address, push_actions, reason): - raw_from = email.utils.parseaddr(self.hs.config.email_notif_from)[1] + try: + from_string = self.hs.config.email_notif_from % { + "app": self.app_name + } + except TypeError: + from_string = self.hs.config.email_notif_from + + raw_from = email.utils.parseaddr(from_string)[1] raw_to = email.utils.parseaddr(email_address)[1] if raw_to == '': From 0eae0757232169b833224f48208aed9fdc9c6fe6 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 10:58:03 +0100 Subject: [PATCH 067/414] Add slaved stores for filters, tokens, and push rules --- .../replication/slave/storage/appservice.py | 30 +++++++++ .../replication/slave/storage/filtering.py | 24 +++++++ .../replication/slave/storage/push_rule.py | 67 +++++++++++++++++++ .../replication/slave/storage/registration.py | 30 +++++++++ 4 files changed, 151 insertions(+) create mode 100644 synapse/replication/slave/storage/appservice.py create mode 100644 synapse/replication/slave/storage/filtering.py create mode 100644 synapse/replication/slave/storage/push_rule.py create mode 100644 synapse/replication/slave/storage/registration.py diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py new file mode 100644 index 0000000000..25792d9429 --- /dev/null +++ b/synapse/replication/slave/storage/appservice.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import BaseSlavedStore +from synapse.storage import DataStore +from synapse.config.appservice import load_appservices + + +class SlavedApplicationServiceStore(BaseSlavedStore): + def __init__(self, db_conn, hs): + super(SlavedApplicationServiceStore, self).__init__(db_conn, hs) + self.services_cache = load_appservices( + hs.config.server_name, + hs.config.app_service_config_files + ) + + get_app_service_by_token = DataStore.get_app_service_by_token.__func__ + get_app_service_by_user_id = DataStore.get_app_service_by_user_id.__func__ diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py new file mode 100644 index 0000000000..5037f395b9 --- /dev/null +++ b/synapse/replication/slave/storage/filtering.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import BaseSlavedStore +from synapse.storage.filtering import FilteringStore + + +class SlavedFilteringStore(BaseSlavedStore): + def __init__(self, db_conn, hs): + super(SlavedFilteringStore, self).__init__(db_conn, hs) + + get_user_filter = FilteringStore.__dict__["get_user_filter"] diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py new file mode 100644 index 0000000000..21ceb0213a --- /dev/null +++ b/synapse/replication/slave/storage/push_rule.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .events import SlavedEventStore +from ._slaved_id_tracker import SlavedIdTracker +from synapse.storage import DataStore +from synapse.storage.push_rule import PushRuleStore +from synapse.util.caches.stream_change_cache import StreamChangeCache + + +class SlavedPushRuleStore(SlavedEventStore): + def __init__(self, db_conn, hs): + super(SlavedPushRuleStore, self).__init__(db_conn, hs) + self._push_rules_stream_id_gen = SlavedIdTracker( + db_conn, "push_rules_stream", "stream_id", + ) + self.push_rules_stream_cache = StreamChangeCache( + "PushRulesStreamChangeCache", + self._push_rules_stream_id_gen.get_current_token(), + ) + + get_push_rules_for_user = PushRuleStore.__dict__["get_push_rules_for_user"] + get_push_rules_enabled_for_user = ( + PushRuleStore.__dict__["get_push_rules_enabled_for_user"] + ) + have_push_rules_changed_for_user = ( + DataStore.have_push_rules_changed_for_user.__func__ + ) + + def get_push_rules_stream_token(self): + return ( + self._push_rules_stream_id_gen.get_current_token(), + self._stream_id_gen.get_current_token(), + ) + + def stream_positions(self): + result = super(SlavedPushRuleStore, self).stream_positions() + result["push_rules"] = self._push_rules_stream_id_gen.get_current_token() + return result + + def process_replication(self, result): + stream = result.get("push_rules") + if stream: + for row in stream["rows"]: + position = row[0] + user_id = row[2] + self.get_push_rules_for_user.invalidate((user_id,)) + self.get_push_rules_enabled_for_user.invalidate((user_id,)) + self.push_rules_stream_cache.entity_has_changed( + user_id, position + ) + + self._push_rules_stream_id_gen.advance(int(stream["position"])) + + return super(SlavedPushRuleStore, self).process_replication(result) diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py new file mode 100644 index 0000000000..307833f9e1 --- /dev/null +++ 
b/synapse/replication/slave/storage/registration.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.registration import RegistrationStore
+
+
+class SlavedRegistrationStore(BaseSlavedStore):
+    def __init__(self, db_conn, hs):
+        super(SlavedRegistrationStore, self).__init__(db_conn, hs)
+
+    # TODO: use the cached version and invalidate deleted tokens
+    get_user_by_access_token = RegistrationStore.__dict__[
+        "get_user_by_access_token"
+    ].orig
+
+    _query_for_auth = DataStore._query_for_auth.__func__

From f88d747f7959808884451245aeba65edf7c490bf Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 11:03:10 +0100
Subject: [PATCH 068/414] Add a comment explaining why the filter cache
 doesn't need expiring

---
 synapse/replication/slave/storage/filtering.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
index 5037f395b9..819ed62881 100644
--- a/synapse/replication/slave/storage/filtering.py
+++ b/synapse/replication/slave/storage/filtering.py
@@ -21,4 +21,5 @@ class SlavedFilteringStore(BaseSlavedStore):
     def __init__(self, db_conn, hs):
         super(SlavedFilteringStore, self).__init__(db_conn, hs)
 
+    # Filters are immutable so this cache doesn't need to be expired
     get_user_filter = FilteringStore.__dict__["get_user_filter"]

From 9c26b390a2112590fe4252057dc1f081cb99a6b1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 11:34:06 +0100
Subject: [PATCH 069/414] Only get local users

---
 synapse/push/bulk_push_rule_evaluator.py | 7 +++++--
 synapse/storage/pusher.py                | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 8c59e59e03..d50db3b736 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -83,10 +83,13 @@ def evaluator_for_event(event, hs, store, current_state):
         e.state_key for e in current_state.values()
         if e.type == EventTypes.Member and e.membership == Membership.JOIN
     )
+    local_users_in_room = set(uid for uid in all_in_room if hs.is_mine_id(uid))
 
     # users in the room who have pushers need to get push rules run because
     # that's how their pushers work
-    if_users_with_pushers = yield store.get_if_users_have_pushers(all_in_room)
+    if_users_with_pushers = yield store.get_if_users_have_pushers(
+        local_users_in_room
+    )
     users_with_pushers = set(
         uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
     )
@@ -96,7 +99,7 @@ def evaluator_for_event(event, hs, store, current_state):
     # any users with pushers must be ours: they have pushers
     user_ids = set(users_with_pushers)
     for uid in users_with_receipts:
-        if hs.is_mine_id(uid) and uid in all_in_room:
+        if uid in local_users_in_room:
             user_ids.add(uid)
 
     # if this event is an invite event, we may 
need to run rules for the user diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 39d5349eaa..a7d7c54d7e 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -135,7 +135,7 @@ class PusherStore(SQLBaseStore): "get_all_updated_pushers", get_all_updated_pushers_txn ) - @cachedInlineCallbacks(lru=True, num_args=1) + @cachedInlineCallbacks(lru=True, num_args=1, max_entries=15000) def get_if_user_has_pusher(self, user_id): result = yield self._simple_select_many_batch( table='pushers', From 59f2d7352224af97e8f091f673858dde42b00197 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Jun 2016 15:45:37 +0100 Subject: [PATCH 070/414] Remove unnecessary sets --- synapse/push/bulk_push_rule_evaluator.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index d50db3b736..af5212a5d1 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -79,25 +79,24 @@ def evaluator_for_event(event, hs, store, current_state): # generating them for bot / AS users etc, we only do so for people who've # sent a read receipt into the room. - all_in_room = set( + local_users_in_room = set( e.state_key for e in current_state.values() if e.type == EventTypes.Member and e.membership == Membership.JOIN + and hs.is_mine_id(e.state_key) ) - local_users_in_room = set(uid for uid in all_in_room if hs.is_mine_id(uid)) # users in the room who have pushers need to get push rules run because # that's how their pushers work if_users_with_pushers = yield store.get_if_users_have_pushers( local_users_in_room ) - users_with_pushers = set( + user_ids = set( uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher ) users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id) # any users with pushers must be ours: they have pushers - user_ids = set(users_with_pushers) for uid in users_with_receipts: if uid in local_users_in_room: user_ids.add(uid) @@ -111,8 +110,6 @@ def evaluator_for_event(event, hs, store, current_state): if has_pusher: user_ids.add(invited_user) - user_ids = list(user_ids) - rules_by_user = yield _get_rules(room_id, user_ids, store) defer.returnValue(BulkPushRuleEvaluator( From 3ae915b27e4531031ee325931b3c62bc200ce798 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 11:05:53 +0100 Subject: [PATCH 071/414] Add a slaved store for presence --- synapse/replication/slave/storage/presence.py | 59 +++++++++++++++++++ synapse/storage/__init__.py | 6 +- 2 files changed, 62 insertions(+), 3 deletions(-) create mode 100644 synapse/replication/slave/storage/presence.py diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py new file mode 100644 index 0000000000..703f4a49bf --- /dev/null +++ b/synapse/replication/slave/storage/presence.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import BaseSlavedStore +from ._slaved_id_tracker import SlavedIdTracker + +from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.storage import DataStore + + +class SlavedPresenceStore(BaseSlavedStore): + def __init__(self, db_conn, hs): + super(SlavedPresenceStore, self).__init__(db_conn, hs) + self._presence_id_gen = SlavedIdTracker( + db_conn, "presence_stream", "stream_id", + ) + + self._presence_on_startup = self._get_active_presence(db_conn) + + self.presence_stream_cache = self.presence_stream_cache = StreamChangeCache( + "PresenceStreamChangeCache", self._presence_id_gen.get_current_token() + ) + + _get_active_presence = DataStore._get_active_presence.__func__ + take_presence_startup_info = DataStore.take_presence_startup_info.__func__ + get_presence_for_users = DataStore.get_presence_for_users.__func__ + + def get_current_presence_token(self): + return self._presence_id_gen.get_current_token() + + def stream_positions(self): + result = super(SlavedPresenceStore, self).stream_positions() + position = self._presence_id_gen.get_current_token() + result["presence"] = position + return result + + def process_replication(self, result): + stream = result.get("presence") + if stream: + self._presence_id_gen.advance(int(stream["position"])) + for row in stream["rows"]: + position, user_id = row[:2] + self.presence_stream_cache.entity_has_changed( + user_id, position + ) + + return super(SlavedPresenceStore, self).process_replication(result) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 8581796b7e..6928a213e8 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -149,7 +149,7 @@ class DataStore(RoomMemberStore, RoomStore, "AccountDataAndTagsChangeCache", account_max, ) - self.__presence_on_startup = self._get_active_presence(db_conn) + self._presence_on_startup = self._get_active_presence(db_conn) presence_cache_prefill, min_presence_val = self._get_cache_dict( db_conn, "presence_stream", @@ -190,8 +190,8 @@ class DataStore(RoomMemberStore, RoomStore, super(DataStore, self).__init__(hs) def take_presence_startup_info(self): - active_on_startup = self.__presence_on_startup - self.__presence_on_startup = None + active_on_startup = self._presence_on_startup + self._presence_on_startup = None return active_on_startup def _get_active_presence(self, db_conn): From 6a0afa582aa5bf816e082af31ac44e2a8fee28c0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Jun 2016 14:27:07 +0100 Subject: [PATCH 072/414] Load push rules in storage layer, so that they get cached --- synapse/handlers/sync.py | 5 ++-- synapse/push/bulk_push_rule_evaluator.py | 28 ----------------- synapse/push/clientformat.py | 30 ++++++++++++++----- synapse/rest/client/v1/push_rule.py | 6 ++-- synapse/storage/push_rule.py | 38 +++++++++++++++++++++++- 5 files changed, 63 insertions(+), 44 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5307b62b85..be26a491ff 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -198,9 +198,8 @@ class SyncHandler(object): @defer.inlineCallbacks def push_rules_for_user(self, user): user_id = user.to_string() - rawrules = yield self.store.get_push_rules_for_user(user_id) - enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id) - rules = format_push_rules_for_user(user, rawrules, enabled_map) + rules = yield 
self.store.get_push_rules_for_user(user_id) + rules = format_push_rules_for_user(user, rules) defer.returnValue(rules) @defer.inlineCallbacks diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index af5212a5d1..6e42121b1d 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -18,7 +18,6 @@ import ujson as json from twisted.internet import defer -from .baserules import list_with_base_rules from .push_rule_evaluator import PushRuleEvaluatorForEvent from synapse.api.constants import EventTypes, Membership @@ -38,36 +37,9 @@ def decode_rule_json(rule): @defer.inlineCallbacks def _get_rules(room_id, user_ids, store): rules_by_user = yield store.bulk_get_push_rules(user_ids) - rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids) rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} - rules_by_user = { - uid: list_with_base_rules([ - decode_rule_json(rule_list) - for rule_list in rules_by_user.get(uid, []) - ]) - for uid in user_ids - } - - # We apply the rules-enabled map here: bulk_get_push_rules doesn't - # fetch disabled rules, but this won't account for any server default - # rules the user has disabled, so we need to do this too. - for uid in user_ids: - user_enabled_map = rules_enabled_by_user.get(uid) - if not user_enabled_map: - continue - - for i, rule in enumerate(rules_by_user[uid]): - rule_id = rule['rule_id'] - - if rule_id in user_enabled_map: - if rule.get('enabled', True) != bool(user_enabled_map[rule_id]): - # Rules are cached across users. - rule = dict(rule) - rule['enabled'] = bool(user_enabled_map[rule_id]) - rules_by_user[uid][i] = rule - defer.returnValue(rules_by_user) diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index ae9db9ec2f..b3983f7940 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -23,10 +23,7 @@ import copy import simplejson as json -def format_push_rules_for_user(user, rawrules, enabled_map): - """Converts a list of rawrules and a enabled map into nested dictionaries - to match the Matrix client-server format for push rules""" - +def load_rules_for_user(user, rawrules, enabled_map): ruleslist = [] for rawrule in rawrules: rule = dict(rawrule) @@ -35,7 +32,26 @@ def format_push_rules_for_user(user, rawrules, enabled_map): ruleslist.append(rule) # We're going to be mutating this a lot, so do a deep copy - ruleslist = copy.deepcopy(list_with_base_rules(ruleslist)) + rules = list(list_with_base_rules(ruleslist)) + + for i, rule in enumerate(rules): + rule_id = rule['rule_id'] + if rule_id in enabled_map: + if rule.get('enabled', True) != bool(enabled_map[rule_id]): + # Rules are cached across users. 
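+                # Take a copy before flipping 'enabled' so that the
+                # copy in the cache is left untouched.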
+                rule = dict(rule)
+                rule['enabled'] = bool(enabled_map[rule_id])
+                rules[i] = rule
+
+    return rules
+
+
+def format_push_rules_for_user(user, ruleslist):
+    """Converts a list of rules into nested dictionaries to match the
+    Matrix client-server format for push rules"""
+
+    # We're going to be mutating this a lot, so do a deep copy
+    ruleslist = copy.deepcopy(ruleslist)
 
     rules = {'global': {}, 'device': {}}
 
@@ -60,9 +76,7 @@ def format_push_rules_for_user(user, rawrules, enabled_map):
             template_rule = _rule_to_template(r)
 
             if template_rule:
-                if r['rule_id'] in enabled_map:
-                    template_rule['enabled'] = enabled_map[r['rule_id']]
-                elif 'enabled' in r:
+                if 'enabled' in r:
                     template_rule['enabled'] = r['enabled']
                 else:
                     template_rule['enabled'] = True
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 02d837ee6a..6bb4821ec6 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -128,11 +128,9 @@ class PushRuleRestServlet(ClientV1RestServlet):
         # we build up the full structure and then decide which bits of it
         # to send which means doing unnecessary work sometimes but is
         # is probably not going to make a whole lot of difference
-        rawrules = yield self.store.get_push_rules_for_user(user_id)
+        rules = yield self.store.get_push_rules_for_user(user_id)
 
-        enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id)
-
-        rules = format_push_rules_for_user(requester.user, rawrules, enabled_map)
+        rules = format_push_rules_for_user(requester.user, rules)
 
         path = request.postpath[1:]
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ebb97c8474..786d6f6d67 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -15,6 +15,7 @@
 
 from ._base import SQLBaseStore
 from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
+from synapse.push.baserules import list_with_base_rules
 
 from twisted.internet import defer
 import logging
@@ -23,6 +24,29 @@ import simplejson as json
 logger = logging.getLogger(__name__)
 
 
+def _load_rules(rawrules, enabled_map):
+    ruleslist = []
+    for rawrule in rawrules:
+        rule = dict(rawrule)
+        rule["conditions"] = json.loads(rawrule["conditions"])
+        rule["actions"] = json.loads(rawrule["actions"])
+        ruleslist.append(rule)
+
+    # We're going to be mutating this a lot, so do a deep copy
+    rules = list(list_with_base_rules(ruleslist))
+
+    for i, rule in enumerate(rules):
+        rule_id = rule['rule_id']
+        if rule_id in enabled_map:
+            if rule.get('enabled', True) != bool(enabled_map[rule_id]):
+                # Rules are cached across users.
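+                # Mutating the rule in place would leak this user's
+                # 'enabled' override into other users' copies, so copy it.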
+ rule = dict(rule) + rule['enabled'] = bool(enabled_map[rule_id]) + rules[i] = rule + + return rules + + class PushRuleStore(SQLBaseStore): @cachedInlineCallbacks(lru=True) def get_push_rules_for_user(self, user_id): @@ -42,7 +66,11 @@ class PushRuleStore(SQLBaseStore): key=lambda row: (-int(row["priority_class"]), -int(row["priority"])) ) - defer.returnValue(rows) + enabled_map = yield self.get_push_rules_enabled_for_user(user_id) + + rules = _load_rules(rows, enabled_map) + + defer.returnValue(rules) @cachedInlineCallbacks(lru=True) def get_push_rules_enabled_for_user(self, user_id): @@ -85,6 +113,14 @@ class PushRuleStore(SQLBaseStore): for row in rows: results.setdefault(row['user_name'], []).append(row) + + enabled_map_by_user = yield self.bulk_get_push_rules_enabled(user_ids) + + for user_id, rules in results.items(): + results[user_id] = _load_rules( + rules, enabled_map_by_user.get(user_id, {}) + ) + defer.returnValue(results) @cachedList(cached_method_name="get_push_rules_enabled_for_user", From 597013caa5e22c7134b6ca6e398659ba76047b15 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 1 Jun 2016 18:01:22 +0100 Subject: [PATCH 073/414] Make cachedList go a bit faster --- synapse/metrics/metric.py | 22 +++++++++------- synapse/util/caches/descriptors.py | 42 +++++++++++++++++++++++------- 2 files changed, 45 insertions(+), 19 deletions(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 368fc24984..6f82b360bc 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -15,6 +15,7 @@ from itertools import chain +from collections import Counter # TODO(paul): I can't believe Python doesn't have one of these @@ -55,30 +56,29 @@ class CounterMetric(BaseMetric): """The simplest kind of metric; one that stores a monotonically-increasing integer that counts events.""" + __slots__ = ("counts") + def __init__(self, *args, **kwargs): super(CounterMetric, self).__init__(*args, **kwargs) - self.counts = {} + self.counts = Counter() # Scalar metrics are never empty if self.is_scalar(): self.counts[()] = 0 def inc_by(self, incr, *values): - if len(values) != self.dimension(): - raise ValueError( - "Expected as many values to inc() as labels (%d)" % (self.dimension()) - ) + # if len(values) != self.dimension(): + # raise ValueError( + # "Expected as many values to inc() as labels (%d)" % (self.dimension()) + # ) # TODO: should assert that the tag values are all strings - if values not in self.counts: - self.counts[values] = incr - else: - self.counts[values] += incr + self.counts[values] += incr def inc(self, *values): - self.inc_by(1, *values) + self.counts[values] += 1 def render_item(self, k): return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])] @@ -132,6 +132,8 @@ class CacheMetric(object): This metric generates standard metric name pairs, so that monitoring rules can easily be applied to measure hit ratio.""" + __slots__ = ("name", "hits", "total", "size") + def __init__(self, name, size_callback, labels=[]): self.name = name diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 758f5982b0..4bbb16ed3c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -32,6 +32,7 @@ import os import functools import inspect import threading +import itertools logger = logging.getLogger(__name__) @@ -43,6 +44,14 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) class Cache(object): + __slots__ = ( + "cache", + "max_entries", + "name", + "keylen", + 
"sequence", + "thread", + ) def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False): if lru: @@ -293,16 +302,21 @@ class CacheListDescriptor(object): # cached is a dict arg -> deferred, where deferred results in a # 2-tuple (`arg`, `result`) - cached = {} + results = {} + cached_defers = {} missing = [] for arg in list_args: key = list(keyargs) key[self.list_pos] = arg try: - res = cache.get(tuple(key)).observe() - res.addCallback(lambda r, arg: (arg, r), arg) - cached[arg] = res + res = cache.get(tuple(key)) + if not res.called: + res = res.observe() + res.addCallback(lambda r, arg: (arg, r), arg) + cached_defers[arg] = res + else: + results[arg] = res.result except KeyError: missing.append(arg) @@ -340,12 +354,22 @@ class CacheListDescriptor(object): res = observer.observe() res.addCallback(lambda r, arg: (arg, r), arg) - cached[arg] = res + cached_defers[arg] = res - return preserve_context_over_deferred(defer.gatherResults( - cached.values(), - consumeErrors=True, - ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res))) + if cached_defers: + return preserve_context_over_deferred(defer.gatherResults( + cached_defers.values(), + consumeErrors=True, + ).addCallback( + lambda res: { + k: v + for k, v in itertools.chain(results.items(), res) + } + )).addErrback( + unwrapFirstError + ) + else: + return results obj.__dict__[self.orig.__name__] = wrapped From e043ede4a2f18a47b67bf19368600183554824f7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 Jun 2016 11:52:32 +0100 Subject: [PATCH 074/414] Small optimisation to CacheListDescriptor --- synapse/metrics/metric.py | 22 ++++++++++------------ synapse/util/async.py | 9 +++++++++ synapse/util/caches/descriptors.py | 4 ++-- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 6f82b360bc..368fc24984 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -15,7 +15,6 @@ from itertools import chain -from collections import Counter # TODO(paul): I can't believe Python doesn't have one of these @@ -56,29 +55,30 @@ class CounterMetric(BaseMetric): """The simplest kind of metric; one that stores a monotonically-increasing integer that counts events.""" - __slots__ = ("counts") - def __init__(self, *args, **kwargs): super(CounterMetric, self).__init__(*args, **kwargs) - self.counts = Counter() + self.counts = {} # Scalar metrics are never empty if self.is_scalar(): self.counts[()] = 0 def inc_by(self, incr, *values): - # if len(values) != self.dimension(): - # raise ValueError( - # "Expected as many values to inc() as labels (%d)" % (self.dimension()) - # ) + if len(values) != self.dimension(): + raise ValueError( + "Expected as many values to inc() as labels (%d)" % (self.dimension()) + ) # TODO: should assert that the tag values are all strings - self.counts[values] += incr + if values not in self.counts: + self.counts[values] = incr + else: + self.counts[values] += incr def inc(self, *values): - self.counts[values] += 1 + self.inc_by(1, *values) def render_item(self, k): return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])] @@ -132,8 +132,6 @@ class CacheMetric(object): This metric generates standard metric name pairs, so that monitoring rules can easily be applied to measure hit ratio.""" - __slots__ = ("name", "hits", "total", "size") - def __init__(self, name, size_callback, labels=[]): self.name = name diff --git a/synapse/util/async.py b/synapse/util/async.py index 0d6f48e2d8..40be7fe7e3 100644 --- 
a/synapse/util/async.py +++ b/synapse/util/async.py @@ -102,6 +102,15 @@ class ObservableDeferred(object): def observers(self): return self._observers + def has_called(self): + return self._result is not None + + def has_succeeded(self): + return self._result is not None and self._result[0] is True + + def get_result(self): + return self._result[1] + def __getattr__(self, name): return getattr(self._deferred, name) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 4bbb16ed3c..5be4097279 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -311,12 +311,12 @@ class CacheListDescriptor(object): try: res = cache.get(tuple(key)) - if not res.called: + if not res.has_succeeded(): res = res.observe() res.addCallback(lambda r, arg: (arg, r), arg) cached_defers[arg] = res else: - results[arg] = res.result + results[arg] = res.get_result() except KeyError: missing.append(arg) From 81cf449daa8b310899014f5564f5fdf10289e79c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 11:19:27 +0100 Subject: [PATCH 075/414] Add methods to events, account data and receipt slaves Adds the methods needed by /sync to the slaved events, account data and receipt stores. --- .../replication/slave/storage/account_data.py | 41 ++++++++++++++++++- synapse/replication/slave/storage/events.py | 21 ++++++++-- synapse/replication/slave/storage/receipts.py | 25 ++++++++++- 3 files changed, 81 insertions(+), 6 deletions(-) diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index f59b0eabbc..735c03c7eb 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -15,7 +15,10 @@ from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker +from synapse.storage import DataStore from synapse.storage.account_data import AccountDataStore +from synapse.storage.tags import TagsStore +from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedAccountDataStore(BaseSlavedStore): @@ -25,6 +28,14 @@ class SlavedAccountDataStore(BaseSlavedStore): self._account_data_id_gen = SlavedIdTracker( db_conn, "account_data_max_stream_id", "stream_id", ) + self._account_data_stream_cache = StreamChangeCache( + "AccountDataAndTagsChangeCache", + self._account_data_id_gen.get_current_token(), + ) + + get_account_data_for_user = ( + AccountDataStore.__dict__["get_account_data_for_user"] + ) get_global_account_data_by_type_for_users = ( AccountDataStore.__dict__["get_global_account_data_by_type_for_users"] @@ -34,6 +45,16 @@ class SlavedAccountDataStore(BaseSlavedStore): AccountDataStore.__dict__["get_global_account_data_by_type_for_user"] ) + get_tags_for_user = TagsStore.__dict__["get_tags_for_user"] + + get_updated_tags = DataStore.get_updated_tags.__func__ + get_updated_account_data_for_user = ( + DataStore.get_updated_account_data_for_user.__func__ + ) + + def get_max_account_data_stream_id(self): + return self._account_data_id_gen.get_current_token() + def stream_positions(self): result = super(SlavedAccountDataStore, self).stream_positions() position = self._account_data_id_gen.get_current_token() @@ -47,15 +68,33 @@ class SlavedAccountDataStore(BaseSlavedStore): if stream: self._account_data_id_gen.advance(int(stream["position"])) for row in stream["rows"]: - user_id, data_type = row[1:3] + position, user_id, data_type = row[:3] self.get_global_account_data_by_type_for_user.invalidate( 
(data_type, user_id,) ) + self.get_account_data_for_user.invalidate((user_id,)) + self._account_data_stream_cache.entity_has_changed( + user_id, position + ) stream = result.get("room_account_data") if stream: self._account_data_id_gen.advance(int(stream["position"])) + for row in stream["rows"]: + position, user_id = row[:2] + self.get_account_data_for_user.invalidate((user_id,)) + self._account_data_stream_cache.entity_has_changed( + user_id, position + ) stream = result.get("tag_account_data") if stream: self._account_data_id_gen.advance(int(stream["position"])) + for row in stream["rows"]: + position, user_id = row[:2] + self.get_tags_for_user.invalidate((user_id,)) + self._account_data_stream_cache.entity_has_changed( + user_id, position + ) + + return super(SlavedAccountDataStore, self).process_replication(result) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index c0d741452d..cbc1ae4190 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -23,6 +23,7 @@ from synapse.storage.roommember import RoomMemberStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.event_push_actions import EventPushActionsStore from synapse.storage.state import StateStore +from synapse.storage.stream import StreamStore from synapse.util.caches.stream_change_cache import StreamChangeCache import ujson as json @@ -57,6 +58,9 @@ class SlavedEventStore(BaseSlavedStore): "EventsRoomStreamChangeCache", min_event_val, prefilled_cache=event_cache_prefill, ) + self._membership_stream_cache = StreamChangeCache( + "MembershipStreamChangeCache", events_max, + ) # Cached functions can't be accessed through a class instance so we need # to reach inside the __dict__ to extract them. 
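The __dict__ trick described in the comment above is worth unpacking: synapse's caching decorators replace each storage method with a descriptor object, and normal attribute access would trigger the descriptor protocol instead of handing back the reusable wrapper. A minimal sketch of the idea follows — CachedMethod, MasterStore, SlavedStore and get_thing are hypothetical stand-ins, not synapse's actual CacheDescriptor:

class CachedMethod(object):
    # Toy cache descriptor: memoises calls keyed on the positional args.
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __get__(self, obj, objtype=None):
        def wrapper(*args):
            if args not in self.cache:
                self.cache[args] = self.func(obj, *args)
            return self.cache[args]
        return wrapper


class MasterStore(object):
    @CachedMethod
    def get_thing(self, key):
        return "value-for-%s" % (key,)


class SlavedStore(object):
    # Copying the raw descriptor out of __dict__ keeps the caching wrapper
    # (and its cache) intact when it is re-bound onto the slaved class.
    get_thing = MasterStore.__dict__["get_thing"]

assert SlavedStore().get_thing("a") == "value-for-a"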
@@ -87,6 +91,9 @@ class SlavedEventStore(BaseSlavedStore): _get_state_group_from_group = ( StateStore.__dict__["_get_state_group_from_group"] ) + get_recent_event_ids_for_room = ( + StreamStore.__dict__["get_recent_event_ids_for_room"] + ) get_unread_push_actions_for_user_in_range = ( DataStore.get_unread_push_actions_for_user_in_range.__func__ @@ -109,10 +116,16 @@ class SlavedEventStore(BaseSlavedStore): DataStore.get_room_events_stream_for_room.__func__ ) get_events_around = DataStore.get_events_around.__func__ + get_state_for_event = DataStore.get_state_for_event.__func__ get_state_for_events = DataStore.get_state_for_events.__func__ get_state_groups = DataStore.get_state_groups.__func__ + get_recent_events_for_room = DataStore.get_recent_events_for_room.__func__ + get_room_events_stream_for_rooms = ( + DataStore.get_room_events_stream_for_rooms.__func__ + ) + get_stream_token_for_event = DataStore.get_stream_token_for_event.__func__ - _set_before_and_after = DataStore._set_before_and_after + _set_before_and_after = staticmethod(DataStore._set_before_and_after) _get_events = DataStore._get_events.__func__ _get_events_from_cache = DataStore._get_events_from_cache.__func__ @@ -220,9 +233,9 @@ class SlavedEventStore(BaseSlavedStore): self.get_rooms_for_user.invalidate((event.state_key,)) # self.get_joined_hosts_for_room.invalidate((event.room_id,)) self.get_users_in_room.invalidate((event.room_id,)) - # self._membership_stream_cache.entity_has_changed( - # event.state_key, event.internal_metadata.stream_ordering - # ) + self._membership_stream_cache.entity_has_changed( + event.state_key, event.internal_metadata.stream_ordering + ) self.get_invited_rooms_for_user.invalidate((event.state_key,)) if not event.is_state(): diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py index ec007516d0..ac9662d399 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -18,6 +18,7 @@ from ._slaved_id_tracker import SlavedIdTracker from synapse.storage import DataStore from synapse.storage.receipts import ReceiptsStore +from synapse.util.caches.stream_change_cache import StreamChangeCache # So, um, we want to borrow a load of functions intended for reading from # a DataStore, but we don't want to take functions that either write to the @@ -37,11 +38,28 @@ class SlavedReceiptsStore(BaseSlavedStore): db_conn, "receipts_linearized", "stream_id" ) + self._receipts_stream_cache = StreamChangeCache( + "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token() + ) + get_receipts_for_user = ReceiptsStore.__dict__["get_receipts_for_user"] + get_linearized_receipts_for_room = ( + ReceiptsStore.__dict__["get_linearized_receipts_for_room"] + ) + _get_linearized_receipts_for_rooms = ( + ReceiptsStore.__dict__["_get_linearized_receipts_for_rooms"] + ) + get_last_receipt_event_id_for_user = ( + ReceiptsStore.__dict__["get_last_receipt_event_id_for_user"] + ) get_max_receipt_stream_id = DataStore.get_max_receipt_stream_id.__func__ get_all_updated_receipts = DataStore.get_all_updated_receipts.__func__ + get_linearized_receipts_for_rooms = ( + DataStore.get_linearized_receipts_for_rooms.__func__ + ) + def stream_positions(self): result = super(SlavedReceiptsStore, self).stream_positions() result["receipts"] = self._receipts_id_gen.get_current_token() @@ -52,10 +70,15 @@ class SlavedReceiptsStore(BaseSlavedStore): if stream: self._receipts_id_gen.advance(int(stream["position"])) for row in stream["rows"]: 
- room_id, receipt_type, user_id = row[1:4] + position, room_id, receipt_type, user_id = row[:4] self.invalidate_caches_for_receipt(room_id, receipt_type, user_id) + self._receipts_stream_cache.entity_has_changed(room_id, position) return super(SlavedReceiptsStore, self).process_replication(result) def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id): self.get_receipts_for_user.invalidate((user_id, receipt_type)) + self.get_linearized_receipts_for_room.invalidate_many((room_id,)) + self.get_last_receipt_event_id_for_user.invalidate( + (user_id, room_id, receipt_type) + ) From ccb56fc24bd7d2675dc21796b29333538ca0d5fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 Jun 2016 12:47:06 +0100 Subject: [PATCH 076/414] Make get_joined_hosts_for_room use get_users_in_room --- synapse/storage/roommember.py | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 41b395e07c..64b4bd371b 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -238,23 +238,10 @@ class RoomMemberStore(SQLBaseStore): return results - @cached(max_entries=5000) + @cachedInlineCallbacks(max_entries=5000) def get_joined_hosts_for_room(self, room_id): - return self.runInteraction( - "get_joined_hosts_for_room", - self._get_joined_hosts_for_room_txn, - room_id, - ) - - def _get_joined_hosts_for_room_txn(self, txn, room_id): - rows = self._get_members_rows_txn( - txn, - room_id, membership=Membership.JOIN - ) - - joined_domains = set(get_domain_from_id(r["user_id"]) for r in rows) - - return joined_domains + user_ids = yield self.get_users_in_room(room_id) + defer.returnValue(set(get_domain_from_id(uid) for uid in user_ids)) def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None): rows = self._get_members_rows_txn( From 4c04222fa55d35ad3a75c5538ec477046b6c5b30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 Jun 2016 13:02:33 +0100 Subject: [PATCH 077/414] Poke notifier on next reactor tick --- synapse/handlers/message.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index c41dafdef5..15caf1950a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -26,9 +26,9 @@ from synapse.types import ( UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id ) from synapse.util import unwrapFirstError -from synapse.util.async import concurrently_execute +from synapse.util.async import concurrently_execute, run_on_reactor from synapse.util.caches.snapshot_cache import SnapshotCache -from synapse.util.logcontext import PreserveLoggingContext, preserve_fn +from synapse.util.logcontext import preserve_fn from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -908,13 +908,16 @@ class MessageHandler(BaseHandler): "Failed to get destination from event %s", s.event_id ) - with PreserveLoggingContext(): - # Don't block waiting on waking up all the listeners. + @defer.inlineCallbacks + def _notify(): + yield run_on_reactor() self.notifier.on_new_room_event( event, event_stream_id, max_stream_id, extra_users=extra_users ) + preserve_fn(_notify)() + # If invite, remove room_state from unsigned before sending. 
event.unsigned.pop("invite_room_state", None) From 73c711243382a48b9b67fddf5ed9df2d1ee1be43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 Jun 2016 11:29:44 +0100 Subject: [PATCH 078/414] Change CacheMetrics to be quicker We change it so that each cache has an individual CacheMetric, instead of having one global CacheMetric. This means that when a cache tries to increment a counter it does not need to go through so many indirections. --- synapse/metrics/__init__.py | 16 +++----- synapse/metrics/metric.py | 44 +++++++++++----------- synapse/util/caches/__init__.py | 20 +++++++--- synapse/util/caches/descriptors.py | 17 +++++++-- synapse/util/caches/dictionary_cache.py | 8 ++-- synapse/util/caches/expiringcache.py | 8 ++-- synapse/util/caches/stream_change_cache.py | 16 ++++---- tests/metrics/test_metric.py | 23 +++++------ 8 files changed, 82 insertions(+), 70 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 5664d5a381..c38f24485a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -33,11 +33,7 @@ from .metric import ( logger = logging.getLogger(__name__) -# We'll keep all the available metrics in a single toplevel dict, one shared -# for the entire process. We don't currently support per-HomeServer instances -# of metrics, because in practice any one python VM will host only one -# HomeServer anyway. This makes a lot of implementation neater -all_metrics = {} +all_metrics = [] class Metrics(object): @@ -53,7 +49,7 @@ class Metrics(object): metric = metric_class(full_name, *args, **kwargs) - all_metrics[full_name] = metric + all_metrics.append(metric) return metric def register_counter(self, *args, **kwargs): @@ -84,12 +80,12 @@ def render_all(): # TODO(paul): Internal hack update_resource_metrics() - for name in sorted(all_metrics.keys()): + for metric in all_metrics: try: - strs += all_metrics[name].render() + strs += metric.render() except Exception: - strs += ["# FAILED to render %s" % name] - logger.exception("Failed to render %s metric", name) + strs += ["# FAILED to render"] + logger.exception("Failed to render metric") strs.append("") # to generate a final CRLF diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 368fc24984..341043952a 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -47,9 +47,6 @@ class BaseMetric(object): for k, v in zip(self.labels, values)]) ) - def render(self): - return map_concat(self.render_item, sorted(self.counts.keys())) - class CounterMetric(BaseMetric): """The simplest kind of metric; one that stores a monotonically-increasing @@ -83,6 +80,9 @@ class CounterMetric(BaseMetric): def render_item(self, k): return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])] + def render(self): + return map_concat(self.render_item, sorted(self.counts.keys())) + class CallbackMetric(BaseMetric): """A metric that returns the numeric value returned by a callback whenever @@ -126,30 +126,30 @@ class DistributionMetric(object): class CacheMetric(object): - """A combination of two CounterMetrics, one to count cache hits and one to - count a total, and a callback metric to yield the current size. 
+ __slots__ = ("name", "cache_name", "hits", "misses", "size_callback") - This metric generates standard metric name pairs, so that monitoring rules - can easily be applied to measure hit ratio.""" - - def __init__(self, name, size_callback, labels=[]): + def __init__(self, name, size_callback, cache_name): self.name = name + self.cache_name = cache_name - self.hits = CounterMetric(name + ":hits", labels=labels) - self.total = CounterMetric(name + ":total", labels=labels) + self.hits = 0 + self.misses = 0 - self.size = CallbackMetric( - name + ":size", - callback=size_callback, - labels=labels, - ) + self.size_callback = size_callback - def inc_hits(self, *values): - self.hits.inc(*values) - self.total.inc(*values) + def inc_hits(self): + self.hits += 1 - def inc_misses(self, *values): - self.total.inc(*values) + def inc_misses(self): + self.misses += 1 def render(self): - return self.hits.render() + self.total.render() + self.size.render() + size = self.size_callback() + hits = self.hits + total = self.misses + self.hits + + return [ + """%s:hits{name="%s"} %d""" % (self.name, self.cache_name, hits), + """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total), + """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size), + ] diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index d53569ca49..ebd715c5dc 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -24,11 +24,21 @@ DEBUG_CACHES = False metrics = synapse.metrics.get_metrics_for("synapse.util.caches") caches_by_name = {} -cache_counter = metrics.register_cache( - "cache", - lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()}, - labels=["name"], -) +# cache_counter = metrics.register_cache( +# "cache", +# lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()}, +# labels=["name"], +# ) + + +def register_cache(name, cache): + caches_by_name[name] = cache + return metrics.register_cache( + "cache", + lambda: len(cache), + name, + ) + _string_cache = LruCache(int(5000 * CACHE_SIZE_FACTOR)) caches_by_name["string_cache"] = _string_cache diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 758f5982b0..5d25c9e762 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -22,7 +22,7 @@ from synapse.util.logcontext import ( PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn ) -from . import caches_by_name, DEBUG_CACHES, cache_counter +from . 
import DEBUG_CACHES, register_cache from twisted.internet import defer @@ -43,6 +43,15 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1)) class Cache(object): + __slots__ = ( + "cache", + "max_entries", + "name", + "keylen", + "sequence", + "thread", + "metrics", + ) def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False): if lru: @@ -59,7 +68,7 @@ class Cache(object): self.keylen = keylen self.sequence = 0 self.thread = None - caches_by_name[name] = self.cache + self.metrics = register_cache(name, self.cache) def check_thread(self): expected_thread = self.thread @@ -74,10 +83,10 @@ class Cache(object): def get(self, key, default=_CacheSentinel): val = self.cache.get(key, _CacheSentinel) if val is not _CacheSentinel: - cache_counter.inc_hits(self.name) + self.metrics.inc_hits() return val - cache_counter.inc_misses(self.name) + self.metrics.inc_misses() if default is _CacheSentinel: raise KeyError() diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index f92d80542b..b0ca1bb79d 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -15,7 +15,7 @@ from synapse.util.caches.lrucache import LruCache from collections import namedtuple -from . import caches_by_name, cache_counter +from . import register_cache import threading import logging @@ -43,7 +43,7 @@ class DictionaryCache(object): __slots__ = [] self.sentinel = Sentinel() - caches_by_name[name] = self.cache + self.metrics = register_cache(name, self.cache) def check_thread(self): expected_thread = self.thread @@ -58,7 +58,7 @@ class DictionaryCache(object): def get(self, key, dict_keys=None): entry = self.cache.get(key, self.sentinel) if entry is not self.sentinel: - cache_counter.inc_hits(self.name) + self.metrics.inc_hits() if dict_keys is None: return DictionaryEntry(entry.full, dict(entry.value)) @@ -69,7 +69,7 @@ class DictionaryCache(object): if k in entry.value }) - cache_counter.inc_misses(self.name) + self.metrics.inc_misses() return DictionaryEntry(False, {}) def invalidate(self, key): diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 2b68c1ac93..080388958f 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.util.caches import cache_counter, caches_by_name +from synapse.util.caches import register_cache import logging @@ -49,7 +49,7 @@ class ExpiringCache(object): self._cache = {} - caches_by_name[cache_name] = self._cache + self.metrics = register_cache(cache_name, self._cache) def start(self): if not self._expiry_ms: @@ -78,9 +78,9 @@ class ExpiringCache(object): def __getitem__(self, key): try: entry = self._cache[key] - cache_counter.inc_hits(self._cache_name) + self.metrics.inc_hits() except KeyError: - cache_counter.inc_misses(self._cache_name) + self.metrics.inc_misses() raise if self._reset_expiry_on_get: diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index ea8a74ca69..3c051dabc4 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.util.caches import cache_counter, caches_by_name +from synapse.util.caches import register_cache from blist import sorteddict @@ -42,7 +42,7 @@ class StreamChangeCache(object): self._cache = sorteddict() self._earliest_known_stream_pos = current_stream_pos self.name = name - caches_by_name[self.name] = self._cache + self.metrics = register_cache(self.name, self._cache) for entity, stream_pos in prefilled_cache.items(): self.entity_has_changed(entity, stream_pos) @@ -53,19 +53,19 @@ class StreamChangeCache(object): assert type(stream_pos) is int if stream_pos < self._earliest_known_stream_pos: - cache_counter.inc_misses(self.name) + self.metrics.inc_misses() return True latest_entity_change_pos = self._entity_to_key.get(entity, None) if latest_entity_change_pos is None: - cache_counter.inc_hits(self.name) + self.metrics.inc_hits() return False if stream_pos < latest_entity_change_pos: - cache_counter.inc_misses(self.name) + self.metrics.inc_misses() return True - cache_counter.inc_hits(self.name) + self.metrics.inc_hits() return False def get_entities_changed(self, entities, stream_pos): @@ -82,10 +82,10 @@ class StreamChangeCache(object): self._cache[k] for k in keys[i:] ).intersection(entities) - cache_counter.inc_hits(self.name) + self.metrics.inc_hits() else: result = entities - cache_counter.inc_misses(self.name) + self.metrics.inc_misses() return result diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py index f3c1927ce1..f85455a5af 100644 --- a/tests/metrics/test_metric.py +++ b/tests/metrics/test_metric.py @@ -61,9 +61,6 @@ class CounterMetricTestCase(unittest.TestCase): 'vector{method="PUT"} 1', ]) - # Check that passing too few values errors - self.assertRaises(ValueError, counter.inc) - class CallbackMetricTestCase(unittest.TestCase): @@ -138,27 +135,27 @@ class CacheMetricTestCase(unittest.TestCase): def test_cache(self): d = dict() - metric = CacheMetric("cache", lambda: len(d)) + metric = CacheMetric("cache", lambda: len(d), "cache_name") self.assertEquals(metric.render(), [ - 'cache:hits 0', - 'cache:total 0', - 'cache:size 0', + 'cache:hits{name="cache_name"} 0', + 'cache:total{name="cache_name"} 0', + 'cache:size{name="cache_name"} 0', ]) metric.inc_misses() d["key"] = "value" self.assertEquals(metric.render(), [ - 'cache:hits 0', - 'cache:total 1', - 'cache:size 1', + 'cache:hits{name="cache_name"} 0', + 'cache:total{name="cache_name"} 1', + 'cache:size{name="cache_name"} 1', ]) metric.inc_hits() self.assertEquals(metric.render(), [ - 'cache:hits 1', - 'cache:total 2', - 'cache:size 1', + 'cache:hits{name="cache_name"} 1', + 'cache:total{name="cache_name"} 2', + 'cache:size{name="cache_name"} 1', ]) From 58a224a6515dceacebc729f1e6fbb87a22f3a35a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 11:47:07 +0100 Subject: [PATCH 079/414] Pull out update_results_dict --- synapse/util/caches/descriptors.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 5be4097279..799fd2a9c6 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -32,7 +32,7 @@ import os import functools import inspect import threading -import itertools + logger = logging.getLogger(__name__) @@ -357,17 +357,16 @@ class CacheListDescriptor(object): cached_defers[arg] = res if cached_defers: + def update_results_dict(res): + results.update(res) + return results + return 
preserve_context_over_deferred(defer.gatherResults( cached_defers.values(), consumeErrors=True, - ).addCallback( - lambda res: { - k: v - for k, v in itertools.chain(results.items(), res) - } - )).addErrback( + ).addCallback(update_results_dict).addErrback( unwrapFirstError - ) + )) else: return results From abb151f3c9bf78f2825dba18da6bbc88ce61d32c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 11:57:26 +0100 Subject: [PATCH 080/414] Add a separate process that can handle /sync requests --- synapse/app/synchrotron.py | 467 +++++++++++++++++++++++++++++++++++++ 1 file changed, 467 insertions(+) create mode 100644 synapse/app/synchrotron.py diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py new file mode 100644 index 0000000000..f592ad352e --- /dev/null +++ b/synapse/app/synchrotron.py @@ -0,0 +1,467 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synapse + +from synapse.api.constants import EventTypes +from synapse.config._base import ConfigError +from synapse.config.database import DatabaseConfig +from synapse.config.logger import LoggingConfig +from synapse.config.appservice import AppServiceConfig +from synapse.events import FrozenEvent +from synapse.handlers.presence import PresenceHandler +from synapse.http.site import SynapseSite +from synapse.http.server import JsonResource +from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.rest.client.v2_alpha import sync +from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.replication.slave.storage.account_data import SlavedAccountDataStore +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.slave.storage.filtering import SlavedFilteringStore +from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore +from synapse.replication.slave.storage.presence import SlavedPresenceStore +from synapse.server import HomeServer +from synapse.storage.engines import create_engine +from synapse.storage.presence import UserPresenceState +from synapse.storage.roommember import RoomMemberStore +from synapse.util.async import sleep +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.logcontext import LoggingContext +from synapse.util.manhole import manhole +from synapse.util.rlimit import change_resource_limit +from synapse.util.stringutils import random_string +from synapse.util.versionstring import get_version_string + +from twisted.internet import reactor, defer +from twisted.web.resource import Resource + +from daemonize import Daemonize + +import sys +import logging +import contextlib +import ujson as json + +logger = logging.getLogger("synapse.app.synchrotron") + + +class SynchrotronConfig(DatabaseConfig, 
LoggingConfig, AppServiceConfig):
+    def read_config(self, config):
+        self.replication_url = config["replication_url"]
+        self.server_name = config["server_name"]
+        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
+            "use_insecure_ssl_client_just_for_testing_do_not_use", False
+        )
+        self.user_agent_suffix = None
+        self.listeners = config["listeners"]
+        self.soft_file_limit = config.get("soft_file_limit")
+        self.daemonize = config.get("daemonize")
+        self.pid_file = self.abspath(config.get("pid_file"))
+        self.macaroon_secret_key = config["macaroon_secret_key"]
+        self.expire_access_token = config.get("expire_access_token", False)
+
+    def default_config(self, server_name, **kwargs):
+        pid_file = self.abspath("synchrotron.pid")
+        return """\
+        # Slave configuration
+
+        # The replication listener on the synapse to talk to.
+        #replication_url: https://localhost:{replication_port}/_synapse/replication
+
+        server_name: "%(server_name)s"
+
+        listeners:
+        # Enable a /sync listener on the synchrotron
+        #- type: http
+        #    port: {http_port}
+        #    bind_address: ""
+        # Enable an ssh manhole listener on the synchrotron
+        # - type: manhole
+        #   port: {manhole_port}
+        #   bind_address: 127.0.0.1
+        # Enable a metric listener on the synchrotron
+        # - type: http
+        #   port: {metrics_port}
+        #   bind_address: 127.0.0.1
+        #   resources:
+        #     - names: ["metrics"]
+        #       compress: False
+
+        report_stats: False
+
+        daemonize: False
+
+        pid_file: %(pid_file)s
+        """ % locals()
+
+
+class SynchrotronSlavedStore(
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedReceiptsStore,
+    SlavedAccountDataStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedFilteringStore,
+    SlavedPresenceStore,
+):
+    def get_presence_list_accepted(self, user_localpart):
+        return ()
+
+    def insert_client_ip(self, user, access_token, ip, user_agent):
+        pass
+
+    # XXX: This is a bit broken because we don't persist forgotten rooms
+    # in a way that they can be streamed. This means that we don't have a
+    # way to invalidate the forgotten rooms cache correctly.
+    # For now we expire the cache every hour.
+    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
+    who_forgot_in_room = (
+        RoomMemberStore.__dict__["who_forgot_in_room"]
+    )
+
+
+class SynchrotronPresence(object):
+    def __init__(self, hs):
+        self.http_client = hs.get_simple_http_client()
+        self.store = hs.get_datastore()
+        self.user_to_num_current_syncs = {}
+        self.syncing_users_url = hs.config.replication_url + "/syncing_users"
+        self.clock = hs.get_clock()
+
+        active_presence = self.store.take_presence_startup_info()
+        self.user_to_current_state = {
+            state.user_id: state
+            for state in active_presence
+        }
+
+        self.process_id = random_string(16)
+        logger.info("Presence process_id is %r", self.process_id)
+
+    def set_state(self, user, state):
+        # TODO: How's this supposed to work?
+        pass
+
+    get_states = PresenceHandler.get_states.__func__
+    current_state_for_users = PresenceHandler.current_state_for_users.__func__
+
+    @defer.inlineCallbacks
+    def user_syncing(self, user_id, affect_presence):
+        if affect_presence:
+            curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+            self.user_to_num_current_syncs[user_id] = curr_sync + 1
+            # TODO: Send this less frequently.
+            # TODO: Make sure this doesn't race. Currently we can lose updates
+            # if two users come online in quick succession and the second http
+            # to the master completes before the first.
+            # TODO: Don't block the sync request on this HTTP hit.
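+            # (_send_syncing_users POSTs the full set of currently-syncing
+            # user IDs to the master's /syncing_users replication endpoint;
+            # see self.syncing_users_url above.)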
+ yield self._send_syncing_users() + + def _end(): + if affect_presence: + self.user_to_num_current_syncs[user_id] -= 1 + + @contextlib.contextmanager + def _user_syncing(): + try: + yield + finally: + _end() + + defer.returnValue(_user_syncing()) + + def _send_syncing_users(self): + return self.http_client.post_json_get_json(self.syncing_users_url, { + "process_id": self.process_id, + "syncing_users": [ + user_id for user_id, count in self.user_to_num_current_syncs.items() + if count > 0 + ], + }) + + def process_replication(self, result): + stream = result.get("presence", {"rows": []}) + for row in stream["rows"]: + ( + position, user_id, state, last_active_ts, + last_federation_update_ts, last_user_sync_ts, status_msg, + currently_active + ) = row + self.user_to_current_state[user_id] = UserPresenceState( + user_id, state, last_active_ts, + last_federation_update_ts, last_user_sync_ts, status_msg, + currently_active + ) + + +class SynchrotronTyping(object): + def __init__(self, hs): + self._latest_room_serial = 0 + self._room_serials = {} + self._room_typing = {} + + def stream_positions(self): + return {"typing": self._latest_room_serial} + + def process_replication(self, result): + stream = result.get("typing") + if stream: + self._latest_room_serial = int(stream["position"]) + + for row in stream["rows"]: + position, room_id, typing_json = row + typing = json.loads(typing_json) + self._room_serials[room_id] = position + self._room_typing[room_id] = typing + + +class SynchrotronApplicationService(object): + def notify_interested_services(self, event): + pass + + +class SynchrotronServer(HomeServer): + def get_db_conn(self, run_new_connection=True): + # Any param beginning with cp_ is a parameter for adbapi, and should + # not be passed to the database engine. 
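+        # (Twisted's adbapi ConnectionPool consumes the cp_-prefixed
+        # arguments, e.g. cp_min and cp_max for the pool size.)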
+ db_params = { + k: v for k, v in self.db_config.get("args", {}).items() + if not k.startswith("cp_") + } + db_conn = self.database_engine.module.connect(**db_params) + + if run_new_connection: + self.database_engine.on_new_connection(db_conn) + return db_conn + + def setup(self): + logger.info("Setting up.") + self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self) + logger.info("Finished setting up.") + + def _listen_http(self, listener_config): + port = listener_config["port"] + bind_address = listener_config.get("bind_address", "") + site_tag = listener_config.get("tag", port) + resources = {} + for res in listener_config["resources"]: + for name in res["names"]: + if name == "metrics": + resources[METRICS_PREFIX] = MetricsResource(self) + elif name == "client": + resource = JsonResource(self, canonical_json=False) + sync.register_servlets(self, resource) + resources.update({ + "/_matrix/client/r0": resource, + "/_matrix/client/unstable": resource, + "/_matrix/client/v2_alpha": resource, + }) + + root_resource = create_resource_tree(resources, Resource()) + reactor.listenTCP( + port, + SynapseSite( + "synapse.access.http.%s" % (site_tag,), + site_tag, + listener_config, + root_resource, + ), + interface=bind_address + ) + logger.info("Synapse synchrotron now listening on port %d", port) + + def start_listening(self): + for listener in self.config.listeners: + if listener["type"] == "http": + self._listen_http(listener) + elif listener["type"] == "manhole": + reactor.listenTCP( + listener["port"], + manhole( + username="matrix", + password="rabbithole", + globals={"hs": self}, + ), + interface=listener.get("bind_address", '127.0.0.1') + ) + else: + logger.warn("Unrecognized listener type: %s", listener["type"]) + + @defer.inlineCallbacks + def replicate(self): + http_client = self.get_simple_http_client() + store = self.get_datastore() + replication_url = self.config.replication_url + clock = self.get_clock() + notifier = self.get_notifier() + presence_handler = self.get_presence_handler() + typing_handler = self.get_typing_handler() + + def expire_broken_caches(): + store.who_forgot_in_room.invalidate_all() + + def notify_from_stream( + result, stream_name, stream_key, room=None, user=None + ): + stream = result.get(stream_name) + if stream: + position_index = stream["field_names"].index("position") + if room: + room_index = stream["field_names"].index(room) + if user: + user_index = stream["field_names"].index(user) + + users = () + rooms = () + for row in stream["rows"]: + position = row[position_index] + + if user: + users = (row[user_index],) + + if room: + rooms = (row[room_index],) + + notifier.on_new_event( + stream_key, position, users=users, rooms=rooms + ) + + def notify(result): + stream = result.get("events") + if stream: + max_position = stream["position"] + for row in stream["rows"]: + position = row[0] + internal = json.loads(row[1]) + event_json = json.loads(row[2]) + event = FrozenEvent(event_json, internal_metadata_dict=internal) + extra_users = () + if event.type == EventTypes.Member: + extra_users = (event.state_key,) + notifier.on_new_room_event( + event, position, max_position, extra_users + ) + + notify_from_stream( + result, "push_rules", "push_rules_key", user="user_id" + ) + notify_from_stream( + result, "user_account_data", "account_data_key", user="user_id" + ) + notify_from_stream( + result, "room_account_data", "account_data_key", user="user_id" + ) + notify_from_stream( + result, "tag_account_data", "account_data_key", user="user_id" + ) + 
notify_from_stream(
+                result, "receipts", "receipt_key", room="room_id"
+            )
+            notify_from_stream(
+                result, "typing", "typing_key", room="room_id"
+            )
+
+        next_expire_broken_caches_ms = 0
+        while True:
+            try:
+                args = store.stream_positions()
+                args.update(typing_handler.stream_positions())
+                args["timeout"] = 30000
+                result = yield http_client.get_json(replication_url, args=args)
+                now_ms = clock.time_msec()
+                if now_ms > next_expire_broken_caches_ms:
+                    expire_broken_caches()
+                    next_expire_broken_caches_ms = (
+                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
+                    )
+                yield store.process_replication(result)
+                typing_handler.process_replication(result)
+                presence_handler.process_replication(result)
+                notify(result)
+            except:
+                logger.exception("Error replicating from %r", replication_url)
+                yield sleep(5)
+
+    def build_presence_handler(self):
+        return SynchrotronPresence(self)
+
+    def build_typing_handler(self):
+        return SynchrotronTyping(self)
+
+
+def setup(config_options):
+    try:
+        config = SynchrotronConfig.load_config(
+            "Synapse synchrotron", config_options
+        )
+    except ConfigError as e:
+        sys.stderr.write("\n" + e.message + "\n")
+        sys.exit(1)
+
+    if not config:
+        sys.exit(0)
+
+    config.setup_logging()
+
+    database_engine = create_engine(config.database_config)
+
+    ss = SynchrotronServer(
+        config.server_name,
+        db_config=config.database_config,
+        config=config,
+        version_string=get_version_string("Synapse", synapse),
+        database_engine=database_engine,
+        application_service_handler=SynchrotronApplicationService(),
+    )
+
+    ss.setup()
+    ss.start_listening()
+
+    change_resource_limit(ss.config.soft_file_limit)
+
+    def start():
+        ss.get_datastore().start_profiling()
+        ss.replicate()
+
+    reactor.callWhenRunning(start)
+
+    return ss
+
+
+if __name__ == '__main__':
+    with LoggingContext("main"):
+        ps = setup(sys.argv[1:])
+
+        if ps.config.daemonize:
+            def run():
+                with LoggingContext("run"):
+                    change_resource_limit(ps.config.soft_file_limit)
+                    reactor.run()
+
+            daemon = Daemonize(
+                app="synapse-synchrotron",
+                pid=ps.config.pid_file,
+                action=run,
+                auto_close_fds=False,
+                verbose=True,
+                logger=logger,
+            )
+
+            daemon.start()
+        else:
+            reactor.run()

From a7ff5a17702812ae586228396d534a8ed3d88475 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 13:40:55 +0100
Subject: [PATCH 081/414] Presence metrics.
Change definition of small delta

---
 synapse/handlers/presence.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index fc8538b41e..eb877763ee 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -50,6 +50,9 @@ timers_fired_counter = metrics.register_counter("timers_fired")
 federation_presence_counter = metrics.register_counter("federation_presence")
 bump_active_time_counter = metrics.register_counter("bump_active_time")
 
+full_update_presence_counter = metrics.register_counter("full_update_presence")
+partial_update_presence_counter = metrics.register_counter("partial_update_presence")
+
 
 # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
 # "currently_active"
@@ -974,13 +977,13 @@ class PresenceEventSource(object):
             user_ids_changed = set()
             changed = None
-            if from_key and max_token - from_key < 100:
-                # For small deltas, it's quicker to get all changes and then
-                # work out if we share a room or they're in our presence list
+            if from_key:
                 changed = stream_change_cache.get_all_entities_changed(from_key)
 
-            # get_all_entities_changed can return None
-            if changed is not None:
+            if changed is not None and len(changed) < 100:
+                # For small deltas, it's quicker to get all changes and then
+                # work out if we share a room or they're in our presence list
+                partial_update_presence_counter.inc()
                 for other_user_id in changed:
                     if other_user_id in friends:
                         user_ids_changed.add(other_user_id)
@@ -992,6 +995,8 @@ class PresenceEventSource(object):
             else:
                 # Too many possible updates. Find all users we can see and check
                 # if any of them have changed.
+                full_update_presence_counter.inc()
+
                 user_ids_to_check = set()
                 for room_id in room_ids:
                     users = yield self.store.get_users_in_room(room_id)

From 4ce84a1acd89a7f61896e92605e5463864848122 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 13:49:16 +0100
Subject: [PATCH 082/414] Change metric style

---
 synapse/handlers/presence.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index eb877763ee..0e19f777b8 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -50,8 +50,7 @@ timers_fired_counter = metrics.register_counter("timers_fired")
 federation_presence_counter = metrics.register_counter("federation_presence")
 bump_active_time_counter = metrics.register_counter("bump_active_time")
 
-full_update_presence_counter = metrics.register_counter("full_update_presence")
-partial_update_presence_counter = metrics.register_counter("partial_update_presence")
+get_updates_counter = metrics.register_counter("get_updates", labels=["type"])
 
 
 # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
@@ -980,10 +979,10 @@ class PresenceEventSource(object):
             if from_key:
                 changed = stream_change_cache.get_all_entities_changed(from_key)
 
-            if changed is not None and len(changed) < 100:
+            if changed is not None and len(changed) < 500:
                 # For small deltas, it's quicker to get all changes and then
                 # work out if we share a room or they're in our presence list
-                partial_update_presence_counter.inc()
+                get_updates_counter.inc("stream")
                 for other_user_id in changed:
                     if other_user_id in friends:
                         user_ids_changed.add(other_user_id)
@@ -995,7 +994,7 @@ class PresenceEventSource(object):
             else:
                 # Too many possible updates. Find all users we can see and check
                 # if any of them have changed.
-                full_update_presence_counter.inc()
+                get_updates_counter.inc("full")
 
                 user_ids_to_check = set()
                 for room_id in room_ids:
                     users = yield self.store.get_users_in_room(room_id)

From ab116bdb0c2d8e295b1473af84c453d212dc07ea Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 14:03:42 +0100
Subject: [PATCH 083/414] Fix typo

---
 synapse/handlers/typing.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 3c54307bed..861b8f7989 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -140,7 +140,7 @@ class TypingHandler(object):
     def user_left_room(self, user, room_id):
         user_id = user.to_string()
         if self.is_mine_id(user_id):
-            member = RoomMember(room_id=room_id, user=user_id)
+            member = RoomMember(room_id=room_id, user_id=user_id)
             yield self._stopped_typing(member)
 
     @defer.inlineCallbacks

From 80aade380545a0b661e2bbef48e175900ed4d41f Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:24:19 +0100
Subject: [PATCH 084/414] Send updates to the syncing users every ten seconds
 or immediately if they've just come online

---
 synapse/app/synchrotron.py | 53 +++++++++++++++++++++++++++++++-------
 1 file changed, 43 insertions(+), 10 deletions(-)

diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index f592ad352e..7b45c87a96 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -16,7 +16,7 @@
 
 import synapse
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, PresenceState
 from synapse.config._base import ConfigError
 from synapse.config.database import DatabaseConfig
 from synapse.config.logger import LoggingConfig
@@ -41,7 +41,7 @@ from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
 from synapse.util.async import sleep
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext
+from synapse.util.logcontext import LoggingContext, preserve_fn
 from synapse.util.manhole import manhole
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.stringutils import random_string
@@ -135,6 +135,8 @@ class SynchrotronSlavedStore(
         RoomMemberStore.__dict__["who_forgot_in_room"]
     )
 
+UPDATE_SYNCING_USERS_MS = 10 * 1000
+
 
 class SynchrotronPresence(object):
     def __init__(self, hs):
@@ -153,6 +155,13 @@ class SynchrotronPresence(object):
         self.process_id = random_string(16)
         logger.info("Presence process_id is %r", self.process_id)
 
+        self._sending_sync = False
+        self._need_to_send_sync = False
+        self.clock.looping_call(
+            self._send_syncing_users_regularly,
+            UPDATE_SYNCING_USERS_MS,
+        )
+
     def set_state(self, user, state):
         # TODO: How's this supposed to work?
         pass
@@ -165,12 +174,10 @@ class SynchrotronPresence(object):
         if affect_presence:
             curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
             self.user_to_num_current_syncs[user_id] = curr_sync + 1
-            # TODO: Send this less frequently.
-            # TODO: Make sure this doesn't race. Currently we can lose updates
-            # if two users come online in quick succession and the second http
-            # to the master completes before the first.
-            # TODO: Don't block the sync request on this HTTP hit.
-            yield self._send_syncing_users()
+            prev_states = yield self.current_state_for_users([user_id])
+            if prev_states[user_id].state == PresenceState.OFFLINE:
+                # TODO: Don't block the sync request on this HTTP hit.
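+                # (Only the offline-to-online transition hits the master
+                # synchronously; steady-state updates go out on the
+                # UPDATE_SYNCING_USERS_MS timer above.)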
+ yield self._send_syncing_users_now() def _end(): if affect_presence: @@ -185,8 +192,24 @@ class SynchrotronPresence(object): defer.returnValue(_user_syncing()) - def _send_syncing_users(self): - return self.http_client.post_json_get_json(self.syncing_users_url, { + def _send_syncing_users_regularly(self): + # Only send an update if we aren't in the middle of sending one. + if not self._sending_sync: + preserve_fn(self._send_syncing_users_now)() + + @defer.inlineCallbacks + def _send_syncing_users_now(self): + if self._sending_sync: + # We don't want to race with sending another update. + # Instead we wait for that update to finish and send another + # update afterwards. + self._need_to_send_sync = True + return + + # Flag that we are sending an update. + self._sending_sync = True + + yield self.http_client.post_json_get_json(self.syncing_users_url, { "process_id": self.process_id, "syncing_users": [ user_id for user_id, count in self.user_to_num_current_syncs.items() @@ -194,6 +217,16 @@ class SynchrotronPresence(object): ], }) + # Unset the flag as we are no longer sending an update. + self._sending_sync = False + if self._need_to_send_sync: + # If something happened while we were sending the update then + # we might need to send another update. + # TODO: Check if the update that was sent matches the current state + # as we only need to send an update if they are different. + self._need_to_send_sync = False + yield self._send_syncing_users_now() + def process_replication(self, result): stream = result.get("presence", {"rows": []}) for row in stream["rows"]: From eef541a2919649e6d756d45a29d47fe76cfe02e2 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 14:42:35 +0100 Subject: [PATCH 085/414] Move insert_client_ip to a separate class --- synapse/storage/__init__.py | 48 ++----------------------- synapse/storage/client_ips.py | 68 +++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 45 deletions(-) create mode 100644 synapse/storage/client_ips.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 6928a213e8..e93c3de66c 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -17,7 +17,7 @@ from twisted.internet import defer from .appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore ) -from ._base import Cache, LoggingTransaction +from ._base import LoggingTransaction from .directory import DirectoryStore from .events import EventsStore from .presence import PresenceStore, UserPresenceState @@ -45,6 +45,7 @@ from .search import SearchStore from .tags import TagsStore from .account_data import AccountDataStore from .openid import OpenIdStore +from .client_ips import ClientIpStore from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator @@ -58,12 +59,6 @@ import logging logger = logging.getLogger(__name__) -# Number of msec of granularity to store the user IP 'last seen' time. 
Smaller -# times give more inserts into the database even for readonly API hits -# 120 seconds == 2 minutes -LAST_SEEN_GRANULARITY = 120 * 1000 - - class DataStore(RoomMemberStore, RoomStore, RegistrationStore, StreamStore, ProfileStore, PresenceStore, TransactionStore, @@ -84,6 +79,7 @@ class DataStore(RoomMemberStore, RoomStore, AccountDataStore, EventPushActionsStore, OpenIdStore, + ClientIpStore, ): def __init__(self, db_conn, hs): @@ -91,11 +87,6 @@ class DataStore(RoomMemberStore, RoomStore, self._clock = hs.get_clock() self.database_engine = hs.database_engine - self.client_ip_last_seen = Cache( - name="client_ip_last_seen", - keylen=4, - ) - self._stream_id_gen = StreamIdGenerator( db_conn, "events", "stream_ordering", extra_tables=[("local_invites", "stream_id")] @@ -216,39 +207,6 @@ class DataStore(RoomMemberStore, RoomStore, return [UserPresenceState(**row) for row in rows] - @defer.inlineCallbacks - def insert_client_ip(self, user, access_token, ip, user_agent): - now = int(self._clock.time_msec()) - key = (user.to_string(), access_token, ip) - - try: - last_seen = self.client_ip_last_seen.get(key) - except KeyError: - last_seen = None - - # Rate-limited inserts - if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY: - defer.returnValue(None) - - self.client_ip_last_seen.prefill(key, now) - - # It's safe not to lock here: a) no unique constraint, - # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely - yield self._simple_upsert( - "user_ips", - keyvalues={ - "user_id": user.to_string(), - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - }, - values={ - "last_seen": now, - }, - desc="insert_client_ip", - lock=False, - ) - @defer.inlineCallbacks def count_daily_users(self): """ diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py new file mode 100644 index 0000000000..a90990e006 --- /dev/null +++ b/synapse/storage/client_ips.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import SQLBaseStore, Cache + +from twisted.internet import defer + + +# Number of msec of granularity to store the user IP 'last seen' time. 
Smaller
+# times give more inserts into the database even for readonly API hits
+# 120 seconds == 2 minutes
+LAST_SEEN_GRANULARITY = 120 * 1000
+
+
+class ClientIpStore(SQLBaseStore):
+
+    def __init__(self, hs):
+        self.client_ip_last_seen = Cache(
+            name="client_ip_last_seen",
+            keylen=4,
+        )
+
+        super(ClientIpStore, self).__init__(hs)
+
+    @defer.inlineCallbacks
+    def insert_client_ip(self, user, access_token, ip, user_agent):
+        now = int(self._clock.time_msec())
+        key = (user.to_string(), access_token, ip)
+
+        try:
+            last_seen = self.client_ip_last_seen.get(key)
+        except KeyError:
+            last_seen = None
+
+        # Rate-limited inserts
+        if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
+            defer.returnValue(None)
+
+        self.client_ip_last_seen.prefill(key, now)
+
+        # It's safe not to lock here: a) no unique constraint,
+        # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
+        yield self._simple_upsert(
+            "user_ips",
+            keyvalues={
+                "user_id": user.to_string(),
+                "access_token": access_token,
+                "ip": ip,
+                "user_agent": user_agent,
+            },
+            values={
+                "last_seen": now,
+            },
+            desc="insert_client_ip",
+            lock=False,
+        )

From 0b3c80a234cd8f16c8714af7e7b719dc2e635b20 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:55:01 +0100
Subject: [PATCH 086/414] Use ClientIpStore to record client ips

---
 synapse/app/synchrotron.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 7b45c87a96..0446a1643d 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -27,6 +27,7 @@ from synapse.http.site import SynapseSite
 from synapse.http.server import JsonResource
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.rest.client.v2_alpha import sync
+from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
@@ -36,6 +37,7 @@ from synapse.replication.slave.storage.filtering import SlavedFilteringStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.presence import SlavedPresenceStore
 from synapse.server import HomeServer
+from synapse.storage.client_ips import ClientIpStore
 from synapse.storage.engines import create_engine
 from synapse.storage.presence import UserPresenceState
 from synapse.storage.roommember import RoomMemberStore
@@ -119,13 +121,12 @@ class SynchrotronSlavedStore(
     SlavedRegistrationStore,
     SlavedFilteringStore,
     SlavedPresenceStore,
+    BaseSlavedStore,
+    ClientIpStore, # After BaseSlavedStore because the constructor is different
 ):
     def get_presence_list_accepted(self, user_localpart):
         return ()
 
-    def insert_client_ip(self, user, access_token, ip, user_agent):
-        pass
-
     # XXX: This is a bit broken because we don't persist forgotten rooms
     # in a way that they can be streamed. This means that we don't have a
     # way to invalidate the forgotten rooms cache correctly.
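The ordering caveat flagged in the comment above is a method-resolution-order constraint: Python finds __init__ on the first base class that defines one, so the base whose constructor matches the (db_conn, hs) calling convention has to precede ClientIpStore, whose constructor only takes hs. A toy illustration with simplified, hypothetical constructors — the real stores also chain through super().__init__, which this sketch omits:

class BaseSlavedStore(object):
    def __init__(self, db_conn, hs):
        self.db_conn = db_conn
        self.hs = hs


class ClientIpStore(object):
    def __init__(self, hs):
        self.hs = hs


class SynchrotronSlavedStore(BaseSlavedStore, ClientIpStore):
    pass


# BaseSlavedStore.__init__ is reached first in the MRO, so this works;
# swapping the two bases would route (db_conn, hs) into the one-argument
# ClientIpStore.__init__ and raise a TypeError.
store = SynchrotronSlavedStore("db_conn", "hs")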
From da491e75b2d46c885f7fbb9240501c223e7c59bd Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:56:36 +0100
Subject: [PATCH 087/414] Appease flake8

---
 synapse/app/synchrotron.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 0446a1643d..af06ce70d1 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -122,7 +122,7 @@ class SynchrotronSlavedStore(
     SlavedFilteringStore,
     SlavedPresenceStore,
     BaseSlavedStore,
-    ClientIpStore, # After BaseSlavedStore because the constructor is different
+    ClientIpStore,  # After BaseSlavedStore because the constructor is different
 ):
     def get_presence_list_accepted(self, user_localpart):
         return ()

From 48340e4f13a8090feac070ebb507e7629d03b530 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 15:02:27 +0100
Subject: [PATCH 088/414] Clear the list of ongoing syncs on shutdown

---
 synapse/app/synchrotron.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index af06ce70d1..f4b416f777 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -163,6 +163,8 @@ class SynchrotronPresence(object):
             UPDATE_SYNCING_USERS_MS,
         )
 
+        reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+
     def set_state(self, user, state):
         # TODO: How's this supposed to work?
         pass
@@ -193,6 +195,13 @@ class SynchrotronPresence(object):
 
         defer.returnValue(_user_syncing())
 
+    @defer.inlineCallbacks
+    def _on_shutdown(self):
+        # When the synchrotron is shut down, tell the master to clear the in-
+        # progress syncs for this process
+        self.user_to_num_current_syncs.clear()
+        yield self._send_syncing_users_now()
+
     def _send_syncing_users_regularly(self):
         # Only send an update if we aren't in the middle of sending one.
if not self._sending_sync: From 21961c93c72c5d99d44bf6a264b641c18ac0219b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 15:31:56 +0100 Subject: [PATCH 089/414] Bump changelog and version --- CHANGES.rst | 50 +++++++++++++++++++++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index b027fb970c..776681de5f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,53 @@ +Changes in synapse v0.16.0-rc1 (2016-06-03) +=========================================== + +Features: + +* Add email notifications for missed messages (PR #759, #786, #799, #810, #815, + #821) +* Add a ``url_preview_ip_range_whitelist`` config param (PR #760) +* Add /report endpoint (PR #762) +* Add basic ignore user API (PR #763) +* Add an openidish mechanism for proving that you own a given user_id (PR #765) +* Allow clients to specify a server_name to avoid 'No known servers' (PR #794) +* Add secondary_directory_servers option to fetch room list from other servers + (PR #808, #813) + +Changes: + +* Report per request metrics for all of the things using request_handler (PR + #756) +* Correctly handle ``NULL`` password hashes from the database (PR #775) +* Allow receipts for events we haven't seen in the db (PR #784) +* Make synctl read a cache factor from config file (PR #785) +* Increment badge count per missed convo, not per msg (PR #793) +* Special case m.room.third_party_invite event auth to match invites (PR #814) + + +Bug fixes: + +* Fix typo in event_auth servlet path (PR #757) +* Fix password reset (PR #758) + + +Performance improvements: + +* Reduce database inserts when sending transactions (PR #767) +* Queue events by room for persistence (PR #768) +* Add cache to ``get_user_by_id`` (PR #772) +* Add and use ``get_domain_from_id`` (PR #773) +* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779) +* Remove unused indices (PR #782) +* Add caches to ``bulk_get_push_rules*`` (PR #804) +* Cache ``get_event_reference_hashes`` (PR #806) +* Add ``get_users_with_read_receipts_in_room`` cache (PR #809) +* Use state to calculate ``get_users_in_room`` (PR #811) +* Load push rules in storage layer so that they get cached (PR #825) +* Make ``get_joined_hosts_for_room`` use get_users_in_room (PR #828) +* Poke notifier on next reactor tick (PR #829) +* Change CacheMetrics to be quicker (PR #830) + + Changes in synapse v0.15.0-rc1 (2016-04-26) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 988318f5ea..3b290db79f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.15.0-rc1" +__version__ = "0.16.0-rc1" From c11614bcdc9acf87388554e11f1c8d911bd85b57 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 15:50:15 +0100 Subject: [PATCH 090/414] Note that v0.15.x was never released --- CHANGES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 776681de5f..e77b31b583 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,8 @@ Changes in synapse v0.16.0-rc1 (2016-06-03) =========================================== +Version 0.15 was not released. See v0.15.0-rc1 below for additional changes. 
+ Features: * Add email notifications for missed messages (PR #759, #786, #799, #810, #815, From 06d40c8b9841cd877e70e205d55a08f423ff2ec9 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 3 Jun 2016 16:31:23 +0100 Subject: [PATCH 091/414] Add substitutions to email notif From --- synapse/push/mailer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 88402e42a6..933a53fc3e 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -186,7 +186,7 @@ class Mailer(object): multipart_msg = MIMEMultipart('alternative') multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text) - multipart_msg['From'] = self.hs.config.email_notif_from + multipart_msg['From'] = self.hs.config.email_notif_from % (self.app_name, ) multipart_msg['To'] = email_address multipart_msg['Date'] = email.utils.formatdate() multipart_msg['Message-ID'] = email.utils.make_msgid() From fbf608decbf85051379dc24446b1b6e89ff97e8c Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 3 Jun 2016 16:38:39 +0100 Subject: [PATCH 092/414] Oops, we're using the dict form --- synapse/push/mailer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 933a53fc3e..011bc4d2b1 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -186,7 +186,9 @@ class Mailer(object): multipart_msg = MIMEMultipart('alternative') multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text) - multipart_msg['From'] = self.hs.config.email_notif_from % (self.app_name, ) + multipart_msg['From'] = self.hs.config.email_notif_from % { + "app": self.app_name + } multipart_msg['To'] = email_address multipart_msg['Date'] = email.utils.formatdate() multipart_msg['Message-ID'] = email.utils.make_msgid() From 72c4d482e99d30fe96e2b24389629abe5b572626 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 3 Jun 2016 16:39:50 +0100 Subject: [PATCH 093/414] 3rd time lucky: we'd already calculated it above --- synapse/push/mailer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 011bc4d2b1..e5c3929cd7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -186,9 +186,7 @@ class Mailer(object): multipart_msg = MIMEMultipart('alternative') multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text) - multipart_msg['From'] = self.hs.config.email_notif_from % { - "app": self.app_name - } + multipart_msg['From'] = from_string multipart_msg['To'] = email_address multipart_msg['Date'] = email.utils.formatdate() multipart_msg['Message-ID'] = email.utils.make_msgid() From 05e01f21d7012c1853ff566c8a76aa66087bfbd7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 17:12:48 +0100 Subject: [PATCH 094/414] Remove event fetching from DB threads --- synapse/replication/slave/storage/events.py | 5 - synapse/storage/appservice.py | 21 ++- synapse/storage/events.py | 138 -------------------- synapse/storage/room.py | 46 ++++--- synapse/storage/search.py | 29 ++-- synapse/storage/stream.py | 34 +++-- tests/storage/test_appservice.py | 2 +- 7 files changed, 75 insertions(+), 200 deletions(-) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index cbc1ae4190..877c68508c 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -131,15 +131,10 @@ class SlavedEventStore(BaseSlavedStore): 
_get_events_from_cache = DataStore._get_events_from_cache.__func__ _invalidate_get_event_cache = DataStore._invalidate_get_event_cache.__func__ - _parse_events_txn = DataStore._parse_events_txn.__func__ - _get_events_txn = DataStore._get_events_txn.__func__ - _get_event_txn = DataStore._get_event_txn.__func__ _enqueue_events = DataStore._enqueue_events.__func__ _do_fetch = DataStore._do_fetch.__func__ - _fetch_events_txn = DataStore._fetch_events_txn.__func__ _fetch_event_rows = DataStore._fetch_event_rows.__func__ _get_event_from_row = DataStore._get_event_from_row.__func__ - _get_event_from_row_txn = DataStore._get_event_from_row_txn.__func__ _get_rooms_for_user_where_membership_is_txn = ( DataStore._get_rooms_for_user_where_membership_is_txn.__func__ ) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index feb9d228ae..ffb7d4a25b 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -298,6 +298,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore): dict(txn_id=txn_id, as_id=service.id) ) + @defer.inlineCallbacks def get_oldest_unsent_txn(self, service): """Get the oldest transaction which has not been sent for this service. @@ -308,12 +309,23 @@ class ApplicationServiceTransactionStore(SQLBaseStore): A Deferred which resolves to an AppServiceTransaction or None. """ - return self.runInteraction( + entry = yield self.runInteraction( "get_oldest_unsent_appservice_txn", self._get_oldest_unsent_txn, service ) + if not entry: + defer.returnValue(None) + + event_ids = json.loads(entry["event_ids"]) + + events = yield self.get_events(event_ids) + + defer.returnValue(AppServiceTransaction( + service=service, id=entry["txn_id"], events=events + )) + def _get_oldest_unsent_txn(self, txn, service): # Monotonically increasing txn ids, so just select the smallest # one in the txns table (we delete them when they are sent) @@ -328,12 +340,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore): entry = rows[0] - event_ids = json.loads(entry["event_ids"]) - events = self._get_events_txn(txn, event_ids) - - return AppServiceTransaction( - service=service, id=entry["txn_id"], events=events - ) + return entry def _get_last_txn(self, txn, service_id): txn.execute( diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2b3f79577b..b710505a7e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -762,41 +762,6 @@ class EventsStore(SQLBaseStore): if e_id in event_map and event_map[e_id] ]) - def _get_events_txn(self, txn, event_ids, check_redacted=True, - get_prev_content=False, allow_rejected=False): - if not event_ids: - return [] - - event_map = self._get_events_from_cache( - event_ids, - check_redacted=check_redacted, - get_prev_content=get_prev_content, - allow_rejected=allow_rejected, - ) - - missing_events_ids = [e for e in event_ids if e not in event_map] - - if not missing_events_ids: - return [ - event_map[e_id] for e_id in event_ids - if e_id in event_map and event_map[e_id] - ] - - missing_events = self._fetch_events_txn( - txn, - missing_events_ids, - check_redacted=check_redacted, - get_prev_content=get_prev_content, - allow_rejected=allow_rejected, - ) - - event_map.update(missing_events) - - return [ - event_map[e_id] for e_id in event_ids - if e_id in event_map and event_map[e_id] - ] - def _invalidate_get_event_cache(self, event_id): for check_redacted in (False, True): for get_prev_content in (False, True): @@ -804,18 +769,6 @@ class EventsStore(SQLBaseStore): (event_id, check_redacted, 
get_prev_content) ) - def _get_event_txn(self, txn, event_id, check_redacted=True, - get_prev_content=False, allow_rejected=False): - - events = self._get_events_txn( - txn, [event_id], - check_redacted=check_redacted, - get_prev_content=get_prev_content, - allow_rejected=allow_rejected, - ) - - return events[0] if events else None - def _get_events_from_cache(self, events, check_redacted, get_prev_content, allow_rejected): event_map = {} @@ -981,34 +934,6 @@ class EventsStore(SQLBaseStore): return rows - def _fetch_events_txn(self, txn, events, check_redacted=True, - get_prev_content=False, allow_rejected=False): - if not events: - return {} - - rows = self._fetch_event_rows( - txn, events, - ) - - if not allow_rejected: - rows[:] = [r for r in rows if not r["rejects"]] - - res = [ - self._get_event_from_row_txn( - txn, - row["internal_metadata"], row["json"], row["redacts"], - check_redacted=check_redacted, - get_prev_content=get_prev_content, - rejected_reason=row["rejects"], - ) - for row in rows - ] - - return { - r.event_id: r - for r in res - } - @defer.inlineCallbacks def _get_event_from_row(self, internal_metadata, js, redacted, check_redacted=True, get_prev_content=False, @@ -1070,69 +995,6 @@ class EventsStore(SQLBaseStore): defer.returnValue(ev) - def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted, - check_redacted=True, get_prev_content=False, - rejected_reason=None): - d = json.loads(js) - internal_metadata = json.loads(internal_metadata) - - if rejected_reason: - rejected_reason = self._simple_select_one_onecol_txn( - txn, - table="rejections", - keyvalues={"event_id": rejected_reason}, - retcol="reason", - ) - - ev = FrozenEvent( - d, - internal_metadata_dict=internal_metadata, - rejected_reason=rejected_reason, - ) - - if check_redacted and redacted: - ev = prune_event(ev) - - redaction_id = self._simple_select_one_onecol_txn( - txn, - table="redactions", - keyvalues={"redacts": ev.event_id}, - retcol="event_id", - ) - - ev.unsigned["redacted_by"] = redaction_id - # Get the redaction event. - - because = self._get_event_txn( - txn, - redaction_id, - check_redacted=False - ) - - if because: - ev.unsigned["redacted_because"] = because - - if get_prev_content and "replaces_state" in ev.unsigned: - prev = self._get_event_txn( - txn, - ev.unsigned["replaces_state"], - get_prev_content=False, - ) - if prev: - ev.unsigned["prev_content"] = prev.content - ev.unsigned["prev_sender"] = prev.sender - - self._get_event_cache.prefill( - (ev.event_id, check_redacted, get_prev_content), ev - ) - - return ev - - def _parse_events_txn(self, txn, rows): - event_ids = [r["event_id"] for r in rows] - - return self._get_events_txn(txn, event_ids) - @defer.inlineCallbacks def count_daily_messages(self): """ diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 26933e593a..97f9f1929c 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -194,32 +194,44 @@ class RoomStore(SQLBaseStore): @cachedInlineCallbacks() def get_room_name_and_aliases(self, room_id): - def f(txn): + def get_room_name(txn): sql = ( - "SELECT event_id FROM current_state_events " - "WHERE room_id = ? " + "SELECT name FROM room_names" + " INNER JOIN current_state_events USING (room_id, event_id)" + " WHERE room_id = ?" 
+                " LIMIT 1"
             )
-            sql += " AND ((type = 'm.room.name' AND state_key = '')"
-            sql += " OR type = 'm.room.aliases')"
             txn.execute(sql, (room_id,))
-            results = self.cursor_to_dict(txn)
+            rows = txn.fetchall()
+            if rows:
+                return rows[0][0]
+            else:
+                return None
 
-            return self._parse_events_txn(txn, results)
 
-        events = yield self.runInteraction("get_room_name_and_aliases", f)
 
+        def get_room_aliases(txn):
+            sql = (
+                "SELECT content FROM current_state_events"
+                " INNER JOIN events USING (room_id, event_id)"
+                " WHERE room_id = ?"
+            )
+            txn.execute(sql, (room_id,))
+            return [row[0] for row in txn.fetchall()]
+
+        name = yield self.runInteraction("get_room_name", get_room_name)
+        alias_contents = yield self.runInteraction("get_room_aliases", get_room_aliases)
 
-        name = None
         aliases = []
-        for e in events:
-            if e.type == 'm.room.name':
-                if 'name' in e.content:
-                    name = e.content['name']
-            elif e.type == 'm.room.aliases':
-                if 'aliases' in e.content:
-                    aliases.extend(e.content['aliases'])
+        for c in alias_contents:
+            try:
+                content = json.loads(c)
+            except:
+                continue
+
+            aliases.extend(content.get('aliases', []))
 
         defer.returnValue((name, aliases))
 
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 0224299625..12941d1775 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -21,6 +21,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine
 
 import logging
 import re
+import ujson as json
 
 logger = logging.getLogger(__name__)
 
@@ -52,7 +53,7 @@ class SearchStore(BackgroundUpdateStore):
 
         def reindex_search_txn(txn):
             sql = (
-                "SELECT stream_ordering, event_id FROM events"
+                "SELECT stream_ordering, event_id, room_id, type, content FROM events"
                 " WHERE ? <= stream_ordering AND stream_ordering < ?"
" AND (%s)" " ORDER BY stream_ordering DESC" @@ -61,28 +62,30 @@ class SearchStore(BackgroundUpdateStore): txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - rows = txn.fetchall() + rows = self.cursor_to_dict(txn) if not rows: return 0 - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - events = self._get_events_txn(txn, event_ids) + min_stream_id = rows[-1]["stream_ordering"] event_search_rows = [] - for event in events: + for row in rows: try: - event_id = event.event_id - room_id = event.room_id - content = event.content - if event.type == "m.room.message": + event_id = row["event_id"] + room_id = row["room_id"] + etype = row["type"] + try: + content = json.loads(row["content"]) + except: + continue + + if etype == "m.room.message": key = "content.body" value = content["body"] - elif event.type == "m.room.topic": + elif etype == "m.room.topic": key = "content.topic" value = content["topic"] - elif event.type == "m.room.name": + elif etype == "m.room.name": key = "content.name" value = content["name"] except (KeyError, AttributeError): diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 95b12559a6..b9ad965fd6 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -132,29 +132,25 @@ class StreamStore(SQLBaseStore): return True return False - ret = self._get_events_txn( - txn, - # apply the filter on the room id list - [ - r["event_id"] for r in rows - if app_service_interested(r) - ], - get_prev_content=True - ) + return [r for r in rows if app_service_interested(r)] - self._set_before_and_after(ret, rows) + rows = yield self.runInteraction("get_appservice_room_stream", f) - if rows: - key = "s%d" % max(r["stream_ordering"] for r in rows) - else: - # Assume we didn't get anything because there was nothing to - # get. - key = to_key + ret = yield self._get_events( + [r["event_id"] for r in rows], + get_prev_content=True + ) - return ret, key + self._set_before_and_after(ret, rows, topo_order=from_id is None) - results = yield self.runInteraction("get_appservice_room_stream", f) - defer.returnValue(results) + if rows: + key = "s%d" % max(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. 
+ key = to_key + + defer.returnValue((ret, key)) @defer.inlineCallbacks def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0, diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 5734198121..f44c4870e3 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -357,7 +357,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): other_events = [Mock(event_id="e5"), Mock(event_id="e6")] # we aren't testing store._base stuff here, so mock this out - self.store._get_events_txn = Mock(return_value=events) + self.store.get_events = Mock(return_value=events) yield self._insert_txn(self.as_list[1]["id"], 9, other_events) yield self._insert_txn(service.id, 10, events) From 10ea3f46ba3eda2f7c220a5e5902b687feb3042c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 17:55:32 +0100 Subject: [PATCH 095/414] Change the way we cache events --- synapse/storage/events.py | 80 ++++++++++++++++++++------------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b710505a7e..779743b8f9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -139,6 +139,9 @@ class _EventPeristenceQueue(object): pass +_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) + + class EventsStore(SQLBaseStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" @@ -741,7 +744,6 @@ class EventsStore(SQLBaseStore): event_map = self._get_events_from_cache( event_ids, check_redacted=check_redacted, - get_prev_content=get_prev_content, allow_rejected=allow_rejected, ) @@ -751,40 +753,49 @@ class EventsStore(SQLBaseStore): missing_events = yield self._enqueue_events( missing_events_ids, check_redacted=check_redacted, - get_prev_content=get_prev_content, allow_rejected=allow_rejected, ) event_map.update(missing_events) - defer.returnValue([ + events = [ event_map[e_id] for e_id in event_id_list if e_id in event_map and event_map[e_id] - ]) + ] + + if get_prev_content: + for event in events: + if "replaces_state" in event.unsigned: + prev = yield self.get_event( + event.unsigned["replaces_state"], + get_prev_content=False, + allow_none=True, + ) + if prev: + event.unsigned = dict(event.unsigned) + event.unsigned["prev_content"] = prev.content + event.unsigned["prev_sender"] = prev.sender + + defer.returnValue(events) def _invalidate_get_event_cache(self, event_id): - for check_redacted in (False, True): - for get_prev_content in (False, True): - self._get_event_cache.invalidate( - (event_id, check_redacted, get_prev_content) - ) + self._get_event_cache.invalidate((event_id,)) - def _get_events_from_cache(self, events, check_redacted, get_prev_content, - allow_rejected): + def _get_events_from_cache(self, events, check_redacted, allow_rejected): event_map = {} for event_id in events: - try: - ret = self._get_event_cache.get( - (event_id, check_redacted, get_prev_content,) - ) + ret = self._get_event_cache.get((event_id,), None) + if not ret: + continue - if allow_rejected or not ret.rejected_reason: - event_map[event_id] = ret + if allow_rejected or not ret.event.rejected_reason: + if check_redacted and ret.redacted_event: + event_map[event_id] = ret.redacted_event else: - event_map[event_id] = None - except KeyError: - pass + event_map[event_id] = ret.event + else: + event_map[event_id] = None return event_map @@ -855,8 +866,7 @@ class EventsStore(SQLBaseStore): reactor.callFromThread(fire, 
event_list) @defer.inlineCallbacks - def _enqueue_events(self, events, check_redacted=True, - get_prev_content=False, allow_rejected=False): + def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): """Fetches events from the database using the _event_fetch_list. This allows batch and bulk fetching of events - it allows us to fetch events without having to create a new transaction for each request for events. @@ -895,7 +905,6 @@ class EventsStore(SQLBaseStore): preserve_fn(self._get_event_from_row)( row["internal_metadata"], row["json"], row["redacts"], check_redacted=check_redacted, - get_prev_content=get_prev_content, rejected_reason=row["rejects"], ) for row in rows @@ -936,8 +945,7 @@ class EventsStore(SQLBaseStore): @defer.inlineCallbacks def _get_event_from_row(self, internal_metadata, js, redacted, - check_redacted=True, get_prev_content=False, - rejected_reason=None): + check_redacted=True, rejected_reason=None): d = json.loads(js) internal_metadata = json.loads(internal_metadata) @@ -949,14 +957,17 @@ class EventsStore(SQLBaseStore): desc="_get_event_from_row", ) - ev = FrozenEvent( + original_ev = FrozenEvent( d, internal_metadata_dict=internal_metadata, rejected_reason=rejected_reason, ) + ev = original_ev + redacted_event = None if check_redacted and redacted: ev = prune_event(ev) + redacted_event = ev redaction_id = yield self._simple_select_one_onecol( table="redactions", @@ -979,19 +990,10 @@ class EventsStore(SQLBaseStore): # will serialise this field correctly ev.unsigned["redacted_because"] = because - if get_prev_content and "replaces_state" in ev.unsigned: - prev = yield self.get_event( - ev.unsigned["replaces_state"], - get_prev_content=False, - allow_none=True, - ) - if prev: - ev.unsigned["prev_content"] = prev.content - ev.unsigned["prev_sender"] = prev.sender - - self._get_event_cache.prefill( - (ev.event_id, check_redacted, get_prev_content), ev - ) + self._get_event_cache.prefill((ev.event_id,), _EventCacheEntry( + event=original_ev, + redacted_event=redacted_event, + )) defer.returnValue(ev) From 8f79084bd44f76223048c1bd6d836f904edcc95e Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 18:03:40 +0100 Subject: [PATCH 096/414] Add get_presence_list_accepted to the broken caches in synchrotron --- synapse/app/synchrotron.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index f4b416f777..c77854fab1 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -39,7 +39,7 @@ from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.server import HomeServer from synapse.storage.client_ips import ClientIpStore from synapse.storage.engines import create_engine -from synapse.storage.presence import UserPresenceState +from synapse.storage.presence import PresenceStore, UserPresenceState from synapse.storage.roommember import RoomMemberStore from synapse.util.async import sleep from synapse.util.httpresourcetree import create_resource_tree @@ -124,9 +124,6 @@ class SynchrotronSlavedStore( BaseSlavedStore, ClientIpStore, # After BaseSlavedStre because the constructor is different ): - def get_presence_list_accepted(self, user_localpart): - return () - # XXX: This is a bit broken because we don't persist forgotten rooms # in a way that they can be streamed. This means that we don't have a # way to invalidate the forgotten rooms cache correctly. 
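The "Change the way we cache events" patch above is worth spelling out: instead of keying the event cache on ``(event_id, check_redacted, get_prev_content)``, the store now caches one entry per event holding both the original event and, if the event was redacted, a pre-pruned copy; read paths then pick whichever view they need. A rough standalone model of that shape (hypothetical names, not the real synapse classes)::

    from collections import namedtuple

    # One cache entry per event: the original plus an optional redacted view.
    EventCacheEntry = namedtuple("EventCacheEntry", ("event", "redacted_event"))


    class EventCache(object):
        def __init__(self):
            self._entries = {}  # event_id -> EventCacheEntry

        def prefill(self, event_id, event, redacted_event=None):
            self._entries[event_id] = EventCacheEntry(event, redacted_event)

        def get(self, event_id, check_redacted=True):
            entry = self._entries.get(event_id)
            if entry is None:
                return None
            # The flag selects a view of the entry rather than forming part
            # of the key, so one fetch populates the cache for every caller.
            if check_redacted and entry.redacted_event is not None:
                return entry.redacted_event
            return entry.event

Because the options no longer multiply cache keys, invalidation shrinks to clearing a single key per event, which is exactly what the simplified ``_invalidate_get_event_cache`` above does.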
@@ -136,6 +133,13 @@ class SynchrotronSlavedStore( RoomMemberStore.__dict__["who_forgot_in_room"] ) + # XXX: This is a bit broken because we don't persist the accepted list in a + # way that can be replicated. This means that we don't have a way to + # invalidate the cache correctly. + get_presence_list_accepted = PresenceStore.__dict__[ + "get_presence_list_accepted" + ] + UPDATE_SYNCING_USERS_MS = 10 * 1000 @@ -357,6 +361,7 @@ class SynchrotronServer(HomeServer): def expire_broken_caches(): store.who_forgot_in_room.invalidate_all() + store.get_presence_list_accepted.invalidate_all() def notify_from_stream( result, stream_name, stream_key, room=None, user=None From ac9716f1546ae486cac435b8a577cc2c54b666d6 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 3 Jun 2016 18:10:00 +0100 Subject: [PATCH 097/414] Fix spelling --- synapse/app/synchrotron.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index c77854fab1..aa81e1c5da 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -122,7 +122,7 @@ class SynchrotronSlavedStore( SlavedFilteringStore, SlavedPresenceStore, BaseSlavedStore, - ClientIpStore, # After BaseSlavedStre because the constructor is different + ClientIpStore, # After BaseSlavedStore because the constructor is different ): # XXX: This is a bit broken because we don't persist forgotten rooms # in a way that they can be streamed. This means that we don't have a From cffe46408f40db082df76adc263cf5014031ae54 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Jun 2016 18:25:21 +0100 Subject: [PATCH 098/414] Don't rely on options when inserting event into cache --- synapse/storage/events.py | 83 ++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 779743b8f9..5db24e86f9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -741,13 +741,12 @@ class EventsStore(SQLBaseStore): event_id_list = event_ids event_ids = set(event_ids) - event_map = self._get_events_from_cache( + event_entry_map = self._get_events_from_cache( event_ids, - check_redacted=check_redacted, allow_rejected=allow_rejected, ) - missing_events_ids = [e for e in event_ids if e not in event_map] + missing_events_ids = [e for e in event_ids if e not in event_entry_map] if missing_events_ids: missing_events = yield self._enqueue_events( @@ -756,32 +755,40 @@ class EventsStore(SQLBaseStore): allow_rejected=allow_rejected, ) - event_map.update(missing_events) + event_entry_map.update(missing_events) - events = [ - event_map[e_id] for e_id in event_id_list - if e_id in event_map and event_map[e_id] - ] + events = [] + for event_id in event_id_list: + entry = event_entry_map.get(event_id, None) + if not entry: + continue - if get_prev_content: - for event in events: - if "replaces_state" in event.unsigned: - prev = yield self.get_event( - event.unsigned["replaces_state"], - get_prev_content=False, - allow_none=True, - ) - if prev: - event.unsigned = dict(event.unsigned) - event.unsigned["prev_content"] = prev.content - event.unsigned["prev_sender"] = prev.sender + if allow_rejected or not entry.event.rejected_reason: + if check_redacted and entry.redacted_event: + event = entry.redacted_event + else: + event = entry.event + + events.append(event) + + if get_prev_content: + if "replaces_state" in event.unsigned: + prev = yield self.get_event( + event.unsigned["replaces_state"], + 
get_prev_content=False, + allow_none=True, + ) + if prev: + event.unsigned = dict(event.unsigned) + event.unsigned["prev_content"] = prev.content + event.unsigned["prev_sender"] = prev.sender defer.returnValue(events) def _invalidate_get_event_cache(self, event_id): self._get_event_cache.invalidate((event_id,)) - def _get_events_from_cache(self, events, check_redacted, allow_rejected): + def _get_events_from_cache(self, events, allow_rejected): event_map = {} for event_id in events: @@ -790,10 +797,7 @@ class EventsStore(SQLBaseStore): continue if allow_rejected or not ret.event.rejected_reason: - if check_redacted and ret.redacted_event: - event_map[event_id] = ret.redacted_event - else: - event_map[event_id] = ret.event + event_map[event_id] = ret else: event_map[event_id] = None @@ -904,7 +908,6 @@ class EventsStore(SQLBaseStore): [ preserve_fn(self._get_event_from_row)( row["internal_metadata"], row["json"], row["redacts"], - check_redacted=check_redacted, rejected_reason=row["rejects"], ) for row in rows @@ -913,7 +916,7 @@ class EventsStore(SQLBaseStore): ) defer.returnValue({ - e.event_id: e + e.event.event_id: e for e in res if e }) @@ -945,7 +948,7 @@ class EventsStore(SQLBaseStore): @defer.inlineCallbacks def _get_event_from_row(self, internal_metadata, js, redacted, - check_redacted=True, rejected_reason=None): + rejected_reason=None): d = json.loads(js) internal_metadata = json.loads(internal_metadata) @@ -954,7 +957,7 @@ class EventsStore(SQLBaseStore): table="rejections", keyvalues={"event_id": rejected_reason}, retcol="reason", - desc="_get_event_from_row", + desc="_get_event_from_row_rejected_reason", ) original_ev = FrozenEvent( @@ -963,20 +966,18 @@ class EventsStore(SQLBaseStore): rejected_reason=rejected_reason, ) - ev = original_ev redacted_event = None - if check_redacted and redacted: - ev = prune_event(ev) - redacted_event = ev + if redacted: + redacted_event = prune_event(original_ev) redaction_id = yield self._simple_select_one_onecol( table="redactions", - keyvalues={"redacts": ev.event_id}, + keyvalues={"redacts": redacted_event.event_id}, retcol="event_id", - desc="_get_event_from_row", + desc="_get_event_from_row_redactions", ) - ev.unsigned["redacted_by"] = redaction_id + redacted_event.unsigned["redacted_by"] = redaction_id # Get the redaction event. 
because = yield self.get_event( @@ -988,14 +989,16 @@ class EventsStore(SQLBaseStore): if because: # It's fine to do add the event directly, since get_pdu_json # will serialise this field correctly - ev.unsigned["redacted_because"] = because + redacted_event.unsigned["redacted_because"] = because - self._get_event_cache.prefill((ev.event_id,), _EventCacheEntry( + cache_entry = _EventCacheEntry( event=original_ev, redacted_event=redacted_event, - )) + ) - defer.returnValue(ev) + self._get_event_cache.prefill((original_ev.event_id,), cache_entry) + + defer.returnValue(cache_entry) @defer.inlineCallbacks def count_daily_messages(self): From 70aee0717c22acf7eabb5f158cbaf527137bc90e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Jun 2016 11:08:12 +0100 Subject: [PATCH 099/414] Add events to cache when we persist them --- synapse/storage/events.py | 41 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5db24e86f9..16398dc0a8 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -635,6 +635,8 @@ class EventsStore(SQLBaseStore): ], ) + self._add_to_cache(txn, events_and_contexts) + if backfilled: # Backfilled events come before the current state so we don't need # to update the current state table @@ -676,6 +678,45 @@ class EventsStore(SQLBaseStore): return + def _add_to_cache(self, txn, events_and_contexts): + to_prefill = [] + + rows = [] + N = 200 + for i in range(0, len(events_and_contexts), N): + ev_map = { + e[0].event_id: e[0] + for e in events_and_contexts[i:i + N] + } + if not ev_map: + break + + sql = ( + "SELECT " + " e.event_id as event_id, " + " r.redacts as redacts," + " rej.event_id as rejects " + " FROM events as e" + " LEFT JOIN rejections as rej USING (event_id)" + " LEFT JOIN redactions as r ON e.event_id = r.redacts" + " WHERE e.event_id IN (%s)" + ) % (",".join(["?"] * len(ev_map)),) + + txn.execute(sql, ev_map.keys()) + rows = self.cursor_to_dict(txn) + for row in rows: + event = ev_map[row["event_id"]] + if not row["rejects"] and not row["redacts"]: + to_prefill.append(_EventCacheEntry( + event=event, + redacted_event=None, + )) + + def prefill(): + for cache_entry in to_prefill: + self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry) + txn.call_after(prefill) + def _store_redaction(self, txn, event): # invalidate the cache for the redacted event txn.call_after(self._invalidate_get_event_cache, event.redacts) From 7aa778fba9bb81087c3a1029e0a0d4ff55b1a065 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Jun 2016 11:58:09 +0100 Subject: [PATCH 100/414] Add metric counter for number of persisted events --- synapse/storage/events.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 5db24e86f9..ff4f742f6a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -27,6 +27,9 @@ from synapse.api.constants import EventTypes from canonicaljson import encode_canonical_json from collections import deque, namedtuple +import synapse +import synapse.metrics + import logging import math @@ -35,6 +38,10 @@ import ujson as json logger = logging.getLogger(__name__) +metrics = synapse.metrics.get_metrics_for(__name__) +persist_event_counter = metrics.register_counter("persisted_events") + + def encode_json(json_object): if USE_FROZEN_DICTS: # ujson doesn't like frozen_dicts @@ -261,6 +268,7 @@ class EventsStore(SQLBaseStore): events_and_contexts=chunk, 
backfilled=backfilled, ) + persist_event_counter.inc_by(len(chunk)) @defer.inlineCallbacks @log_function @@ -278,6 +286,7 @@ class EventsStore(SQLBaseStore): current_state=current_state, backfilled=backfilled, ) + persist_event_counter.inc() except _RollbackButIsFineException: pass From 377eb480ca66a376e85cf8927f7f9112ed60e8bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Jun 2016 15:14:21 +0100 Subject: [PATCH 101/414] Fire after 30s not 8h --- synapse/handlers/presence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 0e19f777b8..2e772da660 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -183,7 +183,7 @@ class PresenceHandler(object): # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. self.clock.call_later( - 30 * 1000, + 30, self.clock.looping_call, self._handle_timeouts, 5000, From 96dc600579cd6ef9937b0e007f51aa4da0fc122d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 6 Jun 2016 15:44:41 +0100 Subject: [PATCH 102/414] Fix typos --- synapse/handlers/presence.py | 70 +++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 2e772da660..94160a5be7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -283,44 +283,48 @@ class PresenceHandler(object): """Checks the presence of users that have timed out and updates as appropriate. """ + logger.info("Handling presence timeouts") now = self.clock.time_msec() - with Measure(self.clock, "presence_handle_timeouts"): - # Fetch the list of users that *may* have timed out. Things may have - # changed since the timeout was set, so we won't necessarily have to - # take any action. - users_to_check = set(self.wheel_timer.fetch(now)) + try: + with Measure(self.clock, "presence_handle_timeouts"): + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. + users_to_check = set(self.wheel_timer.fetch(now)) - # Check whether the lists of syncing processes from an external - # process have expired. - expired_process_ids = [ - process_id for process_id, last_update - in self.external_process_last_update.items() - if now - last_update > EXTERNAL_PROCESS_EXPIRY - ] - for process_id in expired_process_ids: - users_to_check.update( - self.external_process_to_current_syncs.pop(process_id, ()) + # Check whether the lists of syncing processes from an external + # process have expired. 
+                expired_process_ids = [
+                    process_id for process_id, last_update
+                    in self.external_process_last_updated_ms.items()
+                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
+                ]
+                for process_id in expired_process_ids:
+                    users_to_check.update(
+                        self.external_process_to_current_syncs.pop(process_id, ())
+                    )
+                    self.external_process_last_updated_ms.pop(process_id)
+
+                states = [
+                    self.user_to_current_state.get(
+                        user_id, UserPresenceState.default(user_id)
+                    )
+                    for user_id in users_to_check
+                ]
+
+                timers_fired_counter.inc_by(len(states))
+
+                changes = handle_timeouts(
+                    states,
+                    is_mine_fn=self.is_mine_id,
+                    syncing_user_ids=self.get_currently_syncing_users(),
+                    now=now,
                 )
-            self.external_process_last_update.pop(process_id)
 
-            states = [
-                self.user_to_current_state.get(
-                    user_id, UserPresenceState.default(user_id)
-                )
-                for user_id in users_to_check
-            ]
-
-            timers_fired_counter.inc_by(len(states))
-
-            changes = handle_timeouts(
-                states,
-                is_mine_fn=self.is_mine_id,
-                syncing_users=self.get_syncing_users(),
-                now=now,
-            )
-
-            preserve_fn(self._update_states)(changes)
+            preserve_fn(self._update_states)(changes)
+        except:
+            logger.exception("Exception in _handle_timeouts loop")
 
     @defer.inlineCallbacks
     def bump_presence_active_time(self, user):
From 216a05b3e39e08b0600a39fc111b4d669d06ff7c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 16:00:09 +0100
Subject: [PATCH 103/414] .values() returns list of sets

---
 synapse/handlers/presence.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 94160a5be7..6b70fa3817 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -406,7 +406,8 @@ class PresenceHandler(object):
             user_id for user_id, count in self.user_to_num_current_syncs.items()
             if count
         }
-        syncing_user_ids.update(self.external_process_to_current_syncs.values())
+        for user_ids in self.external_process_to_current_syncs.values():
+            syncing_user_ids.update(user_ids)
         return syncing_user_ids
 
     @defer.inlineCallbacks
From 5ef84da4f11f1b1cceb0c44d9867bb597ee68e64 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 6 Jun 2016 16:05:28 +0100
Subject: [PATCH 104/414] Yield on the sleeps intended to backoff replication

---
 synapse/app/pusher.py      | 2 +-
 synapse/app/synchrotron.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index f1de1e7ce9..3c3fa38053 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -311,7 +311,7 @@ class PusherServer(HomeServer):
                 poke_pushers(result)
             except:
                 logger.exception("Error replicating from %r", replication_url)
-                sleep(30)
+                yield sleep(30)
 
 
 def setup(config_options):
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index aa81e1c5da..7273055cc1 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -443,7 +443,7 @@ class SynchrotronServer(HomeServer):
                 notify(result)
             except:
                 logger.exception("Error replicating from %r", replication_url)
-                sleep(5)
+                yield sleep(5)
 
     def build_presence_handler(self):
         return SynchrotronPresence(self)
From 4a5bbb1941ae63f1d6632aa35e80274e56c8dbb9 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 6 Jun 2016 16:37:12 +0100
Subject: [PATCH 105/414] Fix a KeyError in the synchrotron presence

---
 synapse/app/synchrotron.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index aa81e1c5da..3d0d5cc15a 100644
--- 
a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -187,7 +187,10 @@ class SynchrotronPresence(object): yield self._send_syncing_users_now() def _end(): - if affect_presence: + # We check that the user_id is in user_to_num_current_syncs because + # user_to_num_current_syncs may have been cleared if we are + # shutting down. + if affect_presence and user_id in self.user_to_num_current_syncs: self.user_to_num_current_syncs[user_id] -= 1 @contextlib.contextmanager From 310197bab5cf8ed2c26fae522f15f092dbcdff58 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 09:34:50 +0100 Subject: [PATCH 106/414] Fix AS retries --- synapse/storage/appservice.py | 4 ++-- tests/storage/test_appservice.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index ffb7d4a25b..a281571637 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -320,10 +320,10 @@ class ApplicationServiceTransactionStore(SQLBaseStore): event_ids = json.loads(entry["event_ids"]) - events = yield self.get_events(event_ids) + event_map = yield self.get_events(event_ids) defer.returnValue(AppServiceTransaction( - service=service, id=entry["txn_id"], events=events + service=service, id=entry["txn_id"], events=event_map.values() )) def _get_oldest_unsent_txn(self, txn, service): diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index f44c4870e3..6db4b966db 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -353,21 +353,21 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_oldest_unsent_txn(self): service = Mock(id=self.as_list[0]["id"]) - events = [Mock(event_id="e1"), Mock(event_id="e2")] + events = {"e1": Mock(event_id="e1"), "e2": Mock(event_id="e2")} other_events = [Mock(event_id="e5"), Mock(event_id="e6")] # we aren't testing store._base stuff here, so mock this out self.store.get_events = Mock(return_value=events) yield self._insert_txn(self.as_list[1]["id"], 9, other_events) - yield self._insert_txn(service.id, 10, events) + yield self._insert_txn(service.id, 10, events.values()) yield self._insert_txn(service.id, 11, other_events) yield self._insert_txn(service.id, 12, other_events) txn = yield self.store.get_oldest_unsent_txn(service) self.assertEquals(service, txn.service) self.assertEquals(10, txn.id) - self.assertEquals(events, txn.events) + self.assertEquals(events.values(), txn.events) @defer.inlineCallbacks def test_get_appservices_by_state_single(self): From 84379062f9ec259abc302af321d4ed8f5a958c01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 10:24:50 +0100 Subject: [PATCH 107/414] Fix AS retries, but with correct ordering --- synapse/storage/appservice.py | 4 ++-- tests/storage/test_appservice.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index a281571637..d1ee533fac 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -320,10 +320,10 @@ class ApplicationServiceTransactionStore(SQLBaseStore): event_ids = json.loads(entry["event_ids"]) - event_map = yield self.get_events(event_ids) + events = yield self._get_events(event_ids) defer.returnValue(AppServiceTransaction( - service=service, id=entry["txn_id"], events=event_map.values() + service=service, id=entry["txn_id"], events=events )) def _get_oldest_unsent_txn(self, txn, 
service): diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 6db4b966db..3e2862daae 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -353,21 +353,21 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_oldest_unsent_txn(self): service = Mock(id=self.as_list[0]["id"]) - events = {"e1": Mock(event_id="e1"), "e2": Mock(event_id="e2")} + events = [Mock(event_id="e1"), Mock(event_id="e2")] other_events = [Mock(event_id="e5"), Mock(event_id="e6")] # we aren't testing store._base stuff here, so mock this out - self.store.get_events = Mock(return_value=events) + self.store._get_events = Mock(return_value=events) yield self._insert_txn(self.as_list[1]["id"], 9, other_events) - yield self._insert_txn(service.id, 10, events.values()) + yield self._insert_txn(service.id, 10, events) yield self._insert_txn(service.id, 11, other_events) yield self._insert_txn(service.id, 12, other_events) txn = yield self.store.get_oldest_unsent_txn(service) self.assertEquals(service, txn.service) self.assertEquals(10, txn.id) - self.assertEquals(events.values(), txn.events) + self.assertEquals(events, txn.events) @defer.inlineCallbacks def test_get_appservices_by_state_single(self): From 88625db05f274ad855fb51b33c84c09c947a6bd0 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 7 Jun 2016 11:33:36 +0100 Subject: [PATCH 108/414] Notify users for events in rooms they join. Change how the notifier updates the map from room_id to user streams on receiving a join event. Make it update the map when it notifies for the join event, rather than using the "user_joined_room" distributor signal --- synapse/notifier.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/notifier.py b/synapse/notifier.py index cbec4d30ae..30883a0696 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -14,7 +14,7 @@ # limitations under the License. 
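The commit message above describes a design change in the notifier that a small model makes concrete: the map from room to user streams is now updated at the moment the join event is notified, rather than via a separate ``user_joined_room`` distributor signal, so the map is guaranteed to be current when the event is streamed. A toy version of the shape (hypothetical ``MiniNotifier`` with events as plain dicts; the real code uses event objects and stream wake-ups)::

    class MiniNotifier(object):
        def __init__(self):
            self.room_to_user_streams = {}  # room_id -> set of user streams
            self.user_to_user_stream = {}   # user_id -> that user's stream

        def on_new_room_event(self, event):
            # Update the map inline while notifying for the join event.
            if event["type"] == "m.room.member" and event.get("membership") == "join":
                self._user_joined_room(event["state_key"], event["room_id"])
            # ... then wake every stream registered for event["room_id"] ...

        def _user_joined_room(self, user_id, room_id):
            stream = self.user_to_user_stream.get(user_id)
            if stream is not None:
                self.room_to_user_streams.setdefault(room_id, set()).add(stream)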
from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.util.logutils import log_function @@ -152,10 +152,6 @@ class Notifier(object): self.appservice_handler = hs.get_application_service_handler() self.state_handler = hs.get_state_handler() - hs.get_distributor().observe( - "user_joined_room", self._user_joined_room - ) - self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) @@ -248,6 +244,9 @@ class Notifier(object): ) app_streams |= app_user_streams + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + self._user_joined_room(event.state_key, event.room_id) + self.on_new_event( "room_key", room_stream_id, users=extra_users, @@ -483,9 +482,8 @@ class Notifier(object): user_stream.appservice, set() ).add(user_stream) - def _user_joined_room(self, user, room_id): - user = str(user) - new_user_stream = self.user_to_user_stream.get(user) + def _user_joined_room(self, user_id, room_id): + new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None: room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) From 75331c5fca6d2207094b8cbf0b3bb34cc52a4ec4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 13:33:13 +0100 Subject: [PATCH 109/414] Change the way we do stats --- synapse/metrics/__init__.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index f317034b8f..ef14bcd840 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -153,11 +153,7 @@ reactor_metrics = get_metrics_for("reactor") tick_time = reactor_metrics.register_distribution("tick_time") pending_calls_metric = reactor_metrics.register_distribution("pending_calls") -gc_time = ( - reactor_metrics.register_distribution("gc_time_gen0"), - reactor_metrics.register_distribution("gc_time_gen2"), - reactor_metrics.register_distribution("gc_time_gen2"), -) +gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"]) def runUntilCurrentTimer(func): @@ -190,7 +186,7 @@ def runUntilCurrentTimer(func): # one if necessary. threshold = gc.get_threshold() counts = gc.get_count() - for i in [2, 1, 0]: + for i in (0, 1, 2): if threshold[i] < counts[i]: logger.info("Collecting gc %d", i) @@ -198,7 +194,7 @@ def runUntilCurrentTimer(func): gc.collect(i) end = time.time() * 1000 - gc_time[i].inc_by(end - start) + gc_time.inc_by(end - start, i) return ret From 48e65099b52383743a47844b6369e173b9a96f90 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 13:40:22 +0100 Subject: [PATCH 110/414] Also record number of unreachable objects --- synapse/metrics/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ef14bcd840..b29cec3de1 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -154,6 +154,7 @@ tick_time = reactor_metrics.register_distribution("tick_time") pending_calls_metric = reactor_metrics.register_distribution("pending_calls") gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"]) +gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"]) def runUntilCurrentTimer(func): @@ -186,15 +187,16 @@ def runUntilCurrentTimer(func): # one if necessary. 
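Taken together, the GC patches in this stretch refine the bookkeeping around manual collection: the timing distribution gains a per-generation label, the loop walks generations oldest-first, and the value returned by ``gc.collect()`` (the number of unreachable objects found) is recorded as well. A freestanding sketch of that accounting, with plain dicts standing in for synapse's metrics objects::

    import gc
    import time

    gc_time_ms = {0: [], 1: [], 2: []}   # per-generation collection times
    gc_unreachable = {0: 0, 1: 0, 2: 0}  # per-generation unreachable totals


    def maybe_collect():
        threshold = gc.get_threshold()  # e.g. (700, 10, 10)
        counts = gc.get_count()  # allocations since each generation's last sweep
        for gen in (2, 1, 0):  # oldest first; a gen 2 collect also sweeps gens 0 and 1
            if threshold[gen] < counts[gen]:
                start = time.time() * 1000
                unreachable = gc.collect(gen)  # returns unreachable objects found
                gc_time_ms[gen].append(time.time() * 1000 - start)
                gc_unreachable[gen] += unreachable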
threshold = gc.get_threshold()
         counts = gc.get_count()
-        for i in (0, 1, 2):
+        for i in (2, 1, 0):
             if threshold[i] < counts[i]:
                 logger.info("Collecting gc %d", i)
 
                 start = time.time() * 1000
-                gc.collect(i)
+                unreachable = gc.collect(i)
                 end = time.time() * 1000
 
                 gc_time.inc_by(end - start, i)
+                gc_unreachable.inc_by(unreachable, i)
 
         return ret
 
From 0b2158719c43eab87ab7a9448ae1d85008b92b92 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 7 Jun 2016 15:07:11 +0100
Subject: [PATCH 111/414] Remove dead code.

Loading push rules now happens in the datastore, so we can remove
the methods that loaded them outside the datastore.

The ``waiting_for_join_list`` in federation handler isn't populated by
anything, so can be removed.

The ``_get_members_events_txn`` method isn't called from anywhere
so can be removed.
---
 synapse/handlers/federation.py           | 13 ------------
 synapse/push/bulk_push_rule_evaluator.py |  8 --------
 synapse/push/clientformat.py             | 26 ------------------------
 synapse/storage/roommember.py            |  7 -------
 4 files changed, 54 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 648a505e65..ff83c608e7 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -66,10 +66,6 @@ class FederationHandler(BaseHandler):
 
         self.hs = hs
 
-        self.distributor.observe("user_joined_room", self.user_joined_room)
-
-        self.waiting_for_join_list = {}
-
         self.store = hs.get_datastore()
         self.replication_layer = hs.get_replication_layer()
         self.state_handler = hs.get_state_handler()
@@ -1091,15 +1087,6 @@ class FederationHandler(BaseHandler):
     def get_min_depth_for_context(self, context):
         return self.store.get_min_depth(context)
 
-    @log_function
-    def user_joined_room(self, user, room_id):
-        waiters = self.waiting_for_join_list.get(
-            (user.to_string(), room_id),
-            []
-        )
-        while waiters:
-            waiters.pop().callback(None)
-
     @defer.inlineCallbacks
     @log_function
     def _handle_new_event(self, origin, event, state=None, auth_events=None,
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 6e42121b1d..756e5da513 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import logging
-import ujson as json
 
 from twisted.internet import defer
 
@@ -27,13 +26,6 @@ from synapse.visibility import filter_events_for_clients
 logger = logging.getLogger(__name__)
 
 
-def decode_rule_json(rule):
-    rule = dict(rule)
-    rule['conditions'] = json.loads(rule['conditions'])
-    rule['actions'] = json.loads(rule['actions'])
-    return rule
-
-
 @defer.inlineCallbacks
 def _get_rules(room_id, user_ids, store):
     rules_by_user = yield store.bulk_get_push_rules(user_ids)
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index b3983f7940..e0331b2d2d 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -13,37 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.push.baserules import list_with_base_rules - from synapse.push.rulekinds import ( PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP ) import copy -import simplejson as json - - -def load_rules_for_user(user, rawrules, enabled_map): - ruleslist = [] - for rawrule in rawrules: - rule = dict(rawrule) - rule["conditions"] = json.loads(rawrule["conditions"]) - rule["actions"] = json.loads(rawrule["actions"]) - ruleslist.append(rule) - - # We're going to be mutating this a lot, so do a deep copy - rules = list(list_with_base_rules(ruleslist)) - - for i, rule in enumerate(rules): - rule_id = rule['rule_id'] - if rule_id in enabled_map: - if rule.get('enabled', True) != bool(enabled_map[rule_id]): - # Rules are cached across users. - rule = dict(rule) - rule['enabled'] = bool(enabled_map[rule_id]) - rules[i] = rule - - return rules def format_push_rules_for_user(user, ruleslist): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 64b4bd371b..8bd693be72 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -243,13 +243,6 @@ class RoomMemberStore(SQLBaseStore): user_ids = yield self.get_users_in_room(room_id) defer.returnValue(set(get_domain_from_id(uid) for uid in user_ids)) - def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None): - rows = self._get_members_rows_txn( - txn, - room_id, membership, user_id, - ) - return [r["event_id"] for r in rows] - def _get_members_rows_txn(self, txn, room_id, membership=None, user_id=None): where_clause = "c.room_id = ?" where_values = [room_id] From dded389ac16ec023c986df400d25ca94a4a28677 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 15:45:56 +0100 Subject: [PATCH 112/414] Allow setting of gc.set_thresholds --- synapse/app/homeserver.py | 5 +++++ synapse/app/pusher.py | 5 +++++ synapse/app/synchrotron.py | 15 ++++++++++----- synapse/config/server.py | 19 ++++++++++++++++++- 4 files changed, 38 insertions(+), 6 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df675c0ed4..22e1721fc4 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -16,6 +16,7 @@ import synapse +import gc import logging import os import sys @@ -351,6 +352,8 @@ class SynapseService(service.Service): def startService(self): hs = setup(self.config) change_resource_limit(hs.config.soft_file_limit) + if hs.config.gc_thresholds: + gc.set_threshold(*hs.config.gc_thresholds) def stopService(self): return self._port.stopListening() @@ -422,6 +425,8 @@ def run(hs): # sys.settrace(logcontext_tracer) with LoggingContext("run"): change_resource_limit(hs.config.soft_file_limit) + if hs.config.gc_thresholds: + gc.set_threshold(*hs.config.gc_thresholds) reactor.run() if hs.config.daemonize: diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 3c3fa38053..7e2bf7ecc2 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -43,6 +43,7 @@ from twisted.web.resource import Resource from daemonize import Daemonize +import gc import sys import logging @@ -342,6 +343,8 @@ def setup(config_options): ps.start_listening() change_resource_limit(ps.config.soft_file_limit) + if ps.config.gc_thresholds: + gc.set_threshold(*ps.config.gc_thresholds) def start(): ps.replicate() @@ -361,6 +364,8 @@ if __name__ == '__main__': def run(): with LoggingContext("run"): change_resource_limit(ps.config.soft_file_limit) + if ps.config.gc_thresholds: + gc.set_threshold(*ps.config.gc_thresholds) reactor.run() daemon = Daemonize( diff --git 
a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 5c552ffb29..f9673ab8d8 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -57,6 +57,7 @@ from daemonize import Daemonize import sys import logging import contextlib +import gc import ujson as json logger = logging.getLogger("synapse.app.synchrotron") @@ -484,6 +485,8 @@ def setup(config_options): ss.start_listening() change_resource_limit(ss.config.soft_file_limit) + if ss.config.gc_thresholds: + ss.set_threshold(*ss.config.gc_thresholds) def start(): ss.get_datastore().start_profiling() @@ -496,17 +499,19 @@ def setup(config_options): if __name__ == '__main__': with LoggingContext("main"): - ps = setup(sys.argv[1:]) + ss = setup(sys.argv[1:]) - if ps.config.daemonize: + if ss.config.daemonize: def run(): with LoggingContext("run"): - change_resource_limit(ps.config.soft_file_limit) + change_resource_limit(ss.config.soft_file_limit) + if ss.config.gc_thresholds: + gc.set_threshold(*ss.config.gc_thresholds) reactor.run() daemon = Daemonize( - app="synapse-pusher", - pid=ps.config.pid_file, + app="synapse-synchrotron", + pid=ss.config.pid_file, action=run, auto_close_fds=False, verbose=True, diff --git a/synapse/config/server.py b/synapse/config/server.py index c2d8f8a52f..44b8d422e0 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config +from ._base import Config, ConfigError class ServerConfig(Config): @@ -38,6 +38,20 @@ class ServerConfig(Config): self.listeners = config.get("listeners", []) + thresholds = config.get("gc_thresholds", None) + if thresholds is not None: + try: + assert len(thresholds) == 3 + self.gc_thresholds = ( + int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), + ) + except: + raise ConfigError( + "Value of `gc_threshold` must be a list of three integers if set" + ) + else: + self.gc_thresholds = None + bind_port = config.get("bind_port") if bind_port: self.listeners = [] @@ -157,6 +171,9 @@ class ServerConfig(Config): # hard limit. 
soft_file_limit: 0 + # The GC threshold parameters to pass to `gc.set_threshold`, if defined + # gc_thresholds: [700, 10, 10] + # A list of other Home Servers to fetch the public room directory from # and include in the public room directory of this home server # This is a temporary stopgap solution to populate new server with a From 2d1d1025fac846e2746dc627c0ebb6542c1488d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 16:26:25 +0100 Subject: [PATCH 113/414] Add gc_threshold to pusher and synchrotron --- synapse/app/pusher.py | 14 ++++++++++++++ synapse/app/synchrotron.py | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 7e2bf7ecc2..4ec23d84c1 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -65,6 +65,20 @@ class SlaveConfig(DatabaseConfig): self.pid_file = self.abspath(config.get("pid_file")) self.public_baseurl = config["public_baseurl"] + thresholds = config.get("gc_thresholds", None) + if thresholds is not None: + try: + assert len(thresholds) == 3 + self.gc_thresholds = ( + int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), + ) + except: + raise ConfigError( + "Value of `gc_threshold` must be a list of three integers if set" + ) + else: + self.gc_thresholds = None + # some things used by the auth handler but not actually used in the # pusher codebase self.bcrypt_rounds = None diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index f9673ab8d8..297e199453 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -78,6 +78,20 @@ class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig): self.macaroon_secret_key = config["macaroon_secret_key"] self.expire_access_token = config.get("expire_access_token", False) + thresholds = config.get("gc_thresholds", None) + if thresholds is not None: + try: + assert len(thresholds) == 3 + self.gc_thresholds = ( + int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), + ) + except: + raise ConfigError( + "Value of `gc_threshold` must be a list of three integers if set" + ) + else: + self.gc_thresholds = None + def default_config(self, server_name, **kwargs): pid_file = self.abspath("synchroton.pid") return """\ From 64935d11f7730702cafba8591512ddb57e8fadf1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 7 Jun 2016 16:35:28 +0100 Subject: [PATCH 114/414] Add script for running sytest with dendron --- jenkins-dendron-postgres.sh | 84 +++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100755 jenkins-dendron-postgres.sh diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh new file mode 100755 index 0000000000..8e3a4c51a9 --- /dev/null +++ b/jenkins-dendron-postgres.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +set -eux + +: ${WORKSPACE:="$(pwd)"} + +export PYTHONDONTWRITEBYTECODE=yep +export SYNAPSE_CACHE_FACTOR=1 + +# Output test results as junit xml +export TRIAL_FLAGS="--reporter=subunit" +export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" +# Write coverage reports to a separate file for each process +export COVERAGE_OPTS="-p" +export DUMP_COVERAGE_COMMAND="coverage help" + +# Output flake8 violations to violations.flake8.log +# Don't exit with non-0 status code on Jenkins, +# so that the build steps continue and a later step can decided whether to +# UNSTABLE or FAILURE this build. +export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" 
+ +rm .coverage* || echo "No coverage files to remove" + +tox --notest -e py27 + +TOX_BIN=$WORKSPACE/.tox/py27/bin +python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install +$TOX_BIN/pip install psycopg2 +$TOX_BIN/pip install lxml + +: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} + +if [[ ! -e .dendron-base ]]; then + git clone https://github.com/matrix-org/dendron.git .dendron-base --mirror +else + (cd .dendron-base; git fetch -p) +fi + +rm -rf dendron +git clone .dendron-base dendron --shared +cd dendron + +: ${GOPATH:=${WORKSPACE}/.gopath} +if [[ "${GOPATH}" != *:* ]]; then + mkdir -p "${GOPATH}" + export PATH="${GOPATH}/bin:${PATH}" +fi +export GOPATH + +git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) + +go get github.com/constabulary/gb/... +gb generate +gb build + +cd .. + + +if [[ ! -e .sytest-base ]]; then + git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror +else + (cd .sytest-base; git fetch -p) +fi + +rm -rf sytest +git clone .sytest-base sytest --shared +cd sytest + +git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) + +: ${PORT_BASE:=8000} + +./jenkins/prep_sytest_for_postgres.sh + +echo >&2 "Running sytest with PostgreSQL"; +./jenkins/install_and_run.sh --python $TOX_BIN/python \ + --synapse-directory $WORKSPACE \ + --dendron $WORKSPACE/dendron/bin/dendron \ + --synchrotron \ + --pusher \ + --port-base $PORT_BASE + +cd .. From 18f0cc7d993408a754e7ff26e9474a969adf762a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 16:51:01 +0100 Subject: [PATCH 115/414] Record some more GC metrics --- synapse/metrics/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index b29cec3de1..8f69aa1ff3 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -156,6 +156,11 @@ pending_calls_metric = reactor_metrics.register_distribution("pending_calls") gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"]) gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"]) +reactor_metrics.register_callback("gc_total_objects", lambda: len(gc.get_objects())) +reactor_metrics.register_callback( + "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"] +) + def runUntilCurrentTimer(func): From 0f2165ccf4fd0ae6636018cea7e1b91141179e88 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Jun 2016 17:00:45 +0100 Subject: [PATCH 116/414] Don't track total objects as its too expensive to calculate --- synapse/metrics/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 8f69aa1ff3..bdd7292a30 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -156,7 +156,6 @@ pending_calls_metric = reactor_metrics.register_distribution("pending_calls") gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"]) gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"]) -reactor_metrics.register_callback("gc_total_objects", lambda: len(gc.get_objects())) reactor_metrics.register_callback( "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"] ) From bab916bccc57734fb96f7f9be66b1b15b2ed4dbf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 11:05:45 +0100 Subject: [PATCH 
117/414] Bump version and changelog to v0.16.0-rc2 --- CHANGES.rst | 27 +++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index e77b31b583..40f7ebd734 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,30 @@ +Changes in synapse v0.16.0-rc2 (2016-06-08) +=========================================== + +Features: + +* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849) + +Changes: + +* Record metrics about GC (PR #771, #847, #852) +* Add metric counter for number of persisted events (PR #841) + +Bug fixes: + +* Fix 'From' header in email notifications (PR #843) +* Fix presence where timeouts were not being fired for the first 8h after + restarts (PR #842) +* Fix bug where synapse sent malformed transactions to AS's when retrying + transactions (Commits 310197b, 843790) + +Performance Improvements: + +* Remove event fetching from DB threads (PR #835) +* Change the way we cache events (PR #836) +* Add events to cache when we persist them (PR #840) + + Changes in synapse v0.16.0-rc1 (2016-06-03) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 3b290db79f..ad088a7880 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.0-rc1" +__version__ = "0.16.0-rc2" From 66503a69c9070230c99737976bff73f68079e4d2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 11:13:56 +0100 Subject: [PATCH 118/414] Update commit hash in changelog --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 40f7ebd734..6194b3eb69 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,7 +16,7 @@ Bug fixes: * Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) * Fix bug where synapse sent malformed transactions to AS's when retrying - transactions (Commits 310197b, 843790) + transactions (Commits 310197b, 8437906) Performance Improvements: From 1a815fb04f1d17286be27379dd7463936606bd3a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 11:33:30 +0100 Subject: [PATCH 119/414] Don't hit DB for noop replications queries --- synapse/handlers/typing.py | 3 +++ synapse/storage/account_data.py | 3 +++ synapse/storage/presence.py | 3 +++ synapse/storage/push_rule.py | 3 +++ synapse/storage/tags.py | 3 +++ 5 files changed, 15 insertions(+) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 861b8f7989..5589296c09 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -221,6 +221,9 @@ class TypingHandler(object): def get_all_typing_updates(self, last_id, current_id): # TODO: Work out a way to do this without scanning the entire state. + if last_id == current_id: + return [] + rows = [] for room_id, serial in self._room_serials.items(): if last_id < serial and serial <= current_id: diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index ec7e8d40d2..3fa226e92d 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -138,6 +138,9 @@ class AccountDataStore(SQLBaseStore): A deferred pair of lists of tuples of stream_id int, user_id string, room_id string, type string, and content string. 
""" + if last_room_id == current_id and last_global_id == current_id: + return defer.succeed(([], [])) + def get_updated_account_data_txn(txn): sql = ( "SELECT stream_id, user_id, account_data_type, content" diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 3fab57a7e8..d03f7c541e 100644 --- a/synapse/storage/presence.py +++ b/synapse/storage/presence.py @@ -118,6 +118,9 @@ class PresenceStore(SQLBaseStore): ) def get_all_presence_updates(self, last_id, current_id): + if last_id == current_id: + return defer.succeed([]) + def get_all_presence_updates_txn(txn): sql = ( "SELECT stream_id, user_id, state, last_active_ts," diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 786d6f6d67..8183b7f1b0 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -421,6 +421,9 @@ class PushRuleStore(SQLBaseStore): def get_all_push_rule_updates(self, last_id, current_id, limit): """Get all the push rules changes that have happend on the server""" + if last_id == current_id: + return defer.succeed([]) + def get_all_push_rule_updates_txn(txn): sql = ( "SELECT stream_id, event_stream_ordering, user_id, rule_id," diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 9da23f34cb..5a2c1aa59b 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -68,6 +68,9 @@ class TagsStore(SQLBaseStore): A deferred list of tuples of stream_id int, user_id string, room_id string, tag string and content string. """ + if last_id == current_id: + defer.returnValue([]) + def get_all_updated_tags_txn(txn): sql = ( "SELECT stream_id, user_id, room_id" From 17aab5827a1a1eace4e44d130eef7da4dda6984f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 11:55:31 +0100 Subject: [PATCH 120/414] Add some logging for when servers ask for missing events --- synapse/federation/federation_server.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index f1d231b9d8..9f2a64dede 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -377,10 +377,20 @@ class FederationServer(FederationBase): @log_function def on_get_missing_events(self, origin, room_id, earliest_events, latest_events, limit, min_depth): + logger.info( + "on_get_missing_events: earliest_events: %r, latest_events: %r," + " limit: %d, min_depth: %d", + earliest_events, latest_events, limit, min_depth + ) missing_events = yield self.handler.on_get_missing_events( origin, room_id, earliest_events, latest_events, limit, min_depth ) + if len(missing_events) < 5: + logger.info("Returning %d events: %r", len(missing_events), missing_events) + else: + logger.info("Returning %d events", len(missing_events)) + time_now = self._clock.time_msec() defer.returnValue({ @@ -490,6 +500,11 @@ class FederationServer(FederationBase): latest = set(latest) latest |= seen + logger.info( + "Missing %d events for room %r: %r...", + len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] + ) + missing_events = yield self.get_missing_events( origin, pdu.room_id, @@ -517,6 +532,10 @@ class FederationServer(FederationBase): prevs = {e_id for e_id, _ in pdu.prev_events} seen = set(have_seen.keys()) if prevs - seen: + logger.info( + "Still missing %d events for room %r: %r...", + len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] + ) fetch_state = True if fetch_state: From 1fd6eb695d1fffbe830faf50c13607116300095b Mon Sep 17 00:00:00 2001 From: 
Erik Johnston Date: Wed, 8 Jun 2016 14:15:18 +0100 Subject: [PATCH 121/414] Enable auth on federation PublicRoomList --- synapse/federation/transport/server.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a1a334955f..ab9f38f010 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -532,11 +532,6 @@ class PublicRoomList(BaseFederationServlet): data = yield self.room_list_handler.get_local_public_room_list() defer.returnValue((200, data)) - # Avoid doing remote HS authorization checks which are done by default by - # BaseFederationServlet. - def _wrap(self, code): - return code - SERVLET_CLASSES = ( FederationSendServlet, From efeabd31801224cbacd31b61ff0d869b70b1820d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 14:23:15 +0100 Subject: [PATCH 122/414] Log user that is making /publicRooms calls --- synapse/rest/client/v1/room.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index db52a1fc39..604c2a565e 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -279,6 +279,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): + try: + yield self.auth.get_user_by_req(request) + except AuthError: + # This endpoint isn't authed, but its useful to know who's hitting + # it if they *do* supply an access token + pass + handler = self.hs.get_room_list_handler() data = yield handler.get_aggregated_public_room_list() From d88faf92d16d9384433452e4fb7901fd2bd6eda4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 14:39:31 +0100 Subject: [PATCH 123/414] Fix up federation PublicRoomList --- synapse/federation/transport/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ab9f38f010..6fc3e2207c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -528,7 +528,7 @@ class PublicRoomList(BaseFederationServlet): PATH = "/publicRooms" @defer.inlineCallbacks - def on_GET(self, request): + def on_GET(self, origin, content, query): data = yield self.room_list_handler.get_local_public_room_list() defer.returnValue((200, data)) From 690029d1a3ebd26f56656a723fefdeafd71310e4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 14:47:42 +0100 Subject: [PATCH 124/414] Don't make rooms visibile by default --- synapse/rest/client/v1/room.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 604c2a565e..86fbe2747d 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -72,8 +72,6 @@ class RoomCreateRestServlet(ClientV1RestServlet): def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) - # default visibility - user_supplied_config.setdefault("visibility", "public") return user_supplied_config def on_OPTIONS(self, request): From defa28efa186013fab18b3da76f60273cb6c3bb1 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 8 Jun 2016 15:11:31 +0100 Subject: [PATCH 125/414] Disable the synchrotron on jenkins until the sytest support lands (#855) * Disable the synchrotron on jenkins until the sytest support lands * Poke jenkins * Poke jenkins * Poke jenkins * Poke jenkins * Poke jenkins * Poke 
jenkins * Poke jenkins * Poke jenkins --- jenkins-dendron-postgres.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 8e3a4c51a9..d15836e6bf 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -73,11 +73,12 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b ./jenkins/prep_sytest_for_postgres.sh +mkdir -p var + echo >&2 "Running sytest with PostgreSQL"; ./jenkins/install_and_run.sh --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ - --synchrotron \ --pusher \ --port-base $PORT_BASE From 81c07a32fd27260b5112dcc87845a9e87fa5db58 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Jun 2016 15:43:37 +0100 Subject: [PATCH 126/414] Pull full state for each room all at once --- synapse/handlers/room.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 9fd34588dd..ae44c7a556 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -20,7 +20,7 @@ from ._base import BaseHandler from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken from synapse.api.constants import ( - EventTypes, JoinRules, RoomCreationPreset, + EventTypes, JoinRules, RoomCreationPreset, Membership, ) from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.util import stringutils @@ -367,14 +367,10 @@ class RoomListHandler(BaseHandler): @defer.inlineCallbacks def handle_room(room_id): - # We pull each bit of state out indvidually to avoid pulling the - # full state into memory. Due to how the caching works this should - # be fairly quick, even if not originally in the cache. - def get_state(etype, state_key): - return self.state_handler.get_current_state(room_id, etype, state_key) + current_state = yield self.state_handler.get_current_state(room_id) # Double check that this is actually a public room. 
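            # (current_state above is a dict keyed by (event_type, state_key),
            # so the lookups below are plain dict accesses rather than
            # separate per-key state fetches.)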
- join_rules_event = yield get_state(EventTypes.JoinRules, "") + join_rules_event = current_state.get((EventTypes.JoinRules, "")) if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) if join_rule and join_rule != JoinRules.PUBLIC: @@ -382,47 +378,51 @@ class RoomListHandler(BaseHandler): result = {"room_id": room_id} - joined_users = yield self.store.get_users_in_room(room_id) - if len(joined_users) == 0: + num_joined_users = len([ + 1 for _, event in current_state.items() + if event.type == EventTypes.Member + and event.membership == Membership.JOIN + ]) + if num_joined_users == 0: return - result["num_joined_members"] = len(joined_users) + result["num_joined_members"] = num_joined_users aliases = yield self.store.get_aliases_for_room(room_id) if aliases: result["aliases"] = aliases - name_event = yield get_state(EventTypes.Name, "") + name_event = yield current_state.get((EventTypes.Name, "")) if name_event: name = name_event.content.get("name", None) if name: result["name"] = name - topic_event = yield get_state(EventTypes.Topic, "") + topic_event = current_state.get((EventTypes.Topic, "")) if topic_event: topic = topic_event.content.get("topic", None) if topic: result["topic"] = topic - canonical_event = yield get_state(EventTypes.CanonicalAlias, "") + canonical_event = current_state.get((EventTypes.CanonicalAlias, "")) if canonical_event: canonical_alias = canonical_event.content.get("alias", None) if canonical_alias: result["canonical_alias"] = canonical_alias - visibility_event = yield get_state(EventTypes.RoomHistoryVisibility, "") + visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, "")) visibility = None if visibility_event: visibility = visibility_event.content.get("history_visibility", None) result["world_readable"] = visibility == "world_readable" - guest_event = yield get_state(EventTypes.GuestAccess, "") + guest_event = current_state.get((EventTypes.GuestAccess, "")) guest = None if guest_event: guest = guest_event.content.get("guest_access", None) result["guest_can_join"] = guest == "can_join" - avatar_event = yield get_state("m.room.avatar", "") + avatar_event = current_state.get(("m.room.avatar", "")) if avatar_event: avatar_url = avatar_event.content.get("url", None) if avatar_url: From 6e7dc7c7dde377794c23d5db6f25ffacfb08e82a Mon Sep 17 00:00:00 2001 From: Negar Fazeli Date: Wed, 8 Jun 2016 19:16:46 +0200 Subject: [PATCH 127/414] Fix a bug caused by a change in auth_handler function Fix the relevant unit test cases --- synapse/handlers/register.py | 4 ++-- tests/handlers/test_register.py | 9 +++------ 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index bbc07b045e..e0aaefe7be 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -388,8 +388,8 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - auth_handler = self.hs.get_handlers().auth_handler - token = auth_handler.generate_short_term_login_token(user_id, duration_seconds) + token = self.auth_handler().generate_short_term_login_token( + user_id, duration_seconds) if need_register: yield self.store.register( diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 9d5c653b45..69a5e5b1d4 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -41,14 +41,15 @@ class RegistrationTestCase(unittest.TestCase): handlers=None, http_client=None, 
expire_access_token=True) + self.auth_handler = Mock( + generate_short_term_login_token=Mock(return_value='secret')) self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.hs.get_handlers().profile_handler = Mock() self.mock_handler = Mock(spec=[ "generate_short_term_login_token", ]) - - self.hs.get_handlers().auth_handler = self.mock_handler + self.hs.get_auth_handler = Mock(return_value=self.auth_handler) @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): @@ -56,8 +57,6 @@ class RegistrationTestCase(unittest.TestCase): local_part = "someone" display_name = "someone" user_id = "@someone:test" - mock_token = self.mock_handler.generate_short_term_login_token - mock_token.return_value = 'secret' result_user_id, result_token = yield self.handler.get_or_create_user( local_part, display_name, duration_ms) self.assertEquals(result_user_id, user_id) @@ -75,8 +74,6 @@ class RegistrationTestCase(unittest.TestCase): local_part = "frank" display_name = "Frank" user_id = "@frank:test" - mock_token = self.mock_handler.generate_short_term_login_token - mock_token.return_value = 'secret' result_user_id, result_token = yield self.handler.get_or_create_user( local_part, display_name, duration_ms) self.assertEquals(result_user_id, user_id) From 95f305c35a790e8f10fef7e16268dfaba6bc4c31 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 10:57:11 +0100 Subject: [PATCH 128/414] Remove redundant exception log in /events --- synapse/rest/client/v1/events.py | 41 +++++++++++++++----------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index d1afa0f0d5..498bb9e18a 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -45,30 +45,27 @@ class EventStreamRestServlet(ClientV1RestServlet): raise SynapseError(400, "Guest users must specify room_id param") if "room_id" in request.args: room_id = request.args["room_id"][0] - try: - handler = self.handlers.event_stream_handler - pagin_config = PaginationConfig.from_request(request) - timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS - if "timeout" in request.args: - try: - timeout = int(request.args["timeout"][0]) - except ValueError: - raise SynapseError(400, "timeout must be in milliseconds.") - as_client_event = "raw" not in request.args + handler = self.handlers.event_stream_handler + pagin_config = PaginationConfig.from_request(request) + timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS + if "timeout" in request.args: + try: + timeout = int(request.args["timeout"][0]) + except ValueError: + raise SynapseError(400, "timeout must be in milliseconds.") - chunk = yield handler.get_stream( - requester.user.to_string(), - pagin_config, - timeout=timeout, - as_client_event=as_client_event, - affect_presence=(not is_guest), - room_id=room_id, - is_guest=is_guest, - ) - except: - logger.exception("Event stream failed") - raise + as_client_event = "raw" not in request.args + + chunk = yield handler.get_stream( + requester.user.to_string(), + pagin_config, + timeout=timeout, + as_client_event=as_client_event, + affect_presence=(not is_guest), + room_id=room_id, + is_guest=is_guest, + ) defer.returnValue((200, chunk)) From eba4ff1bcbd99ae5b23f7cdae2306662319d3b4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 11:29:43 +0100 Subject: [PATCH 129/414] 502 on /thumbnail when can't contact remote server --- 
synapse/rest/media/v1/media_repository.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index d96bf9afe2..2468c3ac42 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -26,6 +26,7 @@ from .thumbnailer import Thumbnailer from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.util.stringutils import random_string +from synapse.api.errors import SynapseError from twisted.internet import defer, threads @@ -134,10 +135,15 @@ class MediaRepository(object): request_path = "/".join(( "/_matrix/media/v1/download", server_name, media_id, )) - length, headers = yield self.client.get_file( - server_name, request_path, output_stream=f, - max_size=self.max_upload_size, - ) + try: + length, headers = yield self.client.get_file( + server_name, request_path, output_stream=f, + max_size=self.max_upload_size, + ) + except Exception as e: + logger.warn("Failed to fetch remoted media %r", e) + raise SynapseError(502, "Failed to fetch remoted media") + media_type = headers["Content-Type"][0] time_now_ms = self.clock.time_msec() From a31befbcd0f8238920b0ca198d35ef657c78e766 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 13:23:41 +0100 Subject: [PATCH 130/414] Bump version and changelog --- CHANGES.rst | 10 ++++++++++ synapse/__init__.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 6194b3eb69..ad974c586b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,13 @@ +Changes in synapse v0.16.0 (2016-06-09) +======================================= + +NB: As of v0.14 all AS config files must have an ID specified. + + +Bug fixes: + +* Don't make rooms published by default (PR #857) + Changes in synapse v0.16.0-rc2 (2016-06-08) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index ad088a7880..dc211e9637 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.0-rc2" +__version__ = "0.16.0" From 8327d5df709f1726c961743d937852e487648f5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 Jun 2016 14:16:26 +0100 Subject: [PATCH 131/414] Change CHANGELOG --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index ad974c586b..32f18e7098 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,7 @@ Changes in synapse v0.16.0 (2016-06-09) ======================================= -NB: As of v0.14 all AS config files must have an ID specified. +NB: As of v0.14 all AS config files must have an ID field. Bug fixes: From 7dbb473339bc41daf6c05b64756f97e011f653f5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 9 Jun 2016 18:50:38 +0100 Subject: [PATCH 132/414] Add function to load config without generating it Renames ``load_config`` to ``load_or_generate_config`` Adds a method called ``load_config`` that just loads the config. The main synapse.app.homeserver will continue to use ``load_or_generate_config`` to retain backwards compat. However new worker processes can use ``load_config`` to load the config avoiding some of the cruft needed to generate the config. 
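As an illustration only (this script is not part of the patch; the file
name and the final print are hypothetical), a worker process could now
load an existing config like this::

    import sys

    from synapse.config.homeserver import HomeServerConfig

    if __name__ == "__main__":
        # load_config only reads existing config files -- unlike
        # load_or_generate_config it never generates keys or a fresh
        # config, and it accepts no legacy command-line overrides.
        config = HomeServerConfig.load_config(
            "Synapse worker", sys.argv[1:]
        )
        print(config.server_name)

Paths given with ``-c`` may be files or directories; directories are
searched for ``*.yaml`` files, which are applied in sorted order.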
As the new ``load_config`` method is expected to be used by new configs it removes support for the legacy commandline overrides that ``load_or_generate_config`` supports --- synapse/app/homeserver.py | 3 +- synapse/config/_base.py | 145 ++++++++++++++++++++++++---------- tests/config/test_generate.py | 2 +- tests/config/test_load.py | 22 +++++- 4 files changed, 125 insertions(+), 47 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 22e1721fc4..40ffd9bf0d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -266,10 +266,9 @@ def setup(config_options): HomeServer """ try: - config = HomeServerConfig.load_config( + config = HomeServerConfig.load_or_generate_config( "Synapse Homeserver", config_options, - generate_section="Homeserver" ) except ConfigError as e: sys.stderr.write("\n" + e.message + "\n") diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 7449f36491..af9f17bf7b 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -157,9 +157,40 @@ class Config(object): return default_config, config @classmethod - def load_config(cls, description, argv, generate_section=None): - obj = cls() + def load_config(cls, description, argv): + config_parser = argparse.ArgumentParser( + description=description, + ) + config_parser.add_argument( + "-c", "--config-path", + action="append", + metavar="CONFIG_FILE", + help="Specify config file. Can be given multiple times and" + " may specify directories containing *.yaml files." + ) + config_parser.add_argument( + "--keys-directory", + metavar="DIRECTORY", + help="Where files such as certs and signing keys are stored when" + " their location is given explicitly in the config." + " Defaults to the directory containing the last config file", + ) + + config_args = config_parser.parse_args(argv) + + config_files = find_config_files(search_paths=config_args.config_path) + + obj = cls() + obj.read_config_files( + config_files, + keys_directory=config_args.keys_directory, + generate_keys=False, + ) + return obj + + @classmethod + def load_or_generate_config(cls, description, argv): config_parser = argparse.ArgumentParser(add_help=False) config_parser.add_argument( "-c", "--config-path", @@ -176,7 +207,7 @@ class Config(object): config_parser.add_argument( "--report-stats", action="store", - help="Stuff", + help="Whether the generated config reports anonymized usage statistics", choices=["yes", "no"] ) config_parser.add_argument( @@ -197,36 +228,11 @@ class Config(object): ) config_args, remaining_args = config_parser.parse_known_args(argv) + config_files = find_config_files(search_paths=config_args.config_path) + generate_keys = config_args.generate_keys - config_files = [] - if config_args.config_path: - for config_path in config_args.config_path: - if os.path.isdir(config_path): - # We accept specifying directories as config paths, we search - # inside that directory for all files matching *.yaml, and then - # we apply them in *sorted* order. - files = [] - for entry in os.listdir(config_path): - entry_path = os.path.join(config_path, entry) - if not os.path.isfile(entry_path): - print ( - "Found subdirectory in config directory: %r. IGNORING." - ) % (entry_path, ) - continue - - if not entry.endswith(".yaml"): - print ( - "Found file in config directory that does not" - " end in '.yaml': %r. IGNORING." 
- ) % (entry_path, ) - continue - - files.append(entry_path) - - config_files.extend(sorted(files)) - else: - config_files.append(config_path) + obj = cls() if config_args.generate_config: if config_args.report_stats is None: @@ -299,28 +305,43 @@ class Config(object): " -c CONFIG-FILE\"" ) - if config_args.keys_directory: - config_dir_path = config_args.keys_directory - else: - config_dir_path = os.path.dirname(config_args.config_path[-1]) - config_dir_path = os.path.abspath(config_dir_path) + obj.read_config_files( + config_files, + keys_directory=config_args.keys_directory, + generate_keys=generate_keys, + ) + + if generate_keys: + return None + + obj.invoke_all("read_arguments", args) + + return obj + + def read_config_files(self, config_files, keys_directory=None, + generate_keys=False): + if not keys_directory: + keys_directory = os.path.dirname(config_files[-1]) + + config_dir_path = os.path.abspath(keys_directory) specified_config = {} for config_file in config_files: - yaml_config = cls.read_config_file(config_file) + yaml_config = self.read_config_file(config_file) specified_config.update(yaml_config) if "server_name" not in specified_config: raise ConfigError(MISSING_SERVER_NAME) server_name = specified_config["server_name"] - _, config = obj.generate_config( + _, config = self.generate_config( config_dir_path=config_dir_path, server_name=server_name, is_generating_file=False, ) config.pop("log_config") config.update(specified_config) + if "report_stats" not in config: raise ConfigError( MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + @@ -328,11 +349,51 @@ class Config(object): ) if generate_keys: - obj.invoke_all("generate_files", config) + self.invoke_all("generate_files", config) return - obj.invoke_all("read_config", config) + self.invoke_all("read_config", config) - obj.invoke_all("read_arguments", args) - return obj +def find_config_files(search_paths): + """Finds config files using a list of search paths. If a path is a file + then that file path is added to the list. If a search path is a directory + then all the "*.yaml" files in that directory are added to the list in + sorted order. + + Args: + search_paths(list(str)): A list of paths to search. + + Returns: + list(str): A list of file paths. + """ + + config_files = [] + if search_paths: + for config_path in search_paths: + if os.path.isdir(config_path): + # We accept specifying directories as config paths, we search + # inside that directory for all files matching *.yaml, and then + # we apply them in *sorted* order. + files = [] + for entry in os.listdir(config_path): + entry_path = os.path.join(config_path, entry) + if not os.path.isfile(entry_path): + print ( + "Found subdirectory in config directory: %r. IGNORING." + ) % (entry_path, ) + continue + + if not entry.endswith(".yaml"): + print ( + "Found file in config directory that does not" + " end in '.yaml': %r. IGNORING." 
+ ) % (entry_path, ) + continue + + files.append(entry_path) + + config_files.extend(sorted(files)) + else: + config_files.append(config_path) + return config_files diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 4329d73974..8f57fbeb23 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -30,7 +30,7 @@ class ConfigGenerationTestCase(unittest.TestCase): shutil.rmtree(self.dir) def test_generate_config_generates_files(self): - HomeServerConfig.load_config("", [ + HomeServerConfig.load_or_generate_config("", [ "--generate-config", "-c", self.file, "--report-stats=yes", diff --git a/tests/config/test_load.py b/tests/config/test_load.py index bf46233c5c..161a87d7e3 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -34,6 +34,8 @@ class ConfigLoadingTestCase(unittest.TestCase): self.generate_config_and_remove_lines_containing("server_name") with self.assertRaises(Exception): HomeServerConfig.load_config("", ["-c", self.file]) + with self.assertRaises(Exception): + HomeServerConfig.load_or_generate_config("", ["-c", self.file]) def test_generates_and_loads_macaroon_secret_key(self): self.generate_config() @@ -54,11 +56,24 @@ class ConfigLoadingTestCase(unittest.TestCase): "was: %r" % (config.macaroon_secret_key,) ) + config = HomeServerConfig.load_or_generate_config("", ["-c", self.file]) + self.assertTrue( + hasattr(config, "macaroon_secret_key"), + "Want config to have attr macaroon_secret_key" + ) + if len(config.macaroon_secret_key) < 5: + self.fail( + "Want macaroon secret key to be string of at least length 5," + "was: %r" % (config.macaroon_secret_key,) + ) + def test_load_succeeds_if_macaroon_secret_key_missing(self): self.generate_config_and_remove_lines_containing("macaroon") config1 = HomeServerConfig.load_config("", ["-c", self.file]) config2 = HomeServerConfig.load_config("", ["-c", self.file]) + config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file]) self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key) + self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key) def test_disable_registration(self): self.generate_config() @@ -70,14 +85,17 @@ class ConfigLoadingTestCase(unittest.TestCase): config = HomeServerConfig.load_config("", ["-c", self.file]) self.assertFalse(config.enable_registration) + config = HomeServerConfig.load_or_generate_config("", ["-c", self.file]) + self.assertFalse(config.enable_registration) + # Check that either config value is clobbered by the command line. - config = HomeServerConfig.load_config("", [ + config = HomeServerConfig.load_or_generate_config("", [ "-c", self.file, "--enable-registration" ]) self.assertTrue(config.enable_registration) def generate_config(self): - HomeServerConfig.load_config("", [ + HomeServerConfig.load_or_generate_config("", [ "--generate-config", "-c", self.file, "--report-stats=yes", From 50f69e2ef266cf08aaff0311705fcf56dc1bd9f3 Mon Sep 17 00:00:00 2001 From: Bartek Rutkowski Date: Fri, 10 Jun 2016 11:33:43 +0100 Subject: [PATCH 133/414] Change /bin/bash to /bin/sh in tox.ini No features of Bash are used here, so using /bin/sh makes it more portable to systems that don't have Bash natively (like BSD systems). 
--- tox.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 757b7189c3..52d93c65e5 100644 --- a/tox.ini +++ b/tox.ini @@ -11,7 +11,7 @@ deps = setenv = PYTHONDONTWRITEBYTECODE = no_byte_code commands = - /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \ + /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \ {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}" {env:DUMP_COVERAGE_COMMAND:coverage report -m} @@ -26,4 +26,4 @@ skip_install = True basepython = python2.7 deps = flake8 -commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}" +commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}" From fdc015c6e9b023c5cb87491b7e64efd46eedd129 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 10 Jun 2016 16:30:26 +0100 Subject: [PATCH 134/414] Enable testing the synchrotron on jenkins --- jenkins-dendron-postgres.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index d15836e6bf..7e6f24aa7d 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -80,6 +80,7 @@ echo >&2 "Running sytest with PostgreSQL"; --synapse-directory $WORKSPACE \ --dendron $WORKSPACE/dendron/bin/dendron \ --pusher \ + --synchrotron \ --port-base $PORT_BASE cd .. From 33546b58aa0a235d159269b8705a8999d219f41a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sun, 12 Jun 2016 23:11:29 +0100 Subject: [PATCH 135/414] point to the CAPTCHA docs --- synapse/config/captcha.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index b54dbabbee..7ba0c2de6a 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -27,6 +27,7 @@ class CaptchaConfig(Config): def default_config(self, **kwargs): return """\ ## Captcha ## + # See docs/CAPTCHA_SETUP for full details of configuring this. # This Home Server's ReCAPTCHA public key. recaptcha_public_key: "YOUR_PUBLIC_KEY" From 36e2aade8790f3f2d86e8f6cc8a6de21e8bec4fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Jun 2016 13:25:29 +0100 Subject: [PATCH 136/414] Make get_domain_from_id throw SynapseError on invalid ID --- synapse/types.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/types.py b/synapse/types.py index 7b6ae44bdd..f639651a73 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -22,7 +22,10 @@ Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"]) def get_domain_from_id(string): - return string.split(":", 1)[1] + try: + return string.split(":", 1)[1] + except IndexError: + raise SynapseError(400, "Invalid ID: %r", string) class DomainSpecificString( From 255c229f23635f7dc0299de5d54460eea2e2af1c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 15 Jun 2016 10:23:03 +0100 Subject: [PATCH 137/414] Work around TLS bug in twisted Wrap up twisted's FileBodyProducer to work around https://twistedmatrix.com/trac/ticket/8473. Hopefully this fixes https://matrix.org/jira/browse/SYN-700. 
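For context, a minimal sketch of where the producer sits (the URL and
request body are placeholders; this snippet is illustrative, not part of
the patch)::

    from StringIO import StringIO

    from twisted.internet import reactor
    from twisted.web.client import Agent
    from twisted.web.http_headers import Headers

    from synapse.http.client import FileBodyProducer

    agent = Agent(reactor)
    # The wrapped producer below ignores TaskDone/NotPaused if twisted
    # pauses or resumes it after the body has already been sent, instead
    # of failing the whole request.
    d = agent.request(
        "POST", "https://example.com/",
        Headers({"Content-Type": ["application/json"]}),
        FileBodyProducer(StringIO('{"some": "json"}')),
    )
    d.addBoth(lambda _: reactor.stop())
    reactor.run()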
--- synapse/http/client.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index c7fa692435..3ec9bc7faf 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint from canonicaljson import encode_canonical_json -from twisted.internet import defer, reactor, ssl, protocol +from twisted.internet import defer, reactor, ssl, protocol, task from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint from twisted.web.client import ( BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent, - readBody, FileBodyProducer, PartialDownloadError, + readBody, PartialDownloadError, ) +from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web._newclient import ResponseDone @@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def creatorForNetloc(self, hostname, port): return self + + +class FileBodyProducer(TwistedFileBodyProducer): + """Workaround for https://twistedmatrix.com/trac/ticket/8473 + + We override the pauseProducing and resumeProducing methods in twisted's + FileBodyProducer so that they do not raise exceptions if the task has + already completed. + """ + + def pauseProducing(self): + try: + super(FileBodyProducer, self).pauseProducing() + except task.TaskDone: + # task has already completed + pass + + def resumeProducing(self): + try: + super(FileBodyProducer, self).resumeProducing() + except task.NotPaused: + # task was not paused (probably because it had already completed) + pass From b31c49d6760b4cdeefc8e0b43d6639be4576e249 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 10:58:07 +0100 Subject: [PATCH 138/414] Correctly mark backfilled events as backfilled --- synapse/handlers/federation.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ff83c608e7..c2df43e2f6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -345,19 +345,21 @@ class FederationHandler(BaseHandler): ) missing_auth = required_auth - set(auth_events) - results = yield defer.gatherResults( - [ - self.replication_layer.get_pdu( - [dest], - event_id, - outlier=True, - timeout=10000, - ) - for event_id in missing_auth - ], - consumeErrors=True - ).addErrback(unwrapFirstError) - auth_events.update({a.event_id: a for a in results}) + if missing_auth: + logger.info("Missing auth for backfill: %r", missing_auth) + results = yield defer.gatherResults( + [ + self.replication_layer.get_pdu( + [dest], + event_id, + outlier=True, + timeout=10000, + ) + for event_id in missing_auth + ], + consumeErrors=True + ).addErrback(unwrapFirstError) + auth_events.update({a.event_id: a for a in results}) ev_infos = [] for a in auth_events.values(): @@ -399,7 +401,7 @@ class FederationHandler(BaseHandler): # previous to work out the state. # TODO: We can probably do something more clever here. 
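            # Flagging these as backfilled stops them being relayed to
            # clients down their sync streams as if they were new events.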
yield self._handle_new_event( - dest, event + dest, event, backfilled=True, ) defer.returnValue(events) From d41a1a91d3cce28e5416a91b7494d079e4c765f0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 15:12:59 +0100 Subject: [PATCH 139/414] Linearize fetching of gaps on incoming events This potentially stops the server from doing multiple requests for the same data. --- synapse/federation/federation_base.py | 3 + synapse/federation/federation_client.py | 2 + synapse/federation/federation_server.py | 76 +++++++++++++++---------- synapse/federation/replication.py | 2 + 4 files changed, 52 insertions(+), 31 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index a0b7cb7963..da2f5e8cfd 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -31,6 +31,9 @@ logger = logging.getLogger(__name__) class FederationBase(object): + def __init__(self, hs): + pass + @defer.inlineCallbacks def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False, include_none=False): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d835c1b038..b06387051c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -52,6 +52,8 @@ sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"]) class FederationClient(FederationBase): + def __init__(self, hs): + super(FederationClient, self).__init__(hs) def start_get_pdu_cache(self): self._get_pdu_cache = ExpiringCache( diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9f2a64dede..fe92457ba1 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -19,6 +19,7 @@ from twisted.internet import defer from .federation_base import FederationBase from .units import Transaction, Edu +from synapse.util.async import Linearizer from synapse.util.logutils import log_function from synapse.events import FrozenEvent import synapse.metrics @@ -44,6 +45,11 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[ class FederationServer(FederationBase): + def __init__(self, hs): + super(FederationServer, self).__init__(hs) + + self._room_pdu_linearizer = Linearizer() + def set_handler(self, handler): """Sets the handler that the replication layer will use to communicate receipt of new PDUs from other home servers. The required methods are @@ -491,43 +497,51 @@ class FederationServer(FederationBase): pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: if get_missing and prevs - seen: - latest = yield self.store.get_latest_event_ids_in_room( - pdu.room_id - ) + # If we're missing stuff, ensure we only fetch stuff one + # at a time. + with (yield self._room_pdu_linearizer.queue(pdu.room_id)): + # We recalculate seen, since it may have changed. 
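+                # (Another request for the same room may have already
+                # fetched these events while we waited for the
+                # linearizer, in which case there is nothing left to fetch.)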
+ have_seen = yield self.store.have_events(prevs) + seen = set(have_seen.keys()) - # We add the prev events that we have seen to the latest - # list to ensure the remote server doesn't give them to us - latest = set(latest) - latest |= seen + if prevs - seen: + latest = yield self.store.get_latest_event_ids_in_room( + pdu.room_id + ) - logger.info( - "Missing %d events for room %r: %r...", - len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] - ) + # We add the prev events that we have seen to the latest + # list to ensure the remote server doesn't give them to us + latest = set(latest) + latest |= seen - missing_events = yield self.get_missing_events( - origin, - pdu.room_id, - earliest_events_ids=list(latest), - latest_events=[pdu], - limit=10, - min_depth=min_depth, - ) + logger.info( + "Missing %d events for room %r: %r...", + len(prevs - seen), pdu.room_id, list(prevs - seen)[:5] + ) - # We want to sort these by depth so we process them and - # tell clients about them in order. - missing_events.sort(key=lambda x: x.depth) + missing_events = yield self.get_missing_events( + origin, + pdu.room_id, + earliest_events_ids=list(latest), + latest_events=[pdu], + limit=10, + min_depth=min_depth, + ) - for e in missing_events: - yield self._handle_new_pdu( - origin, - e, - get_missing=False - ) + # We want to sort these by depth so we process them and + # tell clients about them in order. + missing_events.sort(key=lambda x: x.depth) - have_seen = yield self.store.have_events( - [ev for ev, _ in pdu.prev_events] - ) + for e in missing_events: + yield self._handle_new_pdu( + origin, + e, + get_missing=False + ) + + have_seen = yield self.store.have_events( + [ev for ev, _ in pdu.prev_events] + ) prevs = {e_id for e_id, _ in pdu.prev_events} seen = set(have_seen.keys()) diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py index 3e062a5eab..ea66a5dcbc 100644 --- a/synapse/federation/replication.py +++ b/synapse/federation/replication.py @@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer): self.hs = hs + super(ReplicationLayer, self).__init__(hs) + def __str__(self): return "" % self.server_name From 0ef0655b83adee8671358e35c42e2e646ef8d2fd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 15:50:17 +0100 Subject: [PATCH 140/414] Bump version and changelog --- CHANGES.rst | 27 ++++++++++++++++++++++++++- synapse/__init__.py | 2 +- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 32f18e7098..d5f465792c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,28 @@ +Changes in synapse v0.16.1-rc1 (2016-06-15) +=========================================== + +Features: None + +Changes: + +* Log requester for ``/publicRoom`` endpoints when possible (PR #856) +* 502 on ``/thumbnail`` when can't connect to remote server (PR #862) +* Linearize fetching of gaps on incoming events (PR #871) + + +Bugs fixes: + +* Fix bug where rooms where marked as published by default (PR #857) +* Fix bug where joining room with an event with invalid sender (PR #868) +* Fix bug where backfilled events were sent down sync streams (PR #869) +* Fix bug where outgoing connections could wedge indefinitely (PR #870) + + +Performance improvements: + +* Improve ``/publicRooms`` performance(PR #859) + + Changes in synapse v0.16.0 (2016-06-09) ======================================= @@ -28,7 +53,7 @@ Bug fixes: * Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 
8437906) -Performance Improvements: +Performance improvements: * Remove event fetching from DB threads (PR #835) * Change the way we cache events (PR #836) diff --git a/synapse/__init__.py b/synapse/__init__.py index dc211e9637..faaa86d972 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.0" +__version__ = "0.16.1-rc1" From 0477368e9afb7de9d8f95352f47973e51f0a837c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 16:06:26 +0100 Subject: [PATCH 141/414] Update change log --- CHANGES.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d5f465792c..1a47aae857 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -15,7 +15,8 @@ Bugs fixes: * Fix bug where rooms where marked as published by default (PR #857) * Fix bug where joining room with an event with invalid sender (PR #868) * Fix bug where backfilled events were sent down sync streams (PR #869) -* Fix bug where outgoing connections could wedge indefinitely (PR #870) +* Fix bug where outgoing connections could wedge indefinitely, causing push + notifications to be unreliable (PR #870) Performance improvements: From a60169ea0987df41ee540eefbb77cf3ff53446bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 16:57:48 +0100 Subject: [PATCH 142/414] Handle og props with not content --- synapse/rest/media/v1/preview_url_resource.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 37dd1de899..fc72896e0c 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -252,7 +252,8 @@ class PreviewUrlResource(Resource): og = {} for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"): - og[tag.attrib['property']] = tag.attrib['content'] + if 'content' in tag.attrib: + og[tag.attrib['property']] = tag.attrib['content'] # TODO: grab article: meta tags too, e.g.: From 1e9026e484be0f90256ae60c05eed9d1f87cf6b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 16:58:05 +0100 Subject: [PATCH 143/414] Handle floats as img widths --- synapse/rest/media/v1/preview_url_resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index fc72896e0c..a6807df620 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -280,7 +280,7 @@ class PreviewUrlResource(Resource): # TODO: consider inlined CSS styles as well as width & height attribs images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") images = sorted(images, key=lambda i: ( - -1 * int(i.attrib['width']) * int(i.attrib['height']) + -1 * float(i.attrib['width']) * float(i.attrib['height']) )) if not images: images = tree.xpath("//img[@src]") From 09a17f965cf55dca45983473ed744f539b9ec92e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2016 16:58:12 +0100 Subject: [PATCH 144/414] Line lengths --- synapse/rest/media/v1/preview_url_resource.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index a6807df620..74c64f1371 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ 
b/synapse/rest/media/v1/preview_url_resource.py @@ -288,9 +288,9 @@ class PreviewUrlResource(Resource): og['og:image'] = images[0].attrib['src'] # pre-cache the image for posterity - # FIXME: it might be cleaner to use the same flow as the main /preview_url request - # itself and benefit from the same caching etc. But for now we just rely on the - # caching on the master request to speed things up. + # FIXME: it might be cleaner to use the same flow as the main /preview_url + # request itself and benefit from the same caching etc. But for now we + # just rely on the caching on the master request to speed things up. if 'og:image' in og and og['og:image']: image_info = yield self._download_url( self._rebase_url(og['og:image'], media_info['uri']), requester.user From ed5f43a55accc8502a60b721871b208db704de3e Mon Sep 17 00:00:00 2001 From: Salvatore LaMendola Date: Thu, 16 Jun 2016 00:43:42 -0400 Subject: [PATCH 145/414] Fix TypeError in call to bcrypt.hashpw - At the very least, this TypeError caused logins to fail on my own running instance of Synapse, and the simple (explicit) UTF-8 conversion resolved login errors for me. Signed-off-by: Salvatore LaMendola --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 200793b5ed..b38f81e999 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -626,6 +626,6 @@ class AuthHandler(BaseHandler): Whether self.hash(password) == stored_hash (bool). """ if stored_hash: - return bcrypt.hashpw(password, stored_hash) == stored_hash + return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash else: return False From 885ee861f7270fef1370a2d63e009a8fceaf8dd5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:06:12 +0100 Subject: [PATCH 146/414] Inline the synchrotron and pusher configs into the main config --- synapse/app/pusher.py | 169 ++++++++++------------------------- synapse/app/synchrotron.py | 135 +++++++--------------------- synapse/config/homeserver.py | 4 +- synapse/config/logger.py | 86 +++++++++--------- synapse/config/server.py | 31 ++++--- 5 files changed, 144 insertions(+), 281 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 4ec23d84c1..6c8c02fb38 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -18,10 +18,9 @@ import synapse from synapse.server import HomeServer from synapse.config._base import ConfigError -from synapse.config.database import DatabaseConfig -from synapse.config.logger import LoggingConfig -from synapse.config.emailconfig import EmailConfig -from synapse.config.key import KeyConfig +from synapse.config.workers import clobber_with_worker_config +from synapse.config.logger import setup_logging +from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseSite from synapse.metrics.resource import MetricsResource, METRICS_PREFIX from synapse.storage.roommember import RoomMemberStore @@ -43,98 +42,12 @@ from twisted.web.resource import Resource from daemonize import Daemonize -import gc import sys import logging logger = logging.getLogger("synapse.app.pusher") -class SlaveConfig(DatabaseConfig): - def read_config(self, config): - self.replication_url = config["replication_url"] - self.server_name = config["server_name"] - self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( - "use_insecure_ssl_client_just_for_testing_do_not_use", False - ) - self.user_agent_suffix = None - self.start_pushers = 
True - self.listeners = config["listeners"] - self.soft_file_limit = config.get("soft_file_limit") - self.daemonize = config.get("daemonize") - self.pid_file = self.abspath(config.get("pid_file")) - self.public_baseurl = config["public_baseurl"] - - thresholds = config.get("gc_thresholds", None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of three integers if set" - ) - else: - self.gc_thresholds = None - - # some things used by the auth handler but not actually used in the - # pusher codebase - self.bcrypt_rounds = None - self.ldap_enabled = None - self.ldap_server = None - self.ldap_port = None - self.ldap_tls = None - self.ldap_search_base = None - self.ldap_search_property = None - self.ldap_email_property = None - self.ldap_full_name_property = None - - # We would otherwise try to use the registration shared secret as the - # macaroon shared secret if there was no macaroon_shared_secret, but - # that means pulling in RegistrationConfig too. We don't need to be - # backwards compaitible in the pusher codebase so just make people set - # macaroon_shared_secret. We set this to None to prevent it referencing - # an undefined key. - self.registration_shared_secret = None - - def default_config(self, server_name, **kwargs): - pid_file = self.abspath("pusher.pid") - return """\ - # Slave configuration - - # The replication listener on the synapse to talk to. - #replication_url: https://localhost:{replication_port}/_synapse/replication - - server_name: "%(server_name)s" - - listeners: [] - # Enable a ssh manhole listener on the pusher. - # - type: manhole - # port: {manhole_port} - # bind_address: 127.0.0.1 - # Enable a metric listener on the pusher. - # - type: http - # port: {metrics_port} - # bind_address: 127.0.0.1 - # resources: - # - names: ["metrics"] - # compress: False - - report_stats: False - - daemonize: False - - pid_file: %(pid_file)s - - """ % locals() - - -class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig): - pass - - class PusherSlaveStore( SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore, SlavedAccountDataStore @@ -232,8 +145,8 @@ class PusherServer(HomeServer): ) logger.info("Synapse pusher now listening on port %d", port) - def start_listening(self): - for listener in self.config.listeners: + def start_listening(self, listeners): + for listener in listeners: if listener["type"] == "http": self._listen_http(listener) elif listener["type"] == "manhole": @@ -329,19 +242,32 @@ class PusherServer(HomeServer): yield sleep(30) -def setup(config_options): +def setup(worker_name, config_options): try: - config = PusherSlaveConfig.load_config( + config = HomeServerConfig.load_config( "Synapse pusher", config_options ) except ConfigError as e: sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - if not config: - sys.exit(0) + worker_config = config.workers[worker_name] - config.setup_logging() + setup_logging(worker_config.log_config, worker_config.log_file) + + clobber_with_worker_config(config, worker_config) + + if config.start_pushers: + sys.stderr.write( + "\nThe pushers must be disabled in the main synapse process" + "\nbefore they can be run in a separate worker." 
+ "\nPlease add ``start_pushers: false`` to the main config" + "\n" + ) + sys.exit(1) + + # Force the pushers to start since they will be disabled in the main config + config.start_pushers = True database_engine = create_engine(config.database_config) @@ -354,11 +280,15 @@ def setup(config_options): ) ps.setup() - ps.start_listening() + ps.start_listening(worker_config.listeners) - change_resource_limit(ps.config.soft_file_limit) - if ps.config.gc_thresholds: - gc.set_threshold(*ps.config.gc_thresholds) + def run(): + with LoggingContext("run"): + logger.info("Running") + change_resource_limit(worker_config.soft_file_limit) + if worker_config.gc_thresholds: + ps.set_threshold(worker_config.gc_thresholds) + reactor.run() def start(): ps.replicate() @@ -367,30 +297,21 @@ def setup(config_options): reactor.callWhenRunning(start) - return ps + if worker_config.daemonize: + daemon = Daemonize( + app="synapse-pusher", + pid=worker_config.pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() if __name__ == '__main__': with LoggingContext("main"): - ps = setup(sys.argv[1:]) - - if ps.config.daemonize: - def run(): - with LoggingContext("run"): - change_resource_limit(ps.config.soft_file_limit) - if ps.config.gc_thresholds: - gc.set_threshold(*ps.config.gc_thresholds) - reactor.run() - - daemon = Daemonize( - app="synapse-pusher", - pid=ps.config.pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - - daemon.start() - else: - reactor.run() + worker_name = sys.argv[1] + ps = setup(worker_name, sys.argv[2:]) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 297e199453..7a607faef6 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -18,9 +18,9 @@ import synapse from synapse.api.constants import EventTypes, PresenceState from synapse.config._base import ConfigError -from synapse.config.database import DatabaseConfig -from synapse.config.logger import LoggingConfig -from synapse.config.appservice import AppServiceConfig +from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging +from synapse.config.workers import clobber_with_worker_config from synapse.events import FrozenEvent from synapse.handlers.presence import PresenceHandler from synapse.http.site import SynapseSite @@ -57,76 +57,11 @@ from daemonize import Daemonize import sys import logging import contextlib -import gc import ujson as json logger = logging.getLogger("synapse.app.synchrotron") -class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig): - def read_config(self, config): - self.replication_url = config["replication_url"] - self.server_name = config["server_name"] - self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get( - "use_insecure_ssl_client_just_for_testing_do_not_use", False - ) - self.user_agent_suffix = None - self.listeners = config["listeners"] - self.soft_file_limit = config.get("soft_file_limit") - self.daemonize = config.get("daemonize") - self.pid_file = self.abspath(config.get("pid_file")) - self.macaroon_secret_key = config["macaroon_secret_key"] - self.expire_access_token = config.get("expire_access_token", False) - - thresholds = config.get("gc_thresholds", None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of 
three integers if set" - ) - else: - self.gc_thresholds = None - - def default_config(self, server_name, **kwargs): - pid_file = self.abspath("synchroton.pid") - return """\ - # Slave configuration - - # The replication listener on the synapse to talk to. - #replication_url: https://localhost:{replication_port}/_synapse/replication - - server_name: "%(server_name)s" - - listeners: - # Enable a /sync listener on the synchrontron - #- type: http - # port: {http_port} - # bind_address: "" - # Enable a ssh manhole listener on the synchrotron - # - type: manhole - # port: {manhole_port} - # bind_address: 127.0.0.1 - # Enable a metric listener on the synchrotron - # - type: http - # port: {metrics_port} - # bind_address: 127.0.0.1 - # resources: - # - names: ["metrics"] - # compress: False - - report_stats: False - - daemonize: False - - pid_file: %(pid_file)s - """ % locals() - - class SynchrotronSlavedStore( SlavedPushRuleStore, SlavedEventStore, @@ -350,8 +285,8 @@ class SynchrotronServer(HomeServer): ) logger.info("Synapse synchrotron now listening on port %d", port) - def start_listening(self): - for listener in self.config.listeners: + def start_listening(self, listeners): + for listener in listeners: if listener["type"] == "http": self._listen_http(listener) elif listener["type"] == "manhole": @@ -470,19 +405,20 @@ class SynchrotronServer(HomeServer): return SynchrotronTyping(self) -def setup(config_options): +def start(worker_name, config_options): try: - config = SynchrotronConfig.load_config( + config = HomeServerConfig.load_config( "Synapse synchrotron", config_options ) except ConfigError as e: sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - if not config: - sys.exit(0) + worker_config = config.workers[worker_name] - config.setup_logging() + setup_logging(worker_config.log_config, worker_config.log_file) + + clobber_with_worker_config(config, worker_config) database_engine = create_engine(config.database_config) @@ -496,11 +432,15 @@ def setup(config_options): ) ss.setup() - ss.start_listening() + ss.start_listening(worker_config.listeners) - change_resource_limit(ss.config.soft_file_limit) - if ss.config.gc_thresholds: - ss.set_threshold(*ss.config.gc_thresholds) + def run(): + with LoggingContext("run"): + logger.info("Running") + change_resource_limit(worker_config.soft_file_limit) + if worker_config.gc_thresholds: + ss.set_threshold(worker_config.gc_thresholds) + reactor.run() def start(): ss.get_datastore().start_profiling() @@ -508,30 +448,21 @@ def setup(config_options): reactor.callWhenRunning(start) - return ss + if worker_config.daemonize: + daemon = Daemonize( + app="synapse-synchrotron", + pid=worker_config.pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() if __name__ == '__main__': with LoggingContext("main"): - ss = setup(sys.argv[1:]) - - if ss.config.daemonize: - def run(): - with LoggingContext("run"): - change_resource_limit(ss.config.soft_file_limit) - if ss.config.gc_thresholds: - gc.set_threshold(*ss.config.gc_thresholds) - reactor.run() - - daemon = Daemonize( - app="synapse-synchrotron", - pid=ss.config.pid_file, - action=run, - auto_close_fds=False, - verbose=True, - logger=logger, - ) - - daemon.start() - else: - reactor.run() + worker_name = sys.argv[1] + start(worker_name, sys.argv[2:]) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index fc2445484c..79b0534b3b 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -32,13 +32,15 @@ 
from .password import PasswordConfig from .jwt import JWTConfig from .ldap import LDAPConfig from .emailconfig import EmailConfig +from .workers import WorkerConfig class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, RatelimitConfig, ContentRepositoryConfig, CaptchaConfig, VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig, AppServiceConfig, KeyConfig, SAML2Config, CasConfig, - JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,): + JWTConfig, LDAPConfig, PasswordConfig, EmailConfig, + WorkerConfig,): pass diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 5047db898f..dc68683fbc 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -126,54 +126,58 @@ class LoggingConfig(Config): ) def setup_logging(self): - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" - " - %(message)s" - ) - if self.log_config is None: + setup_logging(self.log_config, self.log_file, self.verbosity) - level = logging.INFO - level_for_storage = logging.INFO - if self.verbosity: - level = logging.DEBUG - if self.verbosity > 1: - level_for_storage = logging.DEBUG - # FIXME: we need a logging.WARN for a -q quiet option - logger = logging.getLogger('') - logger.setLevel(level) +def setup_logging(log_config=None, log_file=None, verbosity=None): + log_format = ( + "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" + " - %(message)s" + ) + if log_config is None: - logging.getLogger('synapse.storage').setLevel(level_for_storage) + level = logging.INFO + level_for_storage = logging.INFO + if verbosity: + level = logging.DEBUG + if verbosity > 1: + level_for_storage = logging.DEBUG - formatter = logging.Formatter(log_format) - if self.log_file: - # TODO: Customisable file size / backup count - handler = logging.handlers.RotatingFileHandler( - self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3 - ) + # FIXME: we need a logging.WARN for a -q quiet option + logger = logging.getLogger('') + logger.setLevel(level) - def sighup(signum, stack): - logger.info("Closing log file due to SIGHUP") - handler.doRollover() - logger.info("Opened new log file due to SIGHUP") + logging.getLogger('synapse.storage').setLevel(level_for_storage) - # TODO(paul): obviously this is a terrible mechanism for - # stealing SIGHUP, because it means no other part of synapse - # can use it instead. If we want to catch SIGHUP anywhere - # else as well, I'd suggest we find a nicer way to broadcast - # it around. - if getattr(signal, "SIGHUP"): - signal.signal(signal.SIGHUP, sighup) - else: - handler = logging.StreamHandler() - handler.setFormatter(formatter) + formatter = logging.Formatter(log_format) + if log_file: + # TODO: Customisable file size / backup count + handler = logging.handlers.RotatingFileHandler( + log_file, maxBytes=(1000 * 1000 * 100), backupCount=3 + ) - handler.addFilter(LoggingContextFilter(request="")) + def sighup(signum, stack): + logger.info("Closing log file due to SIGHUP") + handler.doRollover() + logger.info("Opened new log file due to SIGHUP") - logger.addHandler(handler) + # TODO(paul): obviously this is a terrible mechanism for + # stealing SIGHUP, because it means no other part of synapse + # can use it instead. If we want to catch SIGHUP anywhere + # else as well, I'd suggest we find a nicer way to broadcast + # it around. 
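        # (An illustrative aside, hedged: with the handler registered just
        # below, an operator can send ``kill -HUP <pid>`` -- for example from
        # a logrotate postrotate script -- and the process calls doRollover()
        # on the handler above, reopening its log file without a restart.)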
+ if getattr(signal, "SIGHUP"): + signal.signal(signal.SIGHUP, sighup) else: - with open(self.log_config, 'r') as f: - logging.config.dictConfig(yaml.load(f)) + handler = logging.StreamHandler() + handler.setFormatter(formatter) - observer = PythonLoggingObserver() - observer.start() + handler.addFilter(LoggingContextFilter(request="")) + + logger.addHandler(handler) + else: + with open(log_config, 'r') as f: + logging.config.dictConfig(yaml.load(f)) + + observer = PythonLoggingObserver() + observer.start() diff --git a/synapse/config/server.py b/synapse/config/server.py index 44b8d422e0..f370b22c32 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -38,19 +38,7 @@ class ServerConfig(Config): self.listeners = config.get("listeners", []) - thresholds = config.get("gc_thresholds", None) - if thresholds is not None: - try: - assert len(thresholds) == 3 - self.gc_thresholds = ( - int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), - ) - except: - raise ConfigError( - "Value of `gc_threshold` must be a list of three integers if set" - ) - else: - self.gc_thresholds = None + self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) bind_port = config.get("bind_port") if bind_port: @@ -264,3 +252,20 @@ class ServerConfig(Config): type=int, help="Turn on the twisted telnet manhole" " service on the given port.") + + +def read_gc_thresholds(thresholds): + """Reads the three integer thresholds for garbage collection. Ensures that + the thresholds are integers if thresholds are supplied. + """ + if thresholds is None: + return None + try: + assert len(thresholds) == 3 + return ( + int(thresholds[0]), int(thresholds[1]), int(thresholds[2]), + ) + except: + raise ConfigError( + "Value of `gc_threshold` must be a list of three integers if set" + ) From dbb5a39b64e3b52978ecb98f8f64b7b50acf9b59 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:09:15 +0100 Subject: [PATCH 147/414] Add worker config module --- synapse/config/workers.py | 71 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 synapse/config/workers.py diff --git a/synapse/config/workers.py b/synapse/config/workers.py new file mode 100644 index 0000000000..fd19e38b87 --- /dev/null +++ b/synapse/config/workers.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 matrix.org +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
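# (A hedged usage sketch for the module introduced below -- the keys mirror
# the Worker fields; the values here are hypothetical:)
#
#     worker = read_worker_config({
#         "app": "synapse.app.pusher",
#         "daemonize": False,
#         "pid_file": "/var/run/synapse-pusher.pid",
#         "replication_url": "https://localhost:9093/_synapse/replication",
#     })
#     assert worker.app == "synapse.app.pusher"
#     assert worker.gc_thresholds is None  # unset keys fall back to defaults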
+ +import collections + +from ._base import Config +from .server import read_gc_thresholds + + +Worker = collections.namedtuple("Worker", [ + "app", + "listeners", + "pid_file", + "daemonize", + "log_file", + "log_config", + "event_cache_size", + "soft_file_limit", + "gc_thresholds", + "replication_url", +]) + + +def clobber_with_worker_config(config, worker_config): + """Overrides some of the keys of the main config with worker-specific + values.""" + config.event_cache_size = worker_config.event_cache_size + config.replication_url = worker_config.replication_url + + +def read_worker_config(config): + return Worker( + app=config["app"], + listeners=config.get("listeners", []), + pid_file=config.get("pid_file"), + daemonize=config["daemonize"], + log_file=config.get("log_file"), + log_config=config.get("log_config"), + event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")), + soft_file_limit=config.get("soft_file_limit"), + gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")), + replication_url=config.get("replication_url"), + ) + + +class WorkerConfig(Config): + """The workers are processes run separately to the main synapse process. + Each worker has a name that identifies it within the config file. + They have their own pid_file and listener configuration. They use the + replication_url to talk to the main synapse process. They have their + own cache size tuning, gc threshold tuning and open file limits.""" + + def read_config(self, config): + workers = config.get("workers", {}) + + self.workers = { + worker_name: read_worker_config(worker_config) + for worker_name, worker_config in workers.items() + } From 80a1bc7db517298baec24c1f11a144552719fb7b Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 11:29:45 +0100 Subject: [PATCH 148/414] Comment on what's going on in clobber_with_worker_config --- synapse/config/workers.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index fd19e38b87..4f4658c0a8 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -35,8 +35,19 @@ Worker = collections.namedtuple("Worker", [ def clobber_with_worker_config(config, worker_config): """Overrides some of the keys of the main config with worker-specific - values.""" + values. We only need to override the keys that are accessed deep + withing synapse code. Most of the keys that we want to override in + the workers are accessed in setup code that is rewritten specifically + for the workers. In that new code we can access the worker config directly, + so we don't need to override the values in the main config.""" + + # TODO: The event_cache_size is accessed in the db setup. It should be + # possible to rejigg that code so that the cache size is pulled from the + # worker config directly. config.event_cache_size = worker_config.event_cache_size + + # TODO: The replication_url should only be accessed within worker specific + # code so it really shouldn't need to be clobbered in the main config. 
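    # (A sketch of that direction, using a name this series introduces a few
    # patches later: worker code would read
    # ``self.config.worker_replication_url`` directly instead of relying on
    # this clobbering step.)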
config.replication_url = worker_config.replication_url From bde13833cb42fc6e09928ffb4f4efad9244abffa Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 12:44:40 +0100 Subject: [PATCH 149/414] Access replication_url from the worker config directly --- synapse/app/pusher.py | 5 +++-- synapse/app/synchrotron.py | 5 +++-- synapse/config/workers.py | 4 ---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 6c8c02fb38..a26a3bd394 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -112,7 +112,7 @@ class PusherServer(HomeServer): def remove_pusher(self, app_id, push_key, user_id): http_client = self.get_simple_http_client() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url url = replication_url + "/remove_pushers" return http_client.post_json_get_json(url, { "remove": [{ @@ -166,7 +166,7 @@ class PusherServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url pusher_pool = self.get_pusherpool() clock = self.get_clock() @@ -275,6 +275,7 @@ def setup(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, + worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, ) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 7a607faef6..4443c73e6a 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -98,7 +98,7 @@ class SynchrotronPresence(object): self.http_client = hs.get_simple_http_client() self.store = hs.get_datastore() self.user_to_num_current_syncs = {} - self.syncing_users_url = hs.config.replication_url + "/syncing_users" + self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users" self.clock = hs.get_clock() active_presence = self.store.take_presence_startup_info() @@ -306,7 +306,7 @@ class SynchrotronServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.config.replication_url + replication_url = self.worker_config.replication_url clock = self.get_clock() notifier = self.get_notifier() presence_handler = self.get_presence_handler() @@ -426,6 +426,7 @@ def start(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, + worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, application_service_handler=SynchrotronApplicationService(), diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 4f4658c0a8..f2c77ef59a 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -46,10 +46,6 @@ def clobber_with_worker_config(config, worker_config): # worker config directly. config.event_cache_size = worker_config.event_cache_size - # TODO: The replication_url should only be accessed within worker specific - # code so it really shouldn't need to be clobbered in the main config. - config.replication_url = worker_config.replication_url - def read_worker_config(config): return Worker( From 364d6167926d5d8b2a312e3d35623d2e05330e0a Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 12:53:15 +0100 Subject: [PATCH 150/414] Access the event_cache_size directly from the server object. 
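In outline, the server object grows a small accessor that worker processes
override; a condensed sketch of the hunks below, trimmed to the lines that
matter:

```
class HomeServer(object):
    def get_event_cache_size(self):
        # By default, answer from the value parsed from the main config.
        return self.config.event_cache_size


class PusherServer(HomeServer):
    def get_event_cache_size(self):
        # Workers answer from their own worker_config instead.
        return self.worker_config.event_cache_size
```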
This means that the workers can override the event_cache_size directly without clobbering the value in the main synapse config. --- synapse/app/pusher.py | 6 +++--- synapse/app/synchrotron.py | 6 +++--- synapse/config/workers.py | 14 -------------- synapse/server.py | 3 +++ synapse/storage/_base.py | 2 +- 5 files changed, 10 insertions(+), 21 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index a26a3bd394..5d4db4f892 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -18,7 +18,6 @@ import synapse from synapse.server import HomeServer from synapse.config._base import ConfigError -from synapse.config.workers import clobber_with_worker_config from synapse.config.logger import setup_logging from synapse.config.homeserver import HomeServerConfig from synapse.http.site import SynapseSite @@ -241,6 +240,9 @@ class PusherServer(HomeServer): logger.exception("Error replicating from %r", replication_url) yield sleep(30) + def get_event_cache_size(self): + return self.worker_config.event_cache_size + def setup(worker_name, config_options): try: @@ -255,8 +257,6 @@ def setup(worker_name, config_options): setup_logging(worker_config.log_config, worker_config.log_file) - clobber_with_worker_config(config, worker_config) - if config.start_pushers: sys.stderr.write( "\nThe pushers must be disabled in the main synapse process" diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 4443c73e6a..d10bb2b3f0 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -20,7 +20,6 @@ from synapse.api.constants import EventTypes, PresenceState from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.config.workers import clobber_with_worker_config from synapse.events import FrozenEvent from synapse.handlers.presence import PresenceHandler from synapse.http.site import SynapseSite @@ -404,6 +403,9 @@ class SynchrotronServer(HomeServer): def build_typing_handler(self): return SynchrotronTyping(self) + def get_event_cache_size(self): + return self.worker_config.event_cache_size + def start(worker_name, config_options): try: @@ -418,8 +420,6 @@ def start(worker_name, config_options): setup_logging(worker_config.log_config, worker_config.log_file) - clobber_with_worker_config(config, worker_config) - database_engine = create_engine(config.database_config) ss = SynchrotronServer( diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f2c77ef59a..503358e03e 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -33,20 +33,6 @@ Worker = collections.namedtuple("Worker", [ ]) -def clobber_with_worker_config(config, worker_config): - """Overrides some of the keys of the main config with worker-specific - values. We only need to override the keys that are accessed deep - withing synapse code. Most of the keys that we want to override in - the workers are accessed in setup code that is rewritten specifically - for the workers. In that new code we can access the worker config directly, - so we don't need to override the values in the main config.""" - - # TODO: The event_cache_size is accessed in the db setup. It should be - # possible to rejigg that code so that the cache size is pulled from the - # worker config directly. 
- config.event_cache_size = worker_config.event_cache_size - - def read_worker_config(config): return Worker( app=config["app"], diff --git a/synapse/server.py b/synapse/server.py index dd4b81c658..b3c31ece73 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -236,6 +236,9 @@ class HomeServer(object): def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) + def get_event_cache_size(self): + return self.config.event_cache_size + def _make_dependency_method(depname): def _get(hs): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 32c6677d47..2932880cc5 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -166,7 +166,7 @@ class SQLBaseStore(object): self._get_event_counters = PerformanceCounters() self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True, - max_entries=hs.config.event_cache_size) + max_entries=hs.get_event_cache_size()) self._state_group_cache = DictionaryCache( "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR From a352b68acf473f59012340b7f481f3dfd6544ac6 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 16 Jun 2016 17:29:50 +0100 Subject: [PATCH 151/414] Use worker_ prefixes for worker config, use existing support for multiple config files --- synapse/app/pusher.py | 29 ++++++++++------------ synapse/app/synchrotron.py | 29 ++++++++++------------ synapse/config/workers.py | 49 +++++++------------------------------- synapse/server.py | 3 --- synapse/storage/_base.py | 2 +- 5 files changed, 33 insertions(+), 79 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 5d4db4f892..9ac26d52c6 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -111,7 +111,7 @@ class PusherServer(HomeServer): def remove_pusher(self, app_id, push_key, user_id): http_client = self.get_simple_http_client() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url url = replication_url + "/remove_pushers" return http_client.post_json_get_json(url, { "remove": [{ @@ -165,7 +165,7 @@ class PusherServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url pusher_pool = self.get_pusherpool() clock = self.get_clock() @@ -240,11 +240,8 @@ class PusherServer(HomeServer): logger.exception("Error replicating from %r", replication_url) yield sleep(30) - def get_event_cache_size(self): - return self.worker_config.event_cache_size - -def setup(worker_name, config_options): +def start(config_options): try: config = HomeServerConfig.load_config( "Synapse pusher", config_options @@ -253,9 +250,9 @@ def setup(worker_name, config_options): sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - worker_config = config.workers[worker_name] + assert config.worker_app == "synapse.app.pusher" - setup_logging(worker_config.log_config, worker_config.log_file) + setup_logging(config.worker_log_config, config.worker_log_file) if config.start_pushers: sys.stderr.write( @@ -275,20 +272,19 @@ def setup(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, - worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, ) ps.setup() - ps.start_listening(worker_config.listeners) + ps.start_listening(config.worker_listeners) def run(): with LoggingContext("run"): 
logger.info("Running") - change_resource_limit(worker_config.soft_file_limit) - if worker_config.gc_thresholds: - ps.set_threshold(worker_config.gc_thresholds) + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + ps.set_threshold(config.gc_thresholds) reactor.run() def start(): @@ -298,10 +294,10 @@ def setup(worker_name, config_options): reactor.callWhenRunning(start) - if worker_config.daemonize: + if config.worker_daemonize: daemon = Daemonize( app="synapse-pusher", - pid=worker_config.pid_file, + pid=config.worker_pid_file, action=run, auto_close_fds=False, verbose=True, @@ -314,5 +310,4 @@ def setup(worker_name, config_options): if __name__ == '__main__': with LoggingContext("main"): - worker_name = sys.argv[1] - ps = setup(worker_name, sys.argv[2:]) + ps = start(sys.argv[1:]) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index d10bb2b3f0..160db8637e 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -97,7 +97,7 @@ class SynchrotronPresence(object): self.http_client = hs.get_simple_http_client() self.store = hs.get_datastore() self.user_to_num_current_syncs = {} - self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users" + self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users" self.clock = hs.get_clock() active_presence = self.store.take_presence_startup_info() @@ -305,7 +305,7 @@ class SynchrotronServer(HomeServer): def replicate(self): http_client = self.get_simple_http_client() store = self.get_datastore() - replication_url = self.worker_config.replication_url + replication_url = self.config.worker_replication_url clock = self.get_clock() notifier = self.get_notifier() presence_handler = self.get_presence_handler() @@ -403,11 +403,8 @@ class SynchrotronServer(HomeServer): def build_typing_handler(self): return SynchrotronTyping(self) - def get_event_cache_size(self): - return self.worker_config.event_cache_size - -def start(worker_name, config_options): +def start(config_options): try: config = HomeServerConfig.load_config( "Synapse synchrotron", config_options @@ -416,9 +413,9 @@ def start(worker_name, config_options): sys.stderr.write("\n" + e.message + "\n") sys.exit(1) - worker_config = config.workers[worker_name] + assert config.worker_app == "synapse.app.synchrotron" - setup_logging(worker_config.log_config, worker_config.log_file) + setup_logging(config.worker_log_config, config.worker_log_file) database_engine = create_engine(config.database_config) @@ -426,21 +423,20 @@ def start(worker_name, config_options): config.server_name, db_config=config.database_config, config=config, - worker_config=worker_config, version_string=get_version_string("Synapse", synapse), database_engine=database_engine, application_service_handler=SynchrotronApplicationService(), ) ss.setup() - ss.start_listening(worker_config.listeners) + ss.start_listening(config.worker_listeners) def run(): with LoggingContext("run"): logger.info("Running") - change_resource_limit(worker_config.soft_file_limit) - if worker_config.gc_thresholds: - ss.set_threshold(worker_config.gc_thresholds) + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + ss.set_threshold(config.gc_thresholds) reactor.run() def start(): @@ -449,10 +445,10 @@ def start(worker_name, config_options): reactor.callWhenRunning(start) - if worker_config.daemonize: + if config.worker_daemonize: daemon = Daemonize( app="synapse-synchrotron", - pid=worker_config.pid_file, + pid=config.worker_pid_file, action=run, 
auto_close_fds=False, verbose=True, @@ -465,5 +461,4 @@ def start(worker_name, config_options): if __name__ == '__main__': with LoggingContext("main"): - worker_name = sys.argv[1] - start(worker_name, sys.argv[2:]) + start(sys.argv[1:]) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 503358e03e..904789d155 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -13,52 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import collections - from ._base import Config -from .server import read_gc_thresholds - - -Worker = collections.namedtuple("Worker", [ - "app", - "listeners", - "pid_file", - "daemonize", - "log_file", - "log_config", - "event_cache_size", - "soft_file_limit", - "gc_thresholds", - "replication_url", -]) - - -def read_worker_config(config): - return Worker( - app=config["app"], - listeners=config.get("listeners", []), - pid_file=config.get("pid_file"), - daemonize=config["daemonize"], - log_file=config.get("log_file"), - log_config=config.get("log_config"), - event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")), - soft_file_limit=config.get("soft_file_limit"), - gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")), - replication_url=config.get("replication_url"), - ) class WorkerConfig(Config): """The workers are processes run separately to the main synapse process. - Each worker has a name that identifies it within the config file. They have their own pid_file and listener configuration. They use the - replication_url to talk to the main synapse process. They have their - own cache size tuning, gc threshold tuning and open file limits.""" + replication_url to talk to the main synapse process.""" def read_config(self, config): - workers = config.get("workers", {}) - - self.workers = { - worker_name: read_worker_config(worker_config) - for worker_name, worker_config in workers.items() - } + self.worker_app = config.get("worker_app") + self.worker_listeners = config.get("worker_listeners") + self.worker_daemonize = config.get("worker_daemonize") + self.worker_pid_file = config.get("worker_pid_file") + self.worker_log_file = config.get("worker_log_file") + self.worker_log_config = config.get("worker_log_config") + self.worker_replication_url = config.get("worker_replication_url") diff --git a/synapse/server.py b/synapse/server.py index b3c31ece73..dd4b81c658 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -236,9 +236,6 @@ class HomeServer(object): def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) - def get_event_cache_size(self): - return self.config.event_cache_size - def _make_dependency_method(depname): def _get(hs): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 2932880cc5..32c6677d47 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -166,7 +166,7 @@ class SQLBaseStore(object): self._get_event_counters = PerformanceCounters() self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True, - max_entries=hs.get_event_cache_size()) + max_entries=hs.config.event_cache_size) self._state_group_cache = DictionaryCache( "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR From 8c75040c25495bf29f4c76ca0fcc032975210012 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 11:48:12 +0100 Subject: [PATCH 152/414] Fix setting gc thresholds in the workers --- synapse/app/pusher.py | 3 ++- synapse/app/synchrotron.py | 3 ++- 2 
files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 9ac26d52c6..4f1d18ab5f 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -43,6 +43,7 @@ from daemonize import Daemonize import sys import logging +import gc logger = logging.getLogger("synapse.app.pusher") @@ -284,7 +285,7 @@ def start(config_options): logger.info("Running") change_resource_limit(config.soft_file_limit) if config.gc_thresholds: - ps.set_threshold(config.gc_thresholds) + gc.set_threshold(*config.gc_thresholds) reactor.run() def start(): diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 160db8637e..8cf5bbbb6d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -56,6 +56,7 @@ from daemonize import Daemonize import sys import logging import contextlib +import gc import ujson as json logger = logging.getLogger("synapse.app.synchrotron") @@ -436,7 +437,7 @@ def start(config_options): logger.info("Running") change_resource_limit(config.soft_file_limit) if config.gc_thresholds: - ss.set_threshold(config.gc_thresholds) + gc.set_threshold(*config.gc_thresholds) reactor.run() def start(): From ded01c3bf65fd6bb83c9d3546ea44859208e4578 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 13:49:16 +0100 Subject: [PATCH 153/414] Fix ``KeyError: 'msgtype'``. Use ``.get`` Fixes a key error where the mailer tried to get the ``msgtype`` of an event that was missing a ``msgtype``. ``` File "synapse/push/mailer.py", line 264, in get_notif_vars File "synapse/push/mailer.py", line 285, in get_message_vars File ".../frozendict/__init__.py", line 10, in __getitem__ return self.__dict[key] KeyError: 'msgtype' ``` --- synapse/push/mailer.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index e5c3929cd7..1028731bc9 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -273,16 +273,16 @@ class Mailer(object): sender_state_event = room_state[("m.room.member", event.sender)] sender_name = name_from_member_event(sender_state_event) - sender_avatar_url = None - if "avatar_url" in sender_state_event.content: - sender_avatar_url = sender_state_event.content["avatar_url"] + sender_avatar_url = sender_state_event.content.get("avatar_url") # 'hash' for deterministically picking default images: use # sender_hash % the number of default images to choose from sender_hash = string_ordinal_total(event.sender) + msgtype = event.content.get("msgtype") + ret = { - "msgtype": event.content["msgtype"], + "msgtype": msgtype, "is_historical": event.event_id != notif['event_id'], "id": event.event_id, "ts": event.origin_server_ts, @@ -291,9 +291,9 @@ class Mailer(object): "sender_hash": sender_hash, } - if event.content["msgtype"] == "m.text": + if msgtype == "m.text": self.add_text_message_vars(ret, event) - elif event.content["msgtype"] == "m.image": + elif msgtype == "m.image": self.add_image_message_vars(ret, event) if "body" in event.content: @@ -302,16 +302,17 @@ class Mailer(object): return ret def add_text_message_vars(self, messagevars, event): - if "format" in event.content: - msgformat = event.content["format"] - else: - msgformat = None + msgformat = event.content.get("format") + messagevars["format"] = msgformat - if msgformat == "org.matrix.custom.html": - messagevars["body_text_html"] = safe_markup(event.content["formatted_body"]) - else: - messagevars["body_text_html"] = safe_text(event.content["body"]) + 
formatted_body = event.content.get("formatted_body") + body = event.content.get("body") + + if msgformat == "org.matrix.custom.html" and formatted_body: + messagevars["body_text_html"] = safe_markup(formatted_body) + elif body: + messagevars["body_text_html"] = safe_text(body) return messagevars From 2884712ca733f45d32468ecf2ede7a1518e85be4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 14:47:33 +0100 Subject: [PATCH 154/414] Only re-sign our own events --- synapse/federation/federation_server.py | 15 +++++++++------ synapse/handlers/federation.py | 15 +++++++++------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index fe92457ba1..2a589524a4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -193,13 +193,16 @@ class FederationServer(FederationBase): ) for event in auth_chain: - event.signatures.update( - compute_event_signature( - event, - self.hs.hostname, - self.hs.config.signing_key[0] + # We sign these again because there was a bug where we + # incorrectly signed things the first time round + if self.hs.is_mine_id(event.event_id): + event.signatures.update( + compute_event_signature( + event, + self.hs.hostname, + self.hs.config.signing_key[0] + ) ) - ) else: raise NotImplementedError("Specify an event") diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c2df43e2f6..6c0bc7eafa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1018,13 +1018,16 @@ class FederationHandler(BaseHandler): res = results.values() for event in res: - event.signatures.update( - compute_event_signature( - event, - self.hs.hostname, - self.hs.config.signing_key[0] + # We sign these again because there was a bug where we + # incorrectly signed things the first time round + if self.hs.is_mine_id(event.event_id): + event.signatures.update( + compute_event_signature( + event, + self.hs.hostname, + self.hs.config.signing_key[0] + ) ) - ) defer.returnValue(res) else: From 3e41de05cc13220f5cd88ae78002adf782728322 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 15:11:22 +0100 Subject: [PATCH 155/414] Turn use_frozen_events off by default --- synapse/config/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index f370b22c32..7840dc3ad6 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -27,7 +27,7 @@ class ServerConfig(Config): self.daemonize = config.get("daemonize") self.print_pidfile = config.get("print_pidfile") self.user_agent_suffix = config.get("user_agent_suffix") - self.use_frozen_dicts = config.get("use_frozen_dicts", True) + self.use_frozen_dicts = config.get("use_frozen_dicts", False) self.public_baseurl = config.get("public_baseurl") self.secondary_directory_servers = config.get("secondary_directory_servers", []) From 0113ad36ee7bc315aa162c42277b90764825f219 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 15:13:13 +0100 Subject: [PATCH 156/414] Enable use_frozen_events in tests --- tests/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/utils.py b/tests/utils.py index e19ae581e0..6e41ae1ff6 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -54,6 +54,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs): config.trusted_third_party_id_servers = [] config.room_invite_state_types = [] + 
config.use_frozen_dicts = True config.database_config = {"name": "sqlite3"} if "clock" not in kargs: From 120c2387053bdc30824d6b15931532664f739192 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 16:10:37 +0100 Subject: [PATCH 157/414] Disable responding with canonical json for federation --- synapse/federation/transport/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 6fc3e2207c..8a1965f45a 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -37,7 +37,7 @@ class TransportLayerServer(JsonResource): self.hs = hs self.clock = hs.get_clock() - super(TransportLayerServer, self).__init__(hs) + super(TransportLayerServer, self).__init__(hs, canonical_json=False) self.authenticator = Authenticator(hs) self.ratelimiter = FederationRateLimiter( From 8f4a9bbc16e6b54f1ab110085e42884fd16abb6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2016 16:43:45 +0100 Subject: [PATCH 158/414] Linearize some federation endpoints based on (origin, room_id) --- synapse/federation/federation_server.py | 141 +++++++++++++----------- synapse/federation/transport/server.py | 2 +- 2 files changed, 77 insertions(+), 66 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2a589524a4..85f5e752fe 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -49,6 +49,7 @@ class FederationServer(FederationBase): super(FederationServer, self).__init__(hs) self._room_pdu_linearizer = Linearizer() + self._server_linearizer = Linearizer() def set_handler(self, handler): """Sets the handler that the replication layer will use to communicate @@ -89,11 +90,14 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_backfill_request(self, origin, room_id, versions, limit): - pdus = yield self.handler.on_backfill_request( - origin, room_id, versions, limit - ) + with (yield self._server_linearizer.queue((origin, room_id))): + pdus = yield self.handler.on_backfill_request( + origin, room_id, versions, limit + ) - defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict())) + res = self._transaction_from_pdus(pdus).get_dict() + + defer.returnValue((200, res)) @defer.inlineCallbacks @log_function @@ -184,27 +188,28 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function def on_context_state_request(self, origin, room_id, event_id): - if event_id: - pdus = yield self.handler.get_state_for_pdu( - origin, room_id, event_id, - ) - auth_chain = yield self.store.get_auth_chain( - [pdu.event_id for pdu in pdus] - ) + with (yield self._server_linearizer.queue((origin, room_id))): + if event_id: + pdus = yield self.handler.get_state_for_pdu( + origin, room_id, event_id, + ) + auth_chain = yield self.store.get_auth_chain( + [pdu.event_id for pdu in pdus] + ) - for event in auth_chain: - # We sign these again because there was a bug where we - # incorrectly signed things the first time round - if self.hs.is_mine_id(event.event_id): - event.signatures.update( - compute_event_signature( - event, - self.hs.hostname, - self.hs.config.signing_key[0] + for event in auth_chain: + # We sign these again because there was a bug where we + # incorrectly signed things the first time round + if self.hs.is_mine_id(event.event_id): + event.signatures.update( + compute_event_signature( + event, + 
self.hs.hostname, + self.hs.config.signing_key[0] + ) ) - ) - else: - raise NotImplementedError("Specify an event") + else: + raise NotImplementedError("Specify an event") defer.returnValue((200, { "pdus": [pdu.get_pdu_json() for pdu in pdus], @@ -283,14 +288,16 @@ class FederationServer(FederationBase): @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): - time_now = self._clock.time_msec() - auth_pdus = yield self.handler.on_event_auth(event_id) - defer.returnValue((200, { - "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus], - })) + with (yield self._server_linearizer.queue((origin, room_id))): + time_now = self._clock.time_msec() + auth_pdus = yield self.handler.on_event_auth(event_id) + res = { + "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus], + } + defer.returnValue((200, res)) @defer.inlineCallbacks - def on_query_auth_request(self, origin, content, event_id): + def on_query_auth_request(self, origin, content, room_id, event_id): """ Content is a dict with keys:: auth_chain (list): A list of events that give the auth chain. @@ -309,32 +316,33 @@ class FederationServer(FederationBase): Returns: Deferred: Results in `dict` with the same format as `content` """ - auth_chain = [ - self.event_from_pdu_json(e) - for e in content["auth_chain"] - ] + with (yield self._server_linearizer.queue((origin, room_id))): + auth_chain = [ + self.event_from_pdu_json(e) + for e in content["auth_chain"] + ] - signed_auth = yield self._check_sigs_and_hash_and_fetch( - origin, auth_chain, outlier=True - ) + signed_auth = yield self._check_sigs_and_hash_and_fetch( + origin, auth_chain, outlier=True + ) - ret = yield self.handler.on_query_auth( - origin, - event_id, - signed_auth, - content.get("rejects", []), - content.get("missing", []), - ) + ret = yield self.handler.on_query_auth( + origin, + event_id, + signed_auth, + content.get("rejects", []), + content.get("missing", []), + ) - time_now = self._clock.time_msec() - send_content = { - "auth_chain": [ - e.get_pdu_json(time_now) - for e in ret["auth_chain"] - ], - "rejects": ret.get("rejects", []), - "missing": ret.get("missing", []), - } + time_now = self._clock.time_msec() + send_content = { + "auth_chain": [ + e.get_pdu_json(time_now) + for e in ret["auth_chain"] + ], + "rejects": ret.get("rejects", []), + "missing": ret.get("missing", []), + } defer.returnValue( (200, send_content) @@ -386,21 +394,24 @@ class FederationServer(FederationBase): @log_function def on_get_missing_events(self, origin, room_id, earliest_events, latest_events, limit, min_depth): - logger.info( - "on_get_missing_events: earliest_events: %r, latest_events: %r," - " limit: %d, min_depth: %d", - earliest_events, latest_events, limit, min_depth - ) - missing_events = yield self.handler.on_get_missing_events( - origin, room_id, earliest_events, latest_events, limit, min_depth - ) + with (yield self._server_linearizer.queue((origin, room_id))): + logger.info( + "on_get_missing_events: earliest_events: %r, latest_events: %r," + " limit: %d, min_depth: %d", + earliest_events, latest_events, limit, min_depth + ) + missing_events = yield self.handler.on_get_missing_events( + origin, room_id, earliest_events, latest_events, limit, min_depth + ) - if len(missing_events) < 5: - logger.info("Returning %d events: %r", len(missing_events), missing_events) - else: - logger.info("Returning %d events", len(missing_events)) + if len(missing_events) < 5: + logger.info( + "Returning %d events: %r", len(missing_events), missing_events + ) + else: + 
logger.info("Returning %d events", len(missing_events)) - time_now = self._clock.time_msec() + time_now = self._clock.time_msec() defer.returnValue({ "events": [ev.get_pdu_json(time_now) for ev in missing_events], diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 8a1965f45a..26fa88ae84 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -388,7 +388,7 @@ class FederationQueryAuthServlet(BaseFederationServlet): @defer.inlineCallbacks def on_POST(self, origin, content, query, context, event_id): new_content = yield self.handler.on_query_auth_request( - origin, content, event_id + origin, content, context, event_id ) defer.returnValue((200, new_content)) From 9f1800fba852314332d7e682484e456d28838619 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:14:16 +0100 Subject: [PATCH 159/414] Remove registered_users from the distributor. The only place that was observed was to set the profile. I've made it so that the profile is set within store.register in the same transaction that creates the user. This required some slight changes to the registration code for upgrading guest users, since it previously relied on the distributor swallowing errors if the profile already existed. --- synapse/handlers/profile.py | 7 ------- synapse/handlers/register.py | 23 ++++++++++------------- synapse/storage/profile.py | 6 ------ synapse/storage/registration.py | 17 ++++++++++++++--- synapse/util/distributor.py | 4 ---- 5 files changed, 24 insertions(+), 33 deletions(-) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index e37409170d..711a6a567f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler): "profile", self.on_profile_query ) - distributor = hs.get_distributor() - - distributor.observe("registered_user", self.registered_user) - - def registered_user(self, user): - return self.store.create_profile(user.localpart) - @defer.inlineCallbacks def get_displayname(self, target_user): if self.hs.is_mine(target_user): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e0aaefe7be..4fb12915dc 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -23,7 +23,6 @@ from synapse.api.errors import ( from ._base import BaseHandler from synapse.util.async import run_on_reactor from synapse.http.client import CaptchaServerHttpClient -from synapse.util.distributor import registered_user import logging import urllib @@ -37,8 +36,6 @@ class RegistrationHandler(BaseHandler): super(RegistrationHandler, self).__init__(hs) self.auth = hs.get_auth() - self.distributor = hs.get_distributor() - self.distributor.declare("registered_user") self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -140,9 +137,10 @@ class RegistrationHandler(BaseHandler): password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, + create_profile_with_localpart=( + None if was_guest else user.localpart + ), ) - - yield registered_user(self.distributor, user) else: # autogen a sequential user ID attempts = 0 @@ -160,7 +158,8 @@ class RegistrationHandler(BaseHandler): user_id=user_id, token=token, password_hash=password_hash, - make_guest=make_guest + make_guest=make_guest, + create_profile_with_localpart=user.localpart, ) except SynapseError: # if user id is taken, just generate another @@ -168,7 +167,6 @@ class RegistrationHandler(BaseHandler): user_id 
= None token = None attempts += 1 - yield registered_user(self.distributor, user) # We used to generate default identicons here, but nowadays # we want clients to generate their own as part of their branding @@ -201,8 +199,8 @@ class RegistrationHandler(BaseHandler): token=token, password_hash="", appservice_id=service_id, + create_profile_with_localpart=user.localpart, ) - yield registered_user(self.distributor, user) defer.returnValue((user_id, token)) @defer.inlineCallbacks @@ -248,9 +246,9 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash=None + password_hash=None, + create_profile_with_localpart=user.localpart, ) - yield registered_user(self.distributor, user) except Exception as e: yield self.store.add_access_token_to_user(user_id, token) # Ignore Registration errors @@ -395,10 +393,9 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash=None + password_hash=None, + create_profile_with_localpart=user.localpart, ) - - yield registered_user(self.distributor, user) else: yield self.store.user_delete_access_tokens(user_id=user_id) yield self.store.add_access_token_to_user(user_id=user_id, token=token) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 26a40905ae..c3c3f9ffd8 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -17,12 +17,6 @@ from ._base import SQLBaseStore class ProfileStore(SQLBaseStore): - def create_profile(self, user_localpart): - return self._simple_insert( - table="profiles", - values={"user_id": user_localpart}, - desc="create_profile", - ) def get_profile_displayname(self, user_localpart): return self._simple_select_one_onecol( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index bda84a744a..3de9e0f709 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -76,7 +76,8 @@ class RegistrationStore(SQLBaseStore): @defer.inlineCallbacks def register(self, user_id, token, password_hash, - was_guest=False, make_guest=False, appservice_id=None): + was_guest=False, make_guest=False, appservice_id=None, + create_profile_with_localpart=None): """Attempts to register an account. Args: @@ -88,6 +89,8 @@ class RegistrationStore(SQLBaseStore): make_guest (boolean): True if the the new user should be guest, false to add a regular user account. appservice_id (str): The ID of the appservice registering the user. + create_profile_with_localpart (str): Optionally create a profile for + the given localpart. Raises: StoreError if the user_id could not be registered. 
""" @@ -99,7 +102,8 @@ class RegistrationStore(SQLBaseStore): password_hash, was_guest, make_guest, - appservice_id + appservice_id, + create_profile_with_localpart, ) self.get_user_by_id.invalidate((user_id,)) self.is_guest.invalidate((user_id,)) @@ -112,7 +116,8 @@ class RegistrationStore(SQLBaseStore): password_hash, was_guest, make_guest, - appservice_id + appservice_id, + create_profile_with_localpart, ): now = int(self.clock.time()) @@ -157,6 +162,12 @@ class RegistrationStore(SQLBaseStore): (next_id, user_id, token,) ) + if create_profile_with_localpart: + txn.execute( + "INSERT INTO profiles(user_id) VALUES (?)", + (create_profile_with_localpart,) + ) + @cached() def get_user_by_id(self, user_id): return self._simple_select_one( diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index d7cccc06b1..e68f94ce77 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -27,10 +27,6 @@ import logging logger = logging.getLogger(__name__) -def registered_user(distributor, user): - return distributor.fire("registered_user", user) - - def user_left_room(distributor, user, room_id): return preserve_context_over_fn( distributor.fire, From 0c13d45522c5c8c0b68322498a220969eb894ad5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:18:53 +0100 Subject: [PATCH 160/414] Add a comment on why we don't create a profile for upgrading users --- synapse/handlers/register.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 4fb12915dc..0b7517221d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -138,6 +138,7 @@ class RegistrationHandler(BaseHandler): was_guest=was_guest, make_guest=make_guest, create_profile_with_localpart=( + # If the user was a guest then they already have a profile None if was_guest else user.localpart ), ) From 41e4b2efeafa6e2f4cbfef4f30620b9f58b020a4 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 17 Jun 2016 19:20:47 +0100 Subject: [PATCH 161/414] Add the create_profile method back since the tests use it --- synapse/storage/profile.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index c3c3f9ffd8..26a40905ae 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -17,6 +17,12 @@ from ._base import SQLBaseStore class ProfileStore(SQLBaseStore): + def create_profile(self, user_localpart): + return self._simple_insert( + table="profiles", + values={"user_id": user_localpart}, + desc="create_profile", + ) def get_profile_displayname(self, user_localpart): return self._simple_select_one_onecol( From 4d362a61ea3173f1be0ac58147db29acfbe1b4c3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jun 2016 14:17:15 +0100 Subject: [PATCH 162/414] Bump version and changelog --- CHANGES.rst | 15 +++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1a47aae857..ecaaa189d0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,18 @@ +Changes in synapse v0.16.1 (2016-06-20) +======================================= + +Bug fixes: + +* Fix assorted bugs in ``/preview_url`` (PR #872) +* Fix TypeError when setting unicode passwords (PR #873) + + +Performance improvements: + +* Turn ``use_frozen_events`` off by default (PR #877) +* Disable responding with canonical json for federation (PR #878) + + Changes in synapse v0.16.1-rc1 (2016-06-15) 
=========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index faaa86d972..3cd79b1247 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.1-rc1" +__version__ = "0.16.1" From d5fb561709cf2181cd5b8fffd2cf70a3fb52e5ab Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 20 Jun 2016 17:53:38 +0100 Subject: [PATCH 163/414] Optionally make committing to postgres asynchronous. Useful when running tests when you don't care whether the server will lose data that it claims that it has committed. --- synapse/storage/engines/__init__.py | 2 +- synapse/storage/engines/postgres.py | 13 ++++++++++++- synapse/storage/engines/sqlite3.py | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index 7bb5de1fe7..338b495611 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -32,7 +32,7 @@ def create_engine(database_config): if engine_class: module = importlib.import_module(name) - return engine_class(module) + return engine_class(module, database_config) raise RuntimeError( "Unsupported database engine '%s'" % (name,) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index c2290943b4..a6ae79dfad 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -19,9 +19,10 @@ from ._base import IncorrectDatabaseSetup class PostgresEngine(object): single_threaded = False - def __init__(self, database_module): + def __init__(self, database_module, database_config): self.module = database_module self.module.extensions.register_type(self.module.extensions.UNICODE) + self.synchronous_commit = database_config.get("synchronous_commit", True) def check_database(self, txn): txn.execute("SHOW SERVER_ENCODING") @@ -40,9 +41,19 @@ class PostgresEngine(object): db_conn.set_isolation_level( self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ ) + # Asynchronous commit, don't wait for the server to call fsync before + # ending the transaction. 
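+        # This trades durability for latency: if the server crashes, the
+        # most recently acknowledged transactions may be lost, but (unlike
+        # disabling fsync) the database cannot be corrupted. For details see: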
+ # https://www.postgresql.org/docs/current/static/wal-async-commit.html + if not self.synchronous_commit: + cursor = db_conn.cursor() + cursor.execute("SET synchronous_commit TO OFF") + cursor.close() def is_deadlock(self, error): if isinstance(error, self.module.DatabaseError): + # https://www.postgresql.org/docs/current/static/errcodes-appendix.html + # "40001" serialization_failure + # "40P01" deadlock_detected return error.pgcode in ["40001", "40P01"] return False diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py index 14203aa500..755c9a1f07 100644 --- a/synapse/storage/engines/sqlite3.py +++ b/synapse/storage/engines/sqlite3.py @@ -21,7 +21,7 @@ import struct class Sqlite3Engine(object): single_threaded = True - def __init__(self, database_module): + def __init__(self, database_module, database_config): self.module = database_module def check_database(self, txn): From 6b40e4f52ad4bc0cbab4a0178d3f033d049d84fa Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 21 Jun 2016 11:37:56 +0100 Subject: [PATCH 164/414] Fix substitution failure in mail template --- res/templates/notif_mail.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html index 8aee68b591..535bea764d 100644 --- a/res/templates/notif_mail.html +++ b/res/templates/notif_mail.html @@ -36,7 +36,7 @@
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because an event was received at {{ reason.received_at|format_ts("%c") }} - which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} (delay_before_mail_ms) mins ago, + which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago, {% if reason.last_sent_ts %} and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }}, which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago. From 13e334506cf9093d2872ede95f1527c0c42d71fd Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 21 Jun 2016 11:47:39 +0100 Subject: [PATCH 165/414] Remove the legacy v0 content upload API. The existing content can still be downloaded. The last upload to the matrix.org server was in January 2015, so it is probably safe to remove the upload API. --- synapse/app/homeserver.py | 3 +- synapse/config/server.py | 20 ---- synapse/rest/media/v0/content_repository.py | 112 +------------------- 3 files changed, 3 insertions(+), 132 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 40ffd9bf0d..9c2dd32953 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -147,7 +147,7 @@ class SynapseHomeServer(HomeServer): MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo, CONTENT_REPO_PREFIX: ContentRepoResource( - self, self.config.uploads_path, self.auth, self.content_addr + self, self.config.uploads_path ), }) @@ -301,7 +301,6 @@ def setup(config_options): db_config=config.database_config, tls_server_context_factory=tls_server_context_factory, config=config, - content_addr=config.content_addr, version_string=version_string, database_engine=database_engine, ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 7840dc3ad6..d7e6f20518 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -107,26 +107,6 @@ class ServerConfig(Config): ] }) - # Attempt to guess the content_addr for the v0 content repostitory - content_addr = config.get("content_addr") - if not content_addr: - for listener in self.listeners: - if listener["type"] == "http" and not listener.get("tls", False): - unsecure_port = listener["port"] - break - else: - raise RuntimeError("Could not determine 'content_addr'") - - host = self.server_name - if ':' not in host: - host = "%s:%d" % (host, unsecure_port) - else: - host = host.split(':')[0] - host = "%s:%d" % (host, unsecure_port) - content_addr = "http://%s" % (host,) - - self.content_addr = content_addr - def default_config(self, server_name, **kwargs): if ":" in server_name: bind_port = int(server_name.split(":")[1]) diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index d9fc045fc6..956bd5da75 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -15,14 +15,12 @@ from synapse.http.server import respond_with_json_bytes, finish_request -from synapse.util.stringutils import random_string from synapse.api.errors import ( - cs_exception, SynapseError, CodeMessageException, Codes, cs_error + Codes, cs_error ) from twisted.protocols.basic import FileSender from twisted.web import server, resource -from twisted.internet import defer import base64 import simplejson as json @@ -50,64 +48,10 @@ class ContentRepoResource(resource.Resource): """ isLeaf = True - def 
__init__(self, hs, directory, auth, external_addr): + def __init__(self, hs, directory): resource.Resource.__init__(self) self.hs = hs self.directory = directory - self.auth = auth - self.external_addr = external_addr.rstrip('/') - self.max_upload_size = hs.config.max_upload_size - - if not os.path.isdir(self.directory): - os.mkdir(self.directory) - logger.info("ContentRepoResource : Created %s directory.", - self.directory) - - @defer.inlineCallbacks - def map_request_to_name(self, request): - # auth the user - requester = yield self.auth.get_user_by_req(request) - - # namespace all file uploads on the user - prefix = base64.urlsafe_b64encode( - requester.user.to_string() - ).replace('=', '') - - # use a random string for the main portion - main_part = random_string(24) - - # suffix with a file extension if we can make one. This is nice to - # provide a hint to clients on the file information. We will also reuse - # this info to spit back the content type to the client. - suffix = "" - if request.requestHeaders.hasHeader("Content-Type"): - content_type = request.requestHeaders.getRawHeaders( - "Content-Type")[0] - suffix = "." + base64.urlsafe_b64encode(content_type) - if (content_type.split("/")[0].lower() in - ["image", "video", "audio"]): - file_ext = content_type.split("/")[-1] - # be a little paranoid and only allow a-z - file_ext = re.sub("[^a-z]", "", file_ext) - suffix += "." + file_ext - - file_name = prefix + main_part + suffix - file_path = os.path.join(self.directory, file_name) - logger.info("User %s is uploading a file to path %s", - request.user.user_id.to_string(), - file_path) - - # keep trying to make a non-clashing file, with a sensible max attempts - attempts = 0 - while os.path.exists(file_path): - main_part = random_string(24) - file_name = prefix + main_part + suffix - file_path = os.path.join(self.directory, file_name) - attempts += 1 - if attempts > 25: # really? Really? - raise SynapseError(500, "Unable to create file.") - - defer.returnValue(file_path) def render_GET(self, request): # no auth here on purpose, to allow anyone to view, even across home @@ -155,58 +99,6 @@ class ContentRepoResource(resource.Resource): return server.NOT_DONE_YET - def render_POST(self, request): - self._async_render(request) - return server.NOT_DONE_YET - def render_OPTIONS(self, request): respond_with_json_bytes(request, 200, {}, send_cors=True) return server.NOT_DONE_YET - - @defer.inlineCallbacks - def _async_render(self, request): - try: - # TODO: The checks here are a bit late. The content will have - # already been uploaded to a tmp file at this point - content_length = request.getHeader("Content-Length") - if content_length is None: - raise SynapseError( - msg="Request must specify a Content-Length", code=400 - ) - if int(content_length) > self.max_upload_size: - raise SynapseError( - msg="Upload request body is too large", - code=413, - ) - - fname = yield self.map_request_to_name(request) - - # TODO I have a suspicious feeling this is just going to block - with open(fname, "wb") as f: - f.write(request.content.read()) - - # FIXME (erikj): These should use constants. - file_name = os.path.basename(fname) - # FIXME: we can't assume what the repo's public mounted path is - # ...plus self-signed SSL won't work to remote clients anyway - # ...and we can't assume that it's SSL anyway, as we might want to - # serve it via the non-SSL listener... 
-            url = "%s/_matrix/content/%s" % (
-                self.external_addr, file_name
-            )
-
-            respond_with_json_bytes(request, 200,
-                                    json.dumps({"content_token": url}),
-                                    send_cors=True)
-
-        except CodeMessageException as e:
-            logger.exception(e)
-            respond_with_json_bytes(request, e.code,
-                                    json.dumps(cs_exception(e)))
-        except Exception as e:
-            logger.error("Failed to store file: %s" % e)
-            respond_with_json_bytes(
-                request,
-                500,
-                json.dumps({"error": "Internal server error"}),
-                send_cors=True)

From 5cc7564c5c56880ff98af934b9169eac4fe895d3 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 21 Jun 2016 16:38:05 +0100
Subject: [PATCH 166/414] Optionally start or stop workers in synctl.

Optionally start or stop an individual worker by passing -w with
the path to the worker config.

Optionally start or stop every worker and the main synapse by passing
-a with a path to a directory containing worker configs.

The "-w" is intended to be used to bounce individual worker processes.
The "-a" is intended for when you want to restart all the workers
simultaneously, for example when performing database upgrades.
---
 synapse/app/synctl.py | 186 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 155 insertions(+), 31 deletions(-)

diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 39f4bf6e53..bb41962d47 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -14,11 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
+import argparse
+import collections
+import glob
 import os
 import os.path
-import subprocess
 import signal
+import subprocess
+import sys
 
 import yaml
 
 SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
 
@@ -28,60 +31,181 @@ RED = "\x1b[1;31m"
 NORMAL = "\x1b[m"
 
 
+def write(message, colour=NORMAL, stream=sys.stdout):
+    if colour == NORMAL:
+        stream.write(message + "\n")
+    else:
+        stream.write(colour + message + NORMAL + "\n")
+
+
 def start(configfile):
-    print ("Starting ...")
+    write("Starting ...")
     args = SYNAPSE
     args.extend(["--daemonize", "-c", configfile])
 
     try:
         subprocess.check_call(args)
-        print (GREEN + "started" + NORMAL)
+        write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
    except subprocess.CalledProcessError as e:
-        print (
-            RED +
-            "error starting (exit code: %d); see above for logs" % e.returncode +
-            NORMAL
+        write(
+            "error starting (exit code: %d); see above for logs" % e.returncode,
+            colour=RED,
        )
 
 
-def stop(pidfile):
+def start_worker(app, configfile, worker_configfile):
+    args = [
+        "python", "-B",
+        "-m", app,
+        "-c", configfile,
+        "-c", worker_configfile
+    ]
+
+    try:
+        subprocess.check_call(args)
+        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
+    except subprocess.CalledProcessError as e:
+        write(
+            "error starting %s(%r) (exit code: %d); see above for logs" % (
+                app, worker_configfile, e.returncode,
+            ),
+            colour=RED,
+        )
+
+
+def stop(pidfile, app):
     if os.path.exists(pidfile):
         pid = int(open(pidfile).read())
         os.kill(pid, signal.SIGTERM)
-        print (GREEN + "stopped" + NORMAL)
+        write("stopped %s" % (app,), colour=GREEN)
+
+
+Worker = collections.namedtuple("Worker", [
+    "app", "configfile", "pidfile", "cache_factor"
+])
 
 
 def main():
-    configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"
-    if not os.path.exists(configfile):
-        sys.stderr.write(
-            "No config file found\n"
-            "To generate a config file, run '%s -c %s --generate-config"
-            " --server-name='\n" % (
-                " ".join(SYNAPSE), configfile
-            )
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "action",
+        choices=["start", "stop", "restart"],
+        help="whether to start, stop or restart the synapse",
+    )
+    parser.add_argument(
+        "configfile",
+        nargs="?",
+        default="homeserver.yaml",
+        help="the homeserver config file, defaults to homeserver.yaml",
+    )
+    parser.add_argument(
+        "-w", "--worker",
+        metavar="WORKERCONFIG",
+        help="start or stop a single worker",
+    )
+    parser.add_argument(
+        "-a", "--all-processes",
+        metavar="WORKERCONFIGDIR",
+        help="start or stop all the workers in the given directory"
+             " and the main synapse process",
+    )
+
+    options = parser.parse_args()
+
+    if options.worker and options.all_processes:
+        write(
+            'Cannot use "--worker" with "--all-processes"',
+            stream=sys.stderr
         )
         sys.exit(1)
 
-    config = yaml.load(open(configfile))
+    configfile = options.configfile
+
+    if not os.path.exists(configfile):
+        write(
+            "No config file found\n"
+            "To generate a config file, run '%s -c %s --generate-config"
+            " --server-name='\n" % (
+                " ".join(SYNAPSE), options.configfile
+            ),
+            stream=sys.stderr,
+        )
+        sys.exit(1)
+
+    with open(configfile) as stream:
+        config = yaml.load(stream)
+
     pidfile = config["pid_file"]
-    cache_factor = config.get("synctl_cache_factor", None)
+    cache_factor = config.get("synctl_cache_factor")
+    start_stop_synapse = True
 
     if cache_factor:
         os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
 
-    action = sys.argv[1] if sys.argv[1:] else "usage"
-    if action == "start":
-        start(configfile)
-    elif action == "stop":
-        stop(pidfile)
-    elif action == "restart":
-        stop(pidfile)
-        start(configfile)
-    else:
-        sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
-        sys.exit(1)
+    worker_configfiles = []
+    if options.worker:
+        start_stop_synapse = False
+        worker_configfile = options.worker
+        if not os.path.exists(worker_configfile):
+            write(
+                "No worker config found at %r" % (worker_configfile,),
+                stream=sys.stderr,
+            )
+            sys.exit(1)
+        worker_configfiles.append(worker_configfile)
+
+    if options.all_processes:
+        worker_configdir = options.all_processes
+        if not os.path.isdir(worker_configdir):
+            write(
+                "No worker config directory found at %r" % (worker_configdir,),
+                stream=sys.stderr,
+            )
+            sys.exit(1)
+        worker_configfiles.extend(sorted(glob.glob(
+            os.path.join(worker_configdir, "*.yaml")
+        )))
+
+    workers = []
+    for worker_configfile in worker_configfiles:
+        with open(worker_configfile) as stream:
+            worker_config = yaml.load(stream)
+        worker_app = worker_config["worker_app"]
+        worker_pidfile = worker_config["worker_pid_file"]
+        worker_daemonize = worker_config["worker_daemonize"]
+        assert worker_daemonize  # TODO print something more user friendly
+        worker_cache_factor = worker_config.get("synctl_cache_factor")
+        workers.append(Worker(
+            worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
+        ))
+
+    action = options.action
+
+    if action == "stop" or action == "restart":
+        for worker in workers:
+            stop(worker.pidfile, worker.app)
+
+        if start_stop_synapse:
+            stop(pidfile, "synapse.app.homeserver")
+
+    # TODO: Wait for synapse to actually shutdown before starting it again
+
+    if action == "start" or action == "restart":
+        if start_stop_synapse:
+            start(configfile)
+
+        for worker in workers:
+            if worker.cache_factor:
+                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
+
+            start_worker(worker.app, configfile, worker.configfile)
+
+            if cache_factor:
+                os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
+            else:
+                os.environ.pop("SYNAPSE_CACHE_FACTOR",
None) if __name__ == "__main__": From 0a32208e5dde4980a5962f17e9b27f2e28e1f3f1 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Mon, 6 Jun 2016 02:05:57 +0200 Subject: [PATCH 167/414] Rework ldap integration with ldap3 Use the pure-python ldap3 library, which eliminates the need for a system dependency. Offer both a `search` and `simple_bind` mode, for more sophisticated ldap scenarios. - `search` tries to find a matching DN within the `user_base` while employing the `user_filter`, then tries the bind when a single matching DN was found. - `simple_bind` tries the bind against a specific DN by combining the localpart and `user_base` Offer support for STARTTLS on a plain connection. The configuration was changed to reflect these new possibilities. Signed-off-by: Martin Weinelt --- synapse/config/ldap.py | 102 +++++++++++----- synapse/handlers/auth.py | 211 +++++++++++++++++++++++++++------ synapse/python_dependencies.py | 3 + tests/utils.py | 1 + 4 files changed, 253 insertions(+), 64 deletions(-) diff --git a/synapse/config/ldap.py b/synapse/config/ldap.py index 9c14593a99..d83c2230be 100644 --- a/synapse/config/ldap.py +++ b/synapse/config/ldap.py @@ -13,40 +13,88 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ._base import Config +from ._base import Config, ConfigError + + +MISSING_LDAP3 = ( + "Missing ldap3 library. This is required for LDAP Authentication." +) + + +class LDAPMode(object): + SIMPLE = "simple", + SEARCH = "search", + + LIST = (SIMPLE, SEARCH) class LDAPConfig(Config): def read_config(self, config): - ldap_config = config.get("ldap_config", None) - if ldap_config: - self.ldap_enabled = ldap_config.get("enabled", False) - self.ldap_server = ldap_config["server"] - self.ldap_port = ldap_config["port"] - self.ldap_tls = ldap_config.get("tls", False) - self.ldap_search_base = ldap_config["search_base"] - self.ldap_search_property = ldap_config["search_property"] - self.ldap_email_property = ldap_config["email_property"] - self.ldap_full_name_property = ldap_config["full_name_property"] - else: - self.ldap_enabled = False - self.ldap_server = None - self.ldap_port = None - self.ldap_tls = False - self.ldap_search_base = None - self.ldap_search_property = None - self.ldap_email_property = None - self.ldap_full_name_property = None + ldap_config = config.get("ldap_config", {}) + + self.ldap_enabled = ldap_config.get("enabled", False) + + if self.ldap_enabled: + # verify dependencies are available + try: + import ldap3 + ldap3 # to stop unused lint + except ImportError: + raise ConfigError(MISSING_LDAP3) + + self.ldap_mode = LDAPMode.SIMPLE + + # verify config sanity + self.require_keys(ldap_config, [ + "uri", + "base", + "attributes", + ]) + + self.ldap_uri = ldap_config["uri"] + self.ldap_start_tls = ldap_config.get("start_tls", False) + self.ldap_base = ldap_config["base"] + self.ldap_attributes = ldap_config["attributes"] + + if "bind_dn" in ldap_config: + self.ldap_mode = LDAPMode.SEARCH + self.require_keys(ldap_config, [ + "bind_dn", + "bind_password", + ]) + + self.ldap_bind_dn = ldap_config["bind_dn"] + self.ldap_bind_password = ldap_config["bind_password"] + self.ldap_filter = ldap_config.get("filter", None) + + # verify attribute lookup + self.require_keys(ldap_config['attributes'], [ + "uid", + "name", + "mail", + ]) + + def require_keys(self, config, required): + missing = [key for key in required if key not in config] + if missing: + raise ConfigError( + "LDAP enabled but missing required config 
values: {}".format( + ", ".join(missing) + ) + ) def default_config(self, **kwargs): return """\ # ldap_config: # enabled: true - # server: "ldap://localhost" - # port: 389 - # tls: false - # search_base: "ou=Users,dc=example,dc=com" - # search_property: "cn" - # email_property: "email" - # full_name_property: "givenName" + # uri: "ldap://ldap.example.com:389" + # start_tls: true + # base: "ou=users,dc=example,dc=com" + # attributes: + # uid: "cn" + # mail: "email" + # name: "givenName" + # #bind_dn: + # #bind_password: + # #filter: "(objectClass=posixAccount)" """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index b38f81e999..968095c141 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -20,6 +20,7 @@ from synapse.api.constants import LoginType from synapse.types import UserID from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError from synapse.util.async import run_on_reactor +from synapse.config.ldap import LDAPMode from twisted.web.client import PartialDownloadError @@ -28,6 +29,12 @@ import bcrypt import pymacaroons import simplejson +try: + import ldap3 +except ImportError: + ldap3 = None + pass + import synapse.util.stringutils as stringutils @@ -50,17 +57,20 @@ class AuthHandler(BaseHandler): self.INVALID_TOKEN_HTTP_STATUS = 401 self.ldap_enabled = hs.config.ldap_enabled - self.ldap_server = hs.config.ldap_server - self.ldap_port = hs.config.ldap_port - self.ldap_tls = hs.config.ldap_tls - self.ldap_search_base = hs.config.ldap_search_base - self.ldap_search_property = hs.config.ldap_search_property - self.ldap_email_property = hs.config.ldap_email_property - self.ldap_full_name_property = hs.config.ldap_full_name_property - - if self.ldap_enabled is True: - import ldap - logger.info("Import ldap version: %s", ldap.__version__) + if self.ldap_enabled: + if not ldap3: + raise RuntimeError( + 'Missing ldap3 library. This is required for LDAP Authentication.' + ) + self.ldap_mode = hs.config.ldap_mode + self.ldap_uri = hs.config.ldap_uri + self.ldap_start_tls = hs.config.ldap_start_tls + self.ldap_base = hs.config.ldap_base + self.ldap_filter = hs.config.ldap_filter + self.ldap_attributes = hs.config.ldap_attributes + if self.ldap_mode == LDAPMode.SEARCH: + self.ldap_bind_dn = hs.config.ldap_bind_dn + self.ldap_bind_password = hs.config.ldap_bind_password self.hs = hs # FIXME better possibility to access registrationHandler later? @@ -452,40 +462,167 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def _check_ldap_password(self, user_id, password): - if not self.ldap_enabled: - logger.debug("LDAP not configured") + """ Attempt to authenticate a user against an LDAP Server + and register an account if none exists. 
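+
+            Args:
+                user_id (str): complete @user:id
+                password (str): the provided password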
+ + Returns: + True if authentication against LDAP was successful + """ + + if not ldap3 or not self.ldap_enabled: defer.returnValue(False) - import ldap + if self.ldap_mode not in LDAPMode.LIST: + raise RuntimeError( + 'Invalid ldap mode specified: {mode}'.format( + mode=self.ldap_mode + ) + ) - logger.info("Authenticating %s with LDAP" % user_id) try: - ldap_url = "%s:%s" % (self.ldap_server, self.ldap_port) - logger.debug("Connecting LDAP server at %s" % ldap_url) - l = ldap.initialize(ldap_url) - if self.ldap_tls: - logger.debug("Initiating TLS") - self._connection.start_tls_s() + server = ldap3.Server(self.ldap_uri) + logger.debug( + "Attempting ldap connection with %s", + self.ldap_uri + ) - local_name = UserID.from_string(user_id).localpart - - dn = "%s=%s, %s" % ( - self.ldap_search_property, - local_name, - self.ldap_search_base) - logger.debug("DN for LDAP authentication: %s" % dn) - - l.simple_bind_s(dn.encode('utf-8'), password.encode('utf-8')) - - if not (yield self.does_user_exist(user_id)): - handler = self.hs.get_handlers().registration_handler - user_id, access_token = ( - yield handler.register(localpart=local_name) + localpart = UserID.from_string(user_id).localpart + if self.ldap_mode == LDAPMode.SIMPLE: + # bind with the the local users ldap credentials + bind_dn = "{prop}={value},{base}".format( + prop=self.ldap_attributes['uid'], + value=localpart, + base=self.ldap_base + ) + conn = ldap3.Connection(server, bind_dn, password) + logger.debug( + "Established ldap connection in simple mode: %s", + conn ) + if self.ldap_start_tls: + conn.start_tls() + logger.debug( + "Upgraded ldap connection in simple mode through StartTLS: %s", + conn + ) + + conn.bind() + + elif self.ldap_mode == LDAPMode.SEARCH: + # connect with preconfigured credentials and search for local user + conn = ldap3.Connection( + server, + self.ldap_bind_dn, + self.ldap_bind_password + ) + logger.debug( + "Established ldap connection in search mode: %s", + conn + ) + + if self.ldap_start_tls: + conn.start_tls() + logger.debug( + "Upgraded ldap connection in search mode through StartTLS: %s", + conn + ) + + conn.bind() + + # find matching dn + query = "({prop}={value})".format( + prop=self.ldap_attributes['uid'], + value=localpart + ) + if self.ldap_filter: + query = "(&{query}{filter})".format( + query=query, + filter=self.ldap_filter + ) + logger.debug("ldap search filter: %s", query) + result = conn.search(self.ldap_base, query) + + if result and len(conn.response) == 1: + # found exactly one result + user_dn = conn.response[0]['dn'] + logger.debug('ldap search found dn: %s', user_dn) + + # unbind and reconnect, rebind with found dn + conn.unbind() + conn = ldap3.Connection( + server, + user_dn, + password, + auto_bind=True + ) + else: + # found 0 or > 1 results, abort! 
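+                    # guessing between multiple candidate DNs here would
+                    # risk binding as the wrong user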
+                    logger.warn(
+                        "ldap search returned unexpected (%d!=1) amount of results",
+                        len(conn.response)
+                    )
+                    defer.returnValue(False)
+
+            logger.info(
+                "User authenticated against ldap server: %s",
+                conn
+            )
+
+            # check for existing account, if none exists, create one
+            if not (yield self.does_user_exist(user_id)):
+                # query user metadata for account creation
+                query = "({prop}={value})".format(
+                    prop=self.ldap_attributes['uid'],
+                    value=localpart
+                )
+
+                if self.ldap_mode == LDAPMode.SEARCH and self.ldap_filter:
+                    query = "(&{filter}{user_filter})".format(
+                        filter=query,
+                        user_filter=self.ldap_filter
+                    )
+                logger.debug("ldap registration filter: %s", query)
+
+                result = conn.search(
+                    search_base=self.ldap_base,
+                    search_filter=query,
+                    attributes=[
+                        self.ldap_attributes['name'],
+                        self.ldap_attributes['mail']
+                    ]
+                )
+
+                if len(conn.response) == 1:
+                    attrs = conn.response[0]['attributes']
+                    mail = attrs[self.ldap_attributes['mail']][0]
+                    name = attrs[self.ldap_attributes['name']][0]
+
+                    # create account
+                    registration_handler = self.hs.get_handlers().registration_handler
+                    user_id, access_token = (
+                        yield registration_handler.register(localpart=localpart)
+                    )
+
+                    # TODO: bind email, set displayname with data from ldap directory
+
+                    logger.info(
+                        "ldap registration successful: %s: %s (%s, %s)",
+                        user_id,
+                        localpart,
+                        name,
+                        mail
+                    )
+                else:
+                    logger.warn(
+                        "ldap registration failed: unexpected (%d!=1) amount of results",
+                        len(conn.response)
+                    )
+                    defer.returnValue(False)
+
             defer.returnValue(True)
-        except ldap.LDAPError, e:
-            logger.warn("LDAP error: %s", e)
+        except ldap3.core.exceptions.LDAPException as e:
+            logger.warn("Error during ldap authentication: %s", e)
             defer.returnValue(False)
 
     @defer.inlineCallbacks
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index e0a7a19777..e024cec0a2 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -48,6 +48,9 @@ CONDITIONAL_REQUIREMENTS = {
         "Jinja2>=2.8": ["Jinja2>=2.8"],
         "bleach>=1.4.2": ["bleach>=1.4.2"],
     },
+    "ldap": {
+        "ldap3>=1.0": ["ldap3>=1.0"],
+    },
 }
 
diff --git a/tests/utils.py b/tests/utils.py
index 6e41ae1ff6..ed547bc39b 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -56,6 +56,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
         config.use_frozen_dicts = True
         config.database_config = {"name": "sqlite3"}
+        config.ldap_enabled = False
 
     if "clock" not in kargs:
         kargs["clock"] = MockClock()

From 3a4120e49a15f27368a231b32245e32a4ccadb06 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 22 Jun 2016 17:47:18 +0100
Subject: [PATCH 168/414] Put most recent 20 messages in notif

Fixes https://github.com/vector-im/vector-web/issues/1648
---
 synapse/storage/event_push_actions.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 940e11d7a2..5aaaf4b19d 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -152,7 +152,7 @@ class EventPushActionsStore(SQLBaseStore):
             if max_stream_ordering is not None:
                 sql += " AND ep.stream_ordering <= ?"
                 args.append(max_stream_ordering)
-            sql += " ORDER BY ep.stream_ordering ASC LIMIT ?"
+            sql += " ORDER BY ep.stream_ordering DESC LIMIT ?"
             args.append(limit)
             txn.execute(sql, args)
             return txn.fetchall()
@@ -176,7 +176,8 @@
             if max_stream_ordering is not None:
                 sql += " AND ep.stream_ordering <= ?"
args.append(max_stream_ordering) - sql += " ORDER BY ep.stream_ordering ASC" + sql += " ORDER BY ep.stream_ordering DESC LIMIT ?" + args.append(limit) txn.execute(sql, args) return txn.fetchall() no_read_receipt = yield self.runInteraction( @@ -191,7 +192,7 @@ class EventPushActionsStore(SQLBaseStore): "actions": json.loads(row[3]), "received_ts": row[4], } for row in after_read_receipt + no_read_receipt - ]) + ][0:limit]) @defer.inlineCallbacks def get_time_of_last_push_action_before(self, stream_ordering): From f73fdb04a6cc361e9396c9b22f81544ecfb895bd Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 22 Jun 2016 17:51:40 +0100 Subject: [PATCH 169/414] Style --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 5aaaf4b19d..2e85cf5f51 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -192,7 +192,7 @@ class EventPushActionsStore(SQLBaseStore): "actions": json.loads(row[3]), "received_ts": row[4], } for row in after_read_receipt + no_read_receipt - ][0:limit]) + ][:limit]) @defer.inlineCallbacks def get_time_of_last_push_action_before(self, stream_ordering): From b5fb7458d501d3e0e24062b2a479232246f13d4e Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 22 Jun 2016 18:07:14 +0100 Subject: [PATCH 170/414] Actually we need to order these properly otherwise we'll end up returning the wrong 20 --- synapse/storage/event_push_actions.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 2e85cf5f51..5f1b6f63a9 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -184,7 +184,8 @@ class EventPushActionsStore(SQLBaseStore): "get_unread_push_actions_for_user_in_range", get_no_receipt ) - defer.returnValue([ + # Make a list of dicts from the two sets of results. 
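+        # (rows that fall after the user's read receipt and rows for rooms
+        # with no receipt at all), so that they can be merged and sorted below.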
+ notifs = [ { "event_id": row[0], "room_id": row[1], @@ -192,7 +193,16 @@ class EventPushActionsStore(SQLBaseStore): "actions": json.loads(row[3]), "received_ts": row[4], } for row in after_read_receipt + no_read_receipt - ][:limit]) + ] + + # Now sort it so it's ordered correctly, since currently it will + # contain results from the first query, correctly ordered, followed + # by results from the second query, but we want them all ordered + # by received_ts + notifs.sort(key=lambda r: -(r['received_ts'] or 0)) + + # Now return the first `limit` + defer.returnValue(notifs[:limit]) @defer.inlineCallbacks def get_time_of_last_push_action_before(self, stream_ordering): From 870c45913ef17584a65d0acf98336f1ddd6bf1c0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 24 Jun 2016 11:41:11 +0100 Subject: [PATCH 171/414] Use similar naming we use in email notifs for push Fixes https://github.com/vector-im/vector-web/issues/1654 --- synapse/push/httppusher.py | 9 +++-- synapse/push/push_tools.py | 33 ++++++++-------- synapse/replication/slave/storage/events.py | 8 ---- synapse/storage/events.py | 7 ---- synapse/storage/room.py | 43 --------------------- synapse/util/presentable_names.py | 5 ++- 6 files changed, 26 insertions(+), 79 deletions(-) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 3992804845..2acc6cc214 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -38,6 +38,7 @@ class HttpPusher(object): self.hs = hs self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() + self.state_handler = self.hs.get_state_handler() self.user_id = pusherdict['user_name'] self.app_id = pusherdict['app_id'] self.app_display_name = pusherdict['app_display_name'] @@ -237,7 +238,9 @@ class HttpPusher(object): @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): - ctx = yield push_tools.get_context_for_event(self.hs.get_datastore(), event) + ctx = yield push_tools.get_context_for_event( + self.state_handler, event, self.user_id + ) d = { 'notification': { @@ -269,8 +272,8 @@ class HttpPusher(object): if 'content' in event: d['notification']['content'] = event.content - if len(ctx['aliases']): - d['notification']['room_alias'] = ctx['aliases'][0] + # We no longer send aliases separately, instead, we send the human + # readable name of the room, which may be an alias. if 'sender_display_name' in ctx and len(ctx['sender_display_name']) > 0: d['notification']['sender_display_name'] = ctx['sender_display_name'] if 'name' in ctx and len(ctx['name']) > 0: diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 89a3b5e90a..d91ca34a8b 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -14,7 +14,9 @@ # limitations under the License. 
 from twisted.internet import defer
 
-
+from synapse.util.presentable_names import (
+    calculate_room_name, name_from_member_event
+)
 
 @defer.inlineCallbacks
 def get_badge_count(store, user_id):
@@ -45,24 +47,21 @@ def get_badge_count(store, user_id):
 
 
 @defer.inlineCallbacks
-def get_context_for_event(store, ev):
-    name_aliases = yield store.get_room_name_and_aliases(
-        ev.room_id
-    )
+def get_context_for_event(state_handler, ev, user_id):
+    ctx = {}
 
-    ctx = {'aliases': name_aliases[1]}
-    if name_aliases[0] is not None:
-        ctx['name'] = name_aliases[0]
+    room_state = yield state_handler.get_current_state(ev.room_id)
 
-    their_member_events_for_room = yield store.get_current_state(
-        room_id=ev.room_id,
-        event_type='m.room.member',
-        state_key=ev.user_id
+    # we no longer bother setting room_alias, and make room_name the
+    # human-readable name instead, be that m.room.name, an alias or
+    # a list of people in the room
     )
-    for mev in their_member_events_for_room:
-        if mev.content['membership'] == 'join' and 'displayname' in mev.content:
-            dn = mev.content['displayname']
-            if dn is not None:
-                ctx['sender_display_name'] = dn
+    name = calculate_room_name(
+        room_state, user_id, fallback_to_single_member=False
+    )
+    if name:
+        ctx['name'] = name
+
+    sender_state_event = room_state[("m.room.member", ev.sender)]
+    ctx['sender_display_name'] = name_from_member_event(sender_state_event)
 
     defer.returnValue(ctx)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 877c68508c..86e0721ace 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -64,7 +64,6 @@ class SlavedEventStore(BaseSlavedStore):
 
     # Cached functions can't be accessed through a class instance so we need
     # to reach inside the __dict__ to extract them.
- get_room_name_and_aliases = RoomStore.__dict__["get_room_name_and_aliases"] get_rooms_for_user = RoomMemberStore.__dict__["get_rooms_for_user"] get_users_in_room = RoomMemberStore.__dict__["get_users_in_room"] get_latest_event_ids_in_room = EventFederationStore.__dict__[ @@ -202,7 +201,6 @@ class SlavedEventStore(BaseSlavedStore): self.get_rooms_for_user.invalidate_all() self.get_users_in_room.invalidate((event.room_id,)) # self.get_joined_hosts_for_room.invalidate((event.room_id,)) - self.get_room_name_and_aliases.invalidate((event.room_id,)) self._invalidate_get_event_cache(event.event_id) @@ -246,9 +244,3 @@ class SlavedEventStore(BaseSlavedStore): self._get_current_state_for_key.invalidate(( event.room_id, event.type, event.state_key )) - - if event.type in [EventTypes.Name, EventTypes.Aliases]: - self.get_room_name_and_aliases.invalidate( - (event.room_id,) - ) - pass diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6d978ffcd5..88a6ff7310 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -355,7 +355,6 @@ class EventsStore(SQLBaseStore): txn.call_after(self.get_rooms_for_user.invalidate_all) txn.call_after(self.get_users_in_room.invalidate, (event.room_id,)) txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,)) - txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,)) # Add an entry to the current_state_resets table to record the point # where we clobbered the current state @@ -666,12 +665,6 @@ class EventsStore(SQLBaseStore): (event.room_id, event.type, event.state_key,) ) - if event.type in [EventTypes.Name, EventTypes.Aliases]: - txn.call_after( - self.get_room_name_and_aliases.invalidate, - (event.room_id,) - ) - self._simple_upsert_txn( txn, "current_state_events", diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 97f9f1929c..fb89ce01b1 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -192,49 +192,6 @@ class RoomStore(SQLBaseStore): # This should be unreachable. raise Exception("Unrecognized database engine") - @cachedInlineCallbacks() - def get_room_name_and_aliases(self, room_id): - def get_room_name(txn): - sql = ( - "SELECT name FROM room_names" - " INNER JOIN current_state_events USING (room_id, event_id)" - " WHERE room_id = ?" - " LIMIT 1" - ) - - txn.execute(sql, (room_id,)) - rows = txn.fetchall() - if rows: - return rows[0][0] - else: - return None - - return [row[0] for row in txn.fetchall()] - - def get_room_aliases(txn): - sql = ( - "SELECT content FROM current_state_events" - " INNER JOIN events USING (room_id, event_id)" - " WHERE room_id = ?" 
- ) - txn.execute(sql, (room_id,)) - return [row[0] for row in txn.fetchall()] - - name = yield self.runInteraction("get_room_name", get_room_name) - alias_contents = yield self.runInteraction("get_room_aliases", get_room_aliases) - - aliases = [] - - for c in alias_contents: - try: - content = json.loads(c) - except: - continue - - aliases.extend(content.get('aliases', [])) - - defer.returnValue((name, aliases)) - def add_event_report(self, room_id, event_id, user_id, reason, content, received_ts): next_id = self._event_reports_id_gen.get_next() diff --git a/synapse/util/presentable_names.py b/synapse/util/presentable_names.py index a6866f6117..4c54812e6f 100644 --- a/synapse/util/presentable_names.py +++ b/synapse/util/presentable_names.py @@ -25,7 +25,8 @@ ALIAS_RE = re.compile(r"^#.*:.+$") ALL_ALONE = "Empty Room" -def calculate_room_name(room_state, user_id, fallback_to_members=True): +def calculate_room_name(room_state, user_id, fallback_to_members=True, + fallback_to_single_member=True): """ Works out a user-facing name for the given room as per Matrix spec recommendations. @@ -129,6 +130,8 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True): return name_from_member_event(all_members[0]) else: return ALL_ALONE + elif len(other_members) == 1 and not fallback_to_single_member: + return None else: return descriptor_from_member_events(other_members) From 46b7362304c0ea056c65323a80a84e231c544e86 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 24 Jun 2016 11:44:57 +0100 Subject: [PATCH 172/414] pep8 --- synapse/replication/slave/storage/events.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 86e0721ace..369d839464 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -18,7 +18,6 @@ from ._slaved_id_tracker import SlavedIdTracker from synapse.api.constants import EventTypes from synapse.events import FrozenEvent from synapse.storage import DataStore -from synapse.storage.room import RoomStore from synapse.storage.roommember import RoomMemberStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.event_push_actions import EventPushActionsStore From aa3a4944d51c60886984211a7f8ae6b7fbac765d Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 24 Jun 2016 11:45:23 +0100 Subject: [PATCH 173/414] more pep8 --- synapse/storage/room.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/room.py b/synapse/storage/room.py index fb89ce01b1..8251f58670 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -18,7 +18,6 @@ from twisted.internet import defer from synapse.api.errors import StoreError from ._base import SQLBaseStore -from synapse.util.caches.descriptors import cachedInlineCallbacks from .engines import PostgresEngine, Sqlite3Engine import collections From 0b640aa56bce86ca56d9fe3cd9c1fec6620ff18b Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 24 Jun 2016 11:47:11 +0100 Subject: [PATCH 174/414] even more pep8 --- synapse/push/push_tools.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index d91ca34a8b..6f2d1ad57d 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -18,6 +18,7 @@ from synapse.util.presentable_names import ( calculate_room_name, name_from_member_event ) + @defer.inlineCallbacks def get_badge_count(store, user_id): invites, joins = yield 
defer.gatherResults([ From 2455ad8468ea3d372d0f3b3828efa10419ad68ad Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 24 Jun 2016 13:34:20 +0100 Subject: [PATCH 175/414] Remove room name & alias test as get_room_name_and_alias is now gone --- .../replication/slave/storage/test_events.py | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 17587fda00..f33e6f60fb 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -58,47 +58,6 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): def tearDown(self): [unpatch() for unpatch in self.unpatches] - @defer.inlineCallbacks - def test_room_name_and_aliases(self): - create = yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.member", key=USER_ID, membership="join") - yield self.persist(type="m.room.name", key="", name="name1") - yield self.persist( - type="m.room.aliases", key="blue", aliases=["#1:blue"] - ) - yield self.replicate() - yield self.check( - "get_room_name_and_aliases", (ROOM_ID,), ("name1", ["#1:blue"]) - ) - - # Set the room name. - yield self.persist(type="m.room.name", key="", name="name2") - yield self.replicate() - yield self.check( - "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#1:blue"]) - ) - - # Set the room aliases. - yield self.persist( - type="m.room.aliases", key="blue", aliases=["#2:blue"] - ) - yield self.replicate() - yield self.check( - "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#2:blue"]) - ) - - # Leave and join the room clobbering the state. - yield self.persist(type="m.room.member", key=USER_ID, membership="leave") - yield self.persist( - type="m.room.member", key=USER_ID, membership="join", - reset_state=[create] - ) - yield self.replicate() - - yield self.check( - "get_room_name_and_aliases", (ROOM_ID,), (None, []) - ) - @defer.inlineCallbacks def test_room_members(self): create = yield self.persist(type="m.room.create", key="", creator=USER_ID) From f7fe0e5f67e44c07e100226f54e183f82f2c98eb Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 24 Jun 2016 13:53:03 +0100 Subject: [PATCH 176/414] Fix the sytests to use a port-range rather than a port base --- jenkins-dendron-postgres.sh | 3 ++- jenkins-postgres.sh | 3 ++- jenkins-sqlite.sh | 5 +++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 7e6f24aa7d..50268e0982 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -70,6 +70,7 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) : ${PORT_BASE:=8000} +: ${PORT_COUNT=20} ./jenkins/prep_sytest_for_postgres.sh @@ -81,6 +82,6 @@ echo >&2 "Running sytest with PostgreSQL"; --dendron $WORKSPACE/dendron/bin/dendron \ --pusher \ --synchrotron \ - --port-base $PORT_BASE + --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) cd .. 
diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index ae6b111591..2f0768fcb7 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -44,6 +44,7 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) : ${PORT_BASE:=8000} +: ${PORT_COUNT=20} ./jenkins/prep_sytest_for_postgres.sh @@ -51,7 +52,7 @@ echo >&2 "Running sytest with PostgreSQL"; ./jenkins/install_and_run.sh --coverage \ --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ - --port-base $PORT_BASE + --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \ cd .. cp sytest/.coverage.* . diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index 9398d9db15..da603c5af8 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -41,11 +41,12 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PORT_BASE:=8500} +: ${PORT_COUNT=20} +: ${PORT_BASE:=8000} ./jenkins/install_and_run.sh --coverage \ --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ - --port-base $PORT_BASE + --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) \ cd .. cp sytest/.coverage.* . From 70d820c87595f037f0c17dc525604aaaa0cf148c Mon Sep 17 00:00:00 2001 From: Rick Cogley Date: Sun, 26 Jun 2016 19:07:07 +0900 Subject: [PATCH 177/414] Update to reflect new location at github. Additionally it does not appear there is turnserver.conf.default, but rather, just /etc/turnserver.conf. --- docs/turn-howto.rst | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst index e2c73458e2..4f2794111f 100644 --- a/docs/turn-howto.rst +++ b/docs/turn-howto.rst @@ -9,19 +9,21 @@ the Home Server to generate credentials that are valid for use on the TURN server through the use of a secret shared between the Home Server and the TURN server. -This document described how to install coturn -(https://code.google.com/p/coturn/) which also supports the TURN REST API, +This document describes how to install coturn +(https://github.com/coturn/coturn) which also supports the TURN REST API, and integrate it with synapse. coturn Setup ============ +You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process. + 1. Check out coturn:: - svn checkout http://coturn.googlecode.com/svn/trunk/ coturn - cd coturn + svn checkout https://github.com/coturn/coturn.git coturn + cd coturn 2. Configure it:: - ./configure + ./configure You may need to install libevent2: if so, you should do so in the way recommended by your operating system. @@ -29,22 +31,21 @@ coturn Setup database is unnecessary for this purpose. 3. Build and install it:: - make - make install + make + make install - 4. Make a config file in /etc/turnserver.conf. You can customise - a config file from turnserver.conf.default. The relevant + 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant lines, with example values, are:: - lt-cred-mech - use-auth-secret - static-auth-secret=[your secret key here] - realm=turn.myserver.org + lt-cred-mech + use-auth-secret + static-auth-secret=[your secret key here] + realm=turn.myserver.org - See turnserver.conf.default for explanations of the options. + See turnserver.conf for explanations of the options. One way to generate the static-auth-secret is with pwgen:: - pwgen -s 64 1 + pwgen -s 64 1 5. 
Ensure youe firewall allows traffic into the TURN server on the ports you've configured it to listen on (remember to allow @@ -54,7 +55,7 @@ coturn Setup import your private key and certificate. 7. Start the turn server:: - bin/turnserver -o + bin/turnserver -o synapse Setup @@ -91,3 +92,4 @@ Now, restart synapse:: ./synctl restart ...and your Home Server now supports VoIP relaying! + From 63bb8f0df9946fa8084193578b44e6a931f66d51 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 27 Jun 2016 13:13:17 +0400 Subject: [PATCH 178/414] remove vector.im from default secondary DS list --- synapse/config/server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index d7e6f20518..51eaf423ce 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -149,7 +149,6 @@ class ServerConfig(Config): # room directory. # secondary_directory_servers: # - matrix.org - # - vector.im # List of ports that Synapse should listen on, their purpose and their # configuration. From 551fe80bed666cf89225d650915391bbca84c165 Mon Sep 17 00:00:00 2001 From: Rick Cogley Date: Tue, 28 Jun 2016 12:47:55 +0900 Subject: [PATCH 179/414] Remove double spaces Reading the RST spec, I was trying to get breaks to appear by entering the double spaces after the lines in the code blocks. It does not work anyway, and, as pointed out, I've removed. --- docs/turn-howto.rst | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst index 4f2794111f..f0c5601ea0 100644 --- a/docs/turn-howto.rst +++ b/docs/turn-howto.rst @@ -19,11 +19,11 @@ coturn Setup You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process. 1. Check out coturn:: - svn checkout https://github.com/coturn/coturn.git coturn - cd coturn + svn checkout https://github.com/coturn/coturn.git coturn + cd coturn 2. Configure it:: - ./configure + ./configure You may need to install libevent2: if so, you should do so in the way recommended by your operating system. @@ -31,21 +31,21 @@ You may be able to setup coturn via your package manager, or set it up manually database is unnecessary for this purpose. 3. Build and install it:: - make - make install + make + make install 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant lines, with example values, are:: - lt-cred-mech - use-auth-secret - static-auth-secret=[your secret key here] - realm=turn.myserver.org + lt-cred-mech + use-auth-secret + static-auth-secret=[your secret key here] + realm=turn.myserver.org See turnserver.conf for explanations of the options. One way to generate the static-auth-secret is with pwgen:: - pwgen -s 64 1 + pwgen -s 64 1 5. Ensure youe firewall allows traffic into the TURN server on the ports you've configured it to listen on (remember to allow @@ -55,7 +55,7 @@ You may be able to setup coturn via your package manager, or set it up manually import your private key and certificate. 7. Start the turn server:: - bin/turnserver -o + bin/turnserver -o synapse Setup @@ -92,4 +92,3 @@ Now, restart synapse:: ./synctl restart ...and your Home Server now supports VoIP relaying! - From 1ea358b28b46edffdf62a52e8a2b3faf8b2aae1d Mon Sep 17 00:00:00 2001 From: Rick Cogley Date: Tue, 28 Jun 2016 18:27:54 +0900 Subject: [PATCH 180/414] Update turn-howto.rst to use git clone svn checkout is not logical for a checkout from github, so changed the checkout to "git clone". 
thanks @dbkr Signed-off-by: Rick Cogley --- docs/turn-howto.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst index f0c5601ea0..afddebd53a 100644 --- a/docs/turn-howto.rst +++ b/docs/turn-howto.rst @@ -19,7 +19,7 @@ coturn Setup You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process. 1. Check out coturn:: - svn checkout https://github.com/coturn/coturn.git coturn + git clone https://github.com/coturn/coturn.git coturn cd coturn 2. Configure it:: From 56ec5869c98c97869f908c0309d2f9c4b648eda2 Mon Sep 17 00:00:00 2001 From: Rick Cogley Date: Tue, 28 Jun 2016 18:34:38 +0900 Subject: [PATCH 181/414] Update turn-howto.rst to use git clone (2) Not logical to use svn checkout against a github repo, so changed to git clone. Signed-off-by: Rick Cogley --- docs/turn-howto.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst index afddebd53a..04c0100715 100644 --- a/docs/turn-howto.rst +++ b/docs/turn-howto.rst @@ -19,18 +19,21 @@ coturn Setup You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process. 1. Check out coturn:: + git clone https://github.com/coturn/coturn.git coturn cd coturn 2. Configure it:: + ./configure - You may need to install libevent2: if so, you should do so + You may need to install ``libevent2``: if so, you should do so in the way recommended by your operating system. You can ignore warnings about lack of database support: a database is unnecessary for this purpose. 3. Build and install it:: + make make install @@ -55,6 +58,7 @@ You may be able to setup coturn via your package manager, or set it up manually import your private key and certificate. 7. 
Start the turn server:: + bin/turnserver -o From 314b146b2e3082fc6bc61296f5c2ea5d7735f01e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Jun 2016 11:41:20 +0100 Subject: [PATCH 182/414] Track approximate last access time for remote media --- synapse/rest/media/v1/media_repository.py | 24 ++++++++++++++ synapse/storage/media_repository.py | 15 +++++++++ synapse/storage/prepare_database.py | 2 +- .../schema/delta/33/remote_media_ts.py | 31 +++++++++++++++++++ 4 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/33/remote_media_ts.py diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 2468c3ac42..1a287b6fec 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -43,6 +43,9 @@ import urlparse logger = logging.getLogger(__name__) +UPDATE_RECENTLY_ACCESSED_REMOTES_TS = 60 * 1000 + + class MediaRepository(object): def __init__(self, hs, filepaths): self.auth = hs.get_auth() @@ -57,6 +60,22 @@ class MediaRepository(object): self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.thumbnail_requirements = hs.config.thumbnail_requirements + self.recently_accessed_remotes = set() + + self.clock.looping_call( + self._update_recently_accessed_remotes, + UPDATE_RECENTLY_ACCESSED_REMOTES_TS + ) + + @defer.inlineCallbacks + def _update_recently_accessed_remotes(self): + media = self.recently_accessed_remotes + self.recently_accessed_remotes = set() + + yield self.store.update_cached_last_access_time( + media, self.clock.time_msec() + ) + @staticmethod def _makedirs(filepath): dirname = os.path.dirname(filepath) @@ -119,6 +138,11 @@ class MediaRepository(object): media_info = yield self._download_remote_file( server_name, media_id ) + else: + self.recently_accessed_remotes.add((server_name, media_id)) + yield self.store.update_cached_last_access_time( + [(server_name, media_id)], self.clock.time_msec() + ) defer.returnValue(media_info) @defer.inlineCallbacks diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index a820fcf07f..44e4d38307 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -157,10 +157,25 @@ class MediaRepositoryStore(SQLBaseStore): "created_ts": time_now_ms, "upload_name": upload_name, "filesystem_id": filesystem_id, + "last_access_ts": time_now_ms, }, desc="store_cached_remote_media", ) + def update_cached_last_access_time(self, origin_id_tuples, time_ts): + def update_cache_txn(txn): + sql = ( + "UPDATE remote_media_cache SET last_access_ts = ?" + " WHERE media_origin = ? AND media_id = ?" + ) + + txn.executemany(sql, ( + (time_ts, media_origin, media_id) + for media_origin, media_id in origin_id_tuples + )) + + return self.runInteraction("update_cached_last_access_time", update_cache_txn) + def get_remote_media_thumbnails(self, origin, media_id): return self._simple_select_list( "remote_media_cache_thumbnails", diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c8487c8838..8801669a6b 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. 
-SCHEMA_VERSION = 32 +SCHEMA_VERSION = 33 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/delta/33/remote_media_ts.py new file mode 100644 index 0000000000..55ae43f395 --- /dev/null +++ b/synapse/storage/schema/delta/33/remote_media_ts.py @@ -0,0 +1,31 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + + +ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT" + + +def run_create(cur, database_engine, *args, **kwargs): + cur.execute(ALTER_TABLE) + + +def run_upgrade(cur, database_engine, *args, **kwargs): + cur.execute( + database_engine.convert_param_style( + "UPDATE remote_media_cache SET last_access_ts = ?" + ), + (int(time.time() * 1000),) + ) From a70688445dd7a9fa41a55a642fb9a394f291ae45 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Jun 2016 14:57:59 +0100 Subject: [PATCH 183/414] Implement purge_media_cache admin API --- synapse/rest/client/v1/admin.py | 32 ++++++++++ synapse/rest/media/v1/filepath.py | 6 ++ synapse/rest/media/v1/media_repository.py | 78 +++++++++++++++++------ synapse/server.py | 5 ++ synapse/storage/media_repository.py | 29 +++++++++ 5 files changed, 130 insertions(+), 20 deletions(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index aa05b3f023..8ec8569a49 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -46,5 +46,37 @@ class WhoisRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) +class PurgeMediaCacheRestServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns("/admin/purge_media_cache") + + def __init__(self, hs): + self.media_repository = hs.get_media_repository() + super(PurgeMediaCacheRestServlet, self).__init__(hs) + + @defer.inlineCallbacks + def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + is_admin = yield self.auth.is_server_admin(requester.user) + + if not is_admin: + raise AuthError(403, "You are not a server admin") + + before_ts = request.args.get("before_ts", None) + if not before_ts: + raise SynapseError(400, "Missing 'before_ts' arg") + + logger.info("before_ts: %r", before_ts[0]) + + try: + before_ts = int(before_ts[0]) + except Exception: + raise SynapseError(400, "Invalid 'before_ts' arg") + + ret = yield self.media_repository.delete_old_remote_media(before_ts) + + defer.returnValue((200, ret)) + + def register_servlets(hs, http_server): WhoisRestServlet(hs).register(http_server) + PurgeMediaCacheRestServlet(hs).register(http_server) diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 422ab86fb3..0137458f71 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -65,3 +65,9 @@ class MediaFilePaths(object): file_id[0:2], file_id[2:4], file_id[4:], file_name ) + + def remote_media_thumbnail_dir(self, server_name, file_id): + return os.path.join( + self.base_path, 
"remote_thumbnail", server_name, + file_id[0:2], file_id[2:4], file_id[4:], + ) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 1a287b6fec..844628c121 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -30,11 +30,13 @@ from synapse.api.errors import SynapseError from twisted.internet import defer, threads -from synapse.util.async import ObservableDeferred +from synapse.util.async import Linearizer from synapse.util.stringutils import is_ascii from synapse.util.logcontext import preserve_context_over_fn import os +import errno +import shutil import cgi import logging @@ -47,7 +49,7 @@ UPDATE_RECENTLY_ACCESSED_REMOTES_TS = 60 * 1000 class MediaRepository(object): - def __init__(self, hs, filepaths): + def __init__(self, hs): self.auth = hs.get_auth() self.client = MatrixFederationHttpClient(hs) self.clock = hs.get_clock() @@ -55,11 +57,12 @@ class MediaRepository(object): self.store = hs.get_datastore() self.max_upload_size = hs.config.max_upload_size self.max_image_pixels = hs.config.max_image_pixels - self.filepaths = filepaths - self.downloads = {} + self.filepaths = MediaFilePaths(hs.config.media_store_path) self.dynamic_thumbnails = hs.config.dynamic_thumbnails self.thumbnail_requirements = hs.config.thumbnail_requirements + self.remote_media_linearizer = Linearizer() + self.recently_accessed_remotes = set() self.clock.looping_call( @@ -112,22 +115,12 @@ class MediaRepository(object): defer.returnValue("mxc://%s/%s" % (self.server_name, media_id)) + @defer.inlineCallbacks def get_remote_media(self, server_name, media_id): key = (server_name, media_id) - download = self.downloads.get(key) - if download is None: - download = self._get_remote_media_impl(server_name, media_id) - download = ObservableDeferred( - download, - consumeErrors=True - ) - self.downloads[key] = download - - @download.addBoth - def callback(media_info): - del self.downloads[key] - return media_info - return download.observe() + with (yield self.remote_media_linearizer.queue(key)): + media_info = yield self._get_remote_media_impl(server_name, media_id) + defer.returnValue(media_info) @defer.inlineCallbacks def _get_remote_media_impl(self, server_name, media_id): @@ -440,6 +433,52 @@ class MediaRepository(object): "height": m_height, }) + @defer.inlineCallbacks + def delete_old_remote_media(self, before_ts): + old_media = yield self.store.get_remote_media_before(before_ts) + + deleted = 0 + + for media in old_media: + origin = media["media_origin"] + media_id = media["media_id"] + file_id = media["filesystem_id"] + key = (origin, media_id) + + logger.info("Deleting: %r", key) + + with (yield self.remote_media_linearizer.queue(key)): + full_path = self.filepaths.remote_media_filepath(origin, file_id) + full_dir = os.path.dirname(full_path) + try: + os.remove(full_path) + except OSError as e: + logger.warn("Failed to remove file: %r", full_path) + if e.errno == errno.ENOENT: + pass + else: + continue + + try: + os.removedirs(full_dir) + except OSError: + pass + + thumbnail_dir = self.filepaths.remote_media_thumbnail_dir( + origin, file_id + ) + shutil.rmtree(thumbnail_dir, ignore_errors=True) + + yield self.store.delete_remote_media(origin, media_id) + try: + os.removedirs(thumbnail_dir) + except OSError: + pass + + deleted += 1 + + defer.returnValue({"deleted": deleted}) + class MediaRepositoryResource(Resource): """File uploading and downloading. 
@@ -488,9 +527,8 @@ class MediaRepositoryResource(Resource): def __init__(self, hs): Resource.__init__(self) - filepaths = MediaFilePaths(hs.config.media_store_path) - media_repo = MediaRepository(hs, filepaths) + media_repo = hs.get_media_repository() self.putChild("upload", UploadResource(hs, media_repo)) self.putChild("download", DownloadResource(hs, media_repo)) diff --git a/synapse/server.py b/synapse/server.py index dd4b81c658..d49a1a8a96 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -45,6 +45,7 @@ from synapse.crypto.keyring import Keyring from synapse.push.pusherpool import PusherPool from synapse.events.builder import EventBuilderFactory from synapse.api.filtering import Filtering +from synapse.rest.media.v1.media_repository import MediaRepository from synapse.http.matrixfederationclient import MatrixFederationHttpClient @@ -113,6 +114,7 @@ class HomeServer(object): 'filtering', 'http_client_context_factory', 'simple_http_client', + 'media_repository', ] def __init__(self, hostname, **kwargs): @@ -233,6 +235,9 @@ class HomeServer(object): **self.db_config.get("args", {}) ) + def build_media_repository(self): + return MediaRepository(self) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py index 44e4d38307..4c0f82353d 100644 --- a/synapse/storage/media_repository.py +++ b/synapse/storage/media_repository.py @@ -205,3 +205,32 @@ class MediaRepositoryStore(SQLBaseStore): }, desc="store_remote_media_thumbnail", ) + + def get_remote_media_before(self, before_ts): + sql = ( + "SELECT media_origin, media_id, filesystem_id" + " FROM remote_media_cache" + " WHERE last_access_ts < ?" 
+ ) + + return self._execute( + "get_remote_media_before", self.cursor_to_dict, sql, before_ts + ) + + def delete_remote_media(self, media_origin, media_id): + def delete_remote_media_txn(txn): + self._simple_delete_txn( + txn, + "remote_media_cache", + keyvalues={ + "media_origin": media_origin, "media_id": media_id + }, + ) + self._simple_delete_txn( + txn, + "remote_media_cache_thumbnails", + keyvalues={ + "media_origin": media_origin, "media_id": media_id + }, + ) + return self.runInteraction("delete_remote_media", delete_remote_media_txn) From f52cb4cd7893ebf4ec3c793c215b3b5eb8efc232 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Jun 2016 15:24:50 +0100 Subject: [PATCH 184/414] Remove race --- synapse/rest/media/v1/media_repository.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 844628c121..692e078419 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -449,7 +449,6 @@ class MediaRepository(object): with (yield self.remote_media_linearizer.queue(key)): full_path = self.filepaths.remote_media_filepath(origin, file_id) - full_dir = os.path.dirname(full_path) try: os.remove(full_path) except OSError as e: @@ -459,22 +458,12 @@ class MediaRepository(object): else: continue - try: - os.removedirs(full_dir) - except OSError: - pass - thumbnail_dir = self.filepaths.remote_media_thumbnail_dir( origin, file_id ) shutil.rmtree(thumbnail_dir, ignore_errors=True) yield self.store.delete_remote_media(origin, media_id) - try: - os.removedirs(thumbnail_dir) - except OSError: - pass - deleted += 1 defer.returnValue({"deleted": deleted}) From f328d95cef99763d056171846253ed68cab58214 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Jun 2016 15:40:58 +0100 Subject: [PATCH 185/414] Feature: Add deactivate account admin API Allows server admins to "deactivate" accounts, which: - Revokes all access tokens - Removes all threepids - Removes password The API is a POST to `/admin/deactivate/` --- synapse/rest/client/v1/admin.py | 26 ++++++++++++++++++++++++++ synapse/storage/_base.py | 5 +++++ synapse/storage/registration.py | 9 +++++++++ 3 files changed, 40 insertions(+) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 8ec8569a49..e54c472e08 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -77,6 +77,32 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) +class DeactivateAccountRestServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") + + def __init__(self, hs): + self.store = hs.get_datastore() + super(DeactivateAccountRestServlet, self).__init__(hs) + + @defer.inlineCallbacks + def on_POST(self, request, target_user_id): + UserID.from_string(target_user_id) + requester = yield self.auth.get_user_by_req(request) + is_admin = yield self.auth.is_server_admin(requester.user) + + if not is_admin: + raise AuthError(403, "You are not a server admin") + + # FIXME: Theoretically there is a race here wherein user resets password + # using threepid. 
+ yield self.store.user_delete_access_tokens(target_user_id) + yield self.store.user_delete_threepids(target_user_id) + yield self.store.user_set_password_hash(target_user_id, None) + + defer.returnValue((200, {})) + + def register_servlets(hs, http_server): WhoisRestServlet(hs).register(http_server) PurgeMediaCacheRestServlet(hs).register(http_server) + DeactivateAccountRestServlet(hs).register(http_server) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 32c6677d47..d766a30299 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -807,6 +807,11 @@ class SQLBaseStore(object): if txn.rowcount > 1: raise StoreError(500, "more than one row matched") + def _simple_delete(self, table, keyvalues, desc): + return self.runInteraction( + desc, self._simple_delete_txn, table, keyvalues + ) + @staticmethod def _simple_delete_txn(txn, table, keyvalues): sql = "DELETE FROM %s WHERE %s" % ( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 3de9e0f709..5c75dbab51 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -384,6 +384,15 @@ class RegistrationStore(SQLBaseStore): defer.returnValue(ret['user_id']) defer.returnValue(None) + def user_delete_threepids(self, user_id): + return self._simple_delete( + "user_threepids", + keyvalues={ + "user_id": user_id, + }, + desc="user_delete_threepids", + ) + @defer.inlineCallbacks def count_all_users(self): """Counts all users registered on the homeserver.""" From be8be535f73e51a29cfa30f1eac266a7a08b695b Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 30 Jun 2016 17:51:28 +0100 Subject: [PATCH 186/414] requestToken update Don't send requestToken request to untrusted ID servers Also correct the THREEPID_IN_USE error to add the M_ prefix. This is a backwards incompatible change, but the only thing using this is the angular client which is now unmaintained, so it's probably better to just do this now.
--- synapse/api/errors.py | 3 ++- synapse/handlers/identity.py | 41 ++++++++++++++++++++++++------------ 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index b106fbed6d..b219b46a4b 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -42,8 +42,9 @@ class Codes(object): TOO_LARGE = "M_TOO_LARGE" EXCLUSIVE = "M_EXCLUSIVE" THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED" - THREEPID_IN_USE = "THREEPID_IN_USE" + THREEPID_IN_USE = "M_THREEPID_IN_USE" INVALID_USERNAME = "M_INVALID_USERNAME" + SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED" class CodeMessageException(RuntimeError): diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 656ce124f9..559e5d5a71 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -21,7 +21,7 @@ from synapse.api.errors import ( ) from ._base import BaseHandler from synapse.util.async import run_on_reactor -from synapse.api.errors import SynapseError +from synapse.api.errors import SynapseError, Codes import json import logging @@ -41,6 +41,20 @@ class IdentityHandler(BaseHandler): hs.config.use_insecure_ssl_client_just_for_testing_do_not_use ) + def _should_trust_id_server(self, id_server): + if id_server not in self.trusted_id_servers: + if self.trust_any_id_server_just_for_testing_do_not_use: + logger.warn( + "Trusting untrustworthy ID server %r even though it isn't" + " in the trusted id list for testing because" + " 'use_insecure_ssl_client_just_for_testing_do_not_use'" + " is set in the config", + id_server, + ) + else: + return False + return True + @defer.inlineCallbacks def threepid_from_creds(self, creds): yield run_on_reactor() @@ -59,19 +73,12 @@ class IdentityHandler(BaseHandler): else: raise SynapseError(400, "No client_secret in creds") - if id_server not in self.trusted_id_servers: - if self.trust_any_id_server_just_for_testing_do_not_use: - logger.warn( - "Trusting untrustworthy ID server %r even though it isn't" - " in the trusted id list for testing because" - " 'use_insecure_ssl_client_just_for_testing_do_not_use'" - " is set in the config", - id_server, - ) - else: - logger.warn('%s is not a trusted ID server: rejecting 3pid ' + - 'credentials', id_server) - defer.returnValue(None) + if not self._should_trust_id_server(id_server): + logger.warn( + '%s is not a trusted ID server: rejecting 3pid ' + + 'credentials', id_server + ) + defer.returnValue(None) data = {} try: @@ -129,6 +136,12 @@ class IdentityHandler(BaseHandler): def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs): yield run_on_reactor() + if not self._should_trust_id_server(id_server): + raise SynapseError( + 400, "Untrusted ID server '%s'" % id_server, + Codes.SERVER_NOT_TRUSTED + ) + params = { 'email': email, 'client_secret': client_secret, From 5a6ef20ef625f1ac2cfb4011ce75ca9453b6a70e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Jul 2016 09:08:35 +0100 Subject: [PATCH 187/414] code_style.rst: add link to google style --- docs/code_style.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/code_style.rst b/docs/code_style.rst index dc40a7ab7b..8cde76149a 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -43,7 +43,8 @@ Basically, PEP8 together, or want to deliberately extend or preserve vertical/horizontal space) -Comments should follow the google code style. 
This is so that we can generate -documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/) +Comments should follow the `google code style `_. +This is so that we can generate documentation with +`sphinx `_. Code should pass pep8 --max-line-length=100 without any warnings. From 41f072fd0ee62e1df37ad8bb98489395a32ca6d3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Jul 2016 09:09:40 +0100 Subject: [PATCH 188/414] code_style.rst: *fix* link to google style --- docs/code_style.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/code_style.rst b/docs/code_style.rst index 8cde76149a..39710ab4ae 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -43,7 +43,7 @@ Basically, PEP8 together, or want to deliberately extend or preserve vertical/horizontal space) -Comments should follow the `google code style `_. +Comments should follow the `google code style `_. This is so that we can generate documentation with `sphinx `_. From 1238203bc47166d1d4ca686e108e84add3bf98b4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 1 Jul 2016 09:36:51 +0100 Subject: [PATCH 189/414] code_style.rst: add link to sphinx examples --- docs/code_style.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/code_style.rst b/docs/code_style.rst index 39710ab4ae..8d73d17beb 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -45,6 +45,8 @@ Basically, PEP8 Comments should follow the `google code style `_. This is so that we can generate documentation with -`sphinx `_. +`sphinx `_. See the +`examples `_ +in the sphinx documentation. Code should pass pep8 --max-line-length=100 without any warnings. From fc8007dbec40212ae85285aea600111ce2d06912 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Sun, 3 Jul 2016 15:08:15 +0900 Subject: [PATCH 190/414] Optionally include password hash in createUser endpoint Signed-off-by: Kent Shikama --- synapse/handlers/register.py | 4 ++-- synapse/rest/client/v1/register.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 0b7517221d..e255f2da81 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -358,7 +358,7 @@ class RegistrationHandler(BaseHandler): defer.returnValue(data) @defer.inlineCallbacks - def get_or_create_user(self, localpart, displayname, duration_seconds): + def get_or_create_user(self, localpart, displayname, duration_seconds, password_hash=None): """Creates a new user if the user does not exist, else revokes all previous access tokens and generates a new one. 
@@ -394,7 +394,7 @@ class RegistrationHandler(BaseHandler): yield self.store.register( user_id=user_id, token=token, - password_hash=None, + password_hash=password_hash, create_profile_with_localpart=user.localpart, ) else: diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index e3f4fbb0bb..ef56d1e90f 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -410,12 +410,14 @@ class CreateUserRestServlet(ClientV1RestServlet): raise SynapseError(400, "Failed to parse 'duration_seconds'") if duration_seconds > self.direct_user_creation_max_duration: duration_seconds = self.direct_user_creation_max_duration + password_hash = user_json["password_hash"].encode("utf-8") if user_json["password_hash"] else None handler = self.handlers.registration_handler user_id, token = yield handler.get_or_create_user( localpart=localpart, displayname=displayname, - duration_seconds=duration_seconds + duration_seconds=duration_seconds, + password_hash=password_hash ) defer.returnValue({ From 2e5a31f1973b49ec1a89cfc042e00b51ba7e70fc Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Mon, 4 Jul 2016 22:00:13 +0900 Subject: [PATCH 191/414] Use .get() instead of [] to access password_hash --- synapse/rest/client/v1/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index ef56d1e90f..a923d5a198 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -410,7 +410,7 @@ class CreateUserRestServlet(ClientV1RestServlet): raise SynapseError(400, "Failed to parse 'duration_seconds'") if duration_seconds > self.direct_user_creation_max_duration: duration_seconds = self.direct_user_creation_max_duration - password_hash = user_json["password_hash"].encode("utf-8") if user_json["password_hash"] else None + password_hash = user_json["password_hash"].encode("utf-8") if user_json.get("password_hash") else None handler = self.handlers.registration_handler user_id, token = yield handler.get_or_create_user( From bb069079bbd0ce761403416ed4f77051352ed347 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Mon, 4 Jul 2016 22:07:11 +0900 Subject: [PATCH 192/414] Fix style violations Signed-off-by: Kent Shikama --- synapse/handlers/register.py | 3 ++- synapse/rest/client/v1/register.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e255f2da81..88c82ba7d0 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -358,7 +358,8 @@ class RegistrationHandler(BaseHandler): defer.returnValue(data) @defer.inlineCallbacks - def get_or_create_user(self, localpart, displayname, duration_seconds, password_hash=None): + def get_or_create_user(self, localpart, displayname, duration_seconds, + password_hash=None): """Creates a new user if the user does not exist, else revokes all previous access tokens and generates a new one. 
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index a923d5a198..d791d5e07e 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -410,7 +410,8 @@ class CreateUserRestServlet(ClientV1RestServlet): raise SynapseError(400, "Failed to parse 'duration_seconds'") if duration_seconds > self.direct_user_creation_max_duration: duration_seconds = self.direct_user_creation_max_duration - password_hash = user_json["password_hash"].encode("utf-8") if user_json.get("password_hash") else None + password_hash = user_json["password_hash"].encode("utf-8") \ + if user_json.get("password_hash") else None handler = self.handlers.registration_handler user_id, token = yield handler.get_or_create_user( From f18d7546c63ae30c4058d1ec6ab2d5c3b001d257 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 4 Jul 2016 15:48:25 +0100 Subject: [PATCH 193/414] Use a query that postgresql optimises better for get_events_around --- synapse/storage/stream.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index b9ad965fd6..4dd11284e5 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -591,25 +591,28 @@ class StreamStore(SQLBaseStore): query_before = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND (topological_ordering < ?" - " OR (topological_ordering = ? AND stream_ordering < ?))" - " ORDER BY topological_ordering DESC, stream_ordering DESC" - " LIMIT ?" + " WHERE room_id = ? AND topological_ordering < ?" + " UNION ALL " + " SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?" + " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" ) query_after = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND (topological_ordering > ?" - " OR (topological_ordering = ? AND stream_ordering > ?))" - " ORDER BY topological_ordering ASC, stream_ordering ASC" - " LIMIT ?" + " WHERE room_id = ? AND topological_ordering > ?" + " UNION ALL" + " SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?" + " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" 
) txn.execute( query_before, ( - room_id, topological_ordering, topological_ordering, - stream_ordering, before_limit, + room_id, topological_ordering, + room_id, topological_ordering, stream_ordering, + before_limit, ) ) @@ -630,8 +633,9 @@ txn.execute( query_after, ( - room_id, topological_ordering, topological_ordering, - stream_ordering, after_limit, + room_id, topological_ordering, + room_id, topological_ordering, stream_ordering, + after_limit, ) ) From a67bf0b074acfca69647030beb9b775359fe684d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 4 Jul 2016 16:02:50 +0100 Subject: [PATCH 194/414] Add storage function to purge history for a room --- synapse/storage/events.py | 140 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 88a6ff7310..98c917ce15 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1281,6 +1281,146 @@ class EventsStore(SQLBaseStore): ) return self.runInteraction("get_all_new_events", get_all_new_events_txn) + def _delete_old_state_txn(self, txn, room_id, topological_ordering): + """Deletes old room state + """ + + # Tables that should be pruned: + # event_auth + # event_backward_extremities + # event_content_hashes + # event_destinations + # event_edge_hashes + # event_edges + # event_forward_extremities + # event_json + # event_push_actions + # event_reference_hashes + # event_search + # event_signatures + # event_to_state_groups + # events + # rejections + # room_depth + # state_groups + # state_groups_state + + # First ensure that we're not about to delete all the forward extremities + txn.execute( + "SELECT e.event_id, e.depth FROM events as e " + "INNER JOIN event_forward_extremities as f " + "ON e.event_id = f.event_id " + "AND e.room_id = f.room_id " + "WHERE f.room_id = ?", + (room_id,) + ) + rows = txn.fetchall() + max_depth = max(row[0] for row in rows) + + if max_depth <= topological_ordering: + raise Exception("topological_ordering is greater than forward extremities") + + txn.execute( + "SELECT event_id, state_key FROM events" + " LEFT JOIN state_events USING (room_id, event_id)" + " WHERE room_id = ? AND topological_ordering < ?", + (room_id, topological_ordering,) + ) + event_rows = txn.fetchall() + + # We calculate the new entries for the backward extremities by finding + # all events that point to events that are to be purged + txn.execute( + "SELECT e.event_id FROM events as e" + " INNER JOIN event_edges as ed ON e.event_id = ed.prev_event_id" + " INNER JOIN events as e2 ON e2.event_id = ed.event_id" + " WHERE e.room_id = ? AND e.topological_ordering < ?" + " AND e2.topological_ordering >= ?", + (room_id, topological_ordering, topological_ordering) + ) + new_backwards_extrems = txn.fetchall() + + # Get all state groups that are only referenced by events that are + # to be deleted. + txn.execute( + "SELECT state_group FROM event_to_state_groups" + " INNER JOIN events USING (event_id)" + " WHERE state_group IN (" + " SELECT DISTINCT state_group FROM events" + " INNER JOIN event_to_state_groups USING (event_id)" + " WHERE room_id = ? AND topological_ordering < ?"
+ " )" + " GROUP BY state_group HAVING MAX(topological_ordering) < ?", + (room_id, topological_ordering, topological_ordering) + ) + state_rows = txn.fetchall() + txn.executemany( + "DELETE FROM state_groups_state WHERE state_group = ?", + state_rows + ) + txn.executemany( + "DELETE FROM state_groups WHERE id = ?", + state_rows + ) + # Delete all non-state + txn.executemany( + "DELETE FROM event_to_state_groups WHERE event_id = ?", + [(event_id,) for event_id, _ in event_rows] + ) + + txn.execute( + "UPDATE room_depth SET min_depth = ? WHERE room_id = ?", + (topological_ordering, room_id,) + ) + + # Delete all remote non-state events + to_delete = [ + (event_id,) for event_id, state_key in event_rows + if state_key is None and not self.hs.is_mine_id(event_id) + ] + to_not_delete = [ + (event_id,) for event_id, state_key in event_rows + if state_key is not None or self.hs.is_mine_id(event_id) + ] + for table in ( + "events", + "event_json", + "event_auth", + "event_content_hashes", + "event_destinations", + "event_edge_hashes", + "event_edges", + "event_forward_extremities", + "event_push_actions", + "event_reference_hashes", + "event_search", + "event_signatures", + "rejections", + "event_backward_extremities", + ): + txn.executemany( + "DELETE FROM %s WHERE event_id = ?" % (table,), + to_delete + ) + + # Update backward extremities + txn.executemany( + "INSERT INTO event_backward_extremities (room_id, event_id)" + " VALUES (?, ?)", + [(room_id, event_id) for event_id, in new_backwards_extrems] + ) + + txn.executemany( + "DELETE FROM events WHERE event_id = ?", + to_delete + ) + # Mark all state and own events as outliers + txn.executemany( + "UPDATE events SET outlier = ?" + " WHERE event_id = ?", + to_not_delete + ) + AllNewEventsResult = namedtuple("AllNewEventsResult", [ "new_forward_events", "new_backfill_events", From 8bdaf5f7afaee98a8cf25d2fb170fe4b2aa97f3d Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Tue, 5 Jul 2016 02:13:52 +0900 Subject: [PATCH 195/414] Add pepper to password hashing Signed-off-by: Kent Shikama --- synapse/config/password.py | 6 +++++- synapse/handlers/auth.py | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/config/password.py b/synapse/config/password.py index dec801ef41..ea822f2bb5 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -23,10 +23,14 @@ class PasswordConfig(Config): def read_config(self, config): password_config = config.get("password_config", {}) self.password_enabled = password_config.get("enabled", True) + self.pepper = password_config.get("pepper", "") def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable password for login. password_config: enabled: true - """ + # Uncomment for extra security for your passwords. + # DO NOT CHANGE THIS AFTER INITIAL SETUP! + #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9" + """ \ No newline at end of file diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 968095c141..fd5fadf73d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -750,7 +750,7 @@ class AuthHandler(BaseHandler): Returns: Hashed password (str). """ - return bcrypt.hashpw(password, bcrypt.gensalt(self.bcrypt_rounds)) + return bcrypt.hashpw(password + self.hs.config.password_config.pepper, bcrypt.gensalt(self.bcrypt_rounds)) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash.
@@ -763,6 +763,7 @@ class AuthHandler(BaseHandler): Whether self.hash(password) == stored_hash (bool). """ if stored_hash: - return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash + return bcrypt.hashpw(password + self.hs.config.password_config.pepper, + stored_hash.encode('utf-8')) == stored_hash else: return False From 0fb76c71ac4bdd00e7524cf11668c13754d29a08 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 4 Jul 2016 19:44:55 +0100 Subject: [PATCH 196/414] Use different SQL for postgres and sqlite3 for when using multicolumn indexes --- synapse/storage/event_push_actions.py | 18 ++--- synapse/storage/stream.py | 100 +++++++++++++------------- 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 5f1b6f63a9..e3e2e8083e 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -16,6 +16,8 @@ from ._base import SQLBaseStore from twisted.internet import defer from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.types import RoomStreamToken +from .stream import lower_bound import logging import ujson as json @@ -73,6 +75,9 @@ class EventPushActionsStore(SQLBaseStore): stream_ordering = results[0][0] topological_ordering = results[0][1] + token = RoomStreamToken( + topological_ordering, stream_ordering + ) sql = ( "SELECT sum(notif), sum(highlight)" @@ -80,15 +85,10 @@ class EventPushActionsStore(SQLBaseStore): " WHERE" " user_id = ?" " AND room_id = ?" - " AND (" - " topological_ordering > ?" - " OR (topological_ordering = ? AND stream_ordering > ?)" - ")" - ) - txn.execute(sql, ( - user_id, room_id, - topological_ordering, topological_ordering, stream_ordering - )) + " AND %s" + ) % (lower_bound(token, self.database_engine, inclusive=""),) + + txn.execute(sql, (user_id, room_id)) row = txn.fetchone() if row: return { diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 4dd11284e5..23b3a40aaf 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -40,6 +40,7 @@ from synapse.util.caches.descriptors import cached from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logcontext import preserve_fn +from synapse.storage.engines import PostgresEngine import logging @@ -54,25 +55,41 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" -def lower_bound(token): +def lower_bound(token, engine, inclusive=""): if token.topological is None: - return "(%d < %s)" % (token.stream, "stream_ordering") + return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering") else: - return "(%d < %s OR (%d = %s AND %d < %s))" % ( + if isinstance(engine, PostgresEngine): + # Postgres doesn't optimise ``(x < a) OR (x=a AND y= %s)" % (token.stream, "stream_ordering") + return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering") else: - return "(%d > %s OR (%d = %s AND %d >= %s))" % ( + if isinstance(engine, PostgresEngine): + # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well + # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes. So we + # use the later form when running against postgres. 
+ return "((%d,%d) >%s (%s,%s))" % ( + token.topological, token.stream, inclusive, + "topological_ordering", "stream_ordering", + ) + return "(%d > %s OR (%d = %s AND %d >%s %s))" % ( token.topological, "topological_ordering", token.topological, "topological_ordering", - token.stream, "stream_ordering", + token.stream, inclusive, "stream_ordering", ) @@ -308,18 +325,22 @@ class StreamStore(SQLBaseStore): args = [False, room_id] if direction == 'b': order = "DESC" - bounds = upper_bound(RoomStreamToken.parse(from_key)) + bounds = upper_bound( + RoomStreamToken.parse(from_key), self.database_engine + ) if to_key: - bounds = "%s AND %s" % ( - bounds, lower_bound(RoomStreamToken.parse(to_key)) - ) + bounds = "%s AND %s" % (bounds, lower_bound( + RoomStreamToken.parse(to_key), self.database_engine + )) else: order = "ASC" - bounds = lower_bound(RoomStreamToken.parse(from_key)) + bounds = lower_bound( + RoomStreamToken.parse(from_key), self.database_engine + ) if to_key: - bounds = "%s AND %s" % ( - bounds, upper_bound(RoomStreamToken.parse(to_key)) - ) + bounds = "%s AND %s" % (bounds, upper_bound( + RoomStreamToken.parse(to_key), self.database_engine + )) if int(limit) > 0: args.append(int(limit)) @@ -586,35 +607,24 @@ class StreamStore(SQLBaseStore): retcols=["stream_ordering", "topological_ordering"], ) - stream_ordering = results["stream_ordering"] - topological_ordering = results["topological_ordering"] + token = RoomStreamToken( + results["topological_ordering"], + results["stream_ordering"], + ) query_before = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering < ?" - " UNION ALL " - " SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?" + " WHERE room_id = ? AND %s" " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" - ) + ) % (upper_bound(token, self.database_engine, inclusive=""),) query_after = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering > ?" - " UNION ALL" - " SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?" + " WHERE room_id = ? AND %s" " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" 
- ) + ) % (lower_bound(token, self.database_engine, inclusive=""),) - txn.execute( - query_before, - ( - room_id, topological_ordering, - room_id, topological_ordering, stream_ordering, - before_limit, - ) - ) + txn.execute(query_before, (room_id, before_limit)) rows = self.cursor_to_dict(txn) events_before = [r["event_id"] for r in rows] @@ -626,18 +636,11 @@ class StreamStore(SQLBaseStore): )) else: start_token = str(RoomStreamToken( - topological_ordering, - stream_ordering - 1, + token.topological, + token.stream - 1, )) - txn.execute( - query_after, - ( - room_id, topological_ordering, - room_id, topological_ordering, stream_ordering, - after_limit, - ) - ) + txn.execute(query_after, (room_id, after_limit)) rows = self.cursor_to_dict(txn) events_after = [r["event_id"] for r in rows] @@ -648,10 +651,7 @@ class StreamStore(SQLBaseStore): rows[-1]["stream_ordering"], )) else: - end_token = str(RoomStreamToken( - topological_ordering, - stream_ordering, - )) + end_token = str(token) return { "before": { From 2d21d43c34751cffb5f324bd58ceff060f65f679 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 10:28:51 +0100 Subject: [PATCH 197/414] Add purge_history API --- synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 13 +++++++++++++ synapse/rest/client/v1/admin.py | 18 ++++++++++++++++++ synapse/storage/events.py | 6 ++++++ 4 files changed, 38 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6c0bc7eafa..351b218247 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1413,7 +1413,7 @@ class FederationHandler(BaseHandler): local_view = dict(auth_events) remote_view = dict(auth_events) remote_view.update({ - (d.type, d.state_key): d for d in different_events + (d.type, d.state_key): d for d in different_events if d }) new_state, prev_state = self.state_handler.resolve_events( diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 15caf1950a..878809d50d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -50,6 +50,19 @@ class MessageHandler(BaseHandler): self.validator = EventValidator() self.snapshot_cache = SnapshotCache() + @defer.inlineCallbacks + def purge_history(self, room_id, event_id): + event = yield self.store.get_event(event_id) + + if event.room_id != room_id: + raise SynapseError(400, "Event is for wrong room.") + + depth = event.depth + + # TODO: Lock. 
+ + yield self.store.delete_old_state(room_id, depth) + @defer.inlineCallbacks def get_messages(self, requester, room_id=None, pagin_config=None, as_client_event=True): diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index e54c472e08..71537a7d0b 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -77,6 +77,24 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) +class PurgeHistoryRestServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns( + "/admin/purge_history/(?P[^/]*)/(?P[^/]*)" + ) + + @defer.inlineCallbacks + def on_POST(self, request, room_id, event_id): + requester = yield self.auth.get_user_by_req(request) + is_admin = yield self.auth.is_server_admin(requester.user) + + if not is_admin: + raise AuthError(403, "You are not a server admin") + + yield self.handlers.message_handler.purge_history(room_id, event_id) + + defer.returnValue((200, {})) + + class DeactivateAccountRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 98c917ce15..c3b498bb3d 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1281,6 +1281,12 @@ class EventsStore(SQLBaseStore): ) return self.runInteraction("get_all_new_events", get_all_new_events_txn) + def delete_old_state(self, room_id, topological_ordering): + return self.runInteraction( + "delete_old_state", + self._delete_old_state_txn, room_id, topological_ordering + ) + def _delete_old_state_txn(self, txn, room_id, topological_ordering): """Deletes old room state """ From d44d11d864714d4d99953bdae6625973519f120f Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 5 Jul 2016 10:39:13 +0100 Subject: [PATCH 198/414] Use true/false for boolean parameter inclusive to avoid potential for sqli, and possibly make the code clearer --- synapse/storage/event_push_actions.py | 2 +- synapse/storage/stream.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index e3e2e8083e..3d93285f84 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -86,7 +86,7 @@ class EventPushActionsStore(SQLBaseStore): " user_id = ?" " AND room_id = ?" " AND %s" - ) % (lower_bound(token, self.database_engine, inclusive=""),) + ) % (lower_bound(token, self.database_engine, inclusive=False),) txn.execute(sql, (user_id, room_id)) row = txn.fetchone() diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 23b3a40aaf..56304999dc 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -55,7 +55,8 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" -def lower_bound(token, engine, inclusive=""): +def lower_bound(token, engine, inclusive=False): + inclusive = "=" if inclusive else "" if token.topological is None: return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering") else: @@ -74,7 +75,8 @@ def lower_bound(token, engine, inclusive=""): ) -def upper_bound(token, engine, inclusive="="): +def upper_bound(token, engine, inclusive=True): + inclusive = "=" if inclusive else "" if token.topological is None: return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering") else: @@ -616,13 +618,13 @@ class StreamStore(SQLBaseStore): "SELECT topological_ordering, stream_ordering, event_id FROM events" " WHERE room_id = ? 
AND %s" " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" - ) % (upper_bound(token, self.database_engine, inclusive=""),) + ) % (upper_bound(token, self.database_engine, inclusive=False),) query_after = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" " WHERE room_id = ? AND %s" " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" - ) % (lower_bound(token, self.database_engine, inclusive=""),) + ) % (lower_bound(token, self.database_engine, inclusive=False),) txn.execute(query_before, (room_id, before_limit)) From 507b8bb0910ef6fae9c7d9cb1405a33c4e4b6e8e Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Tue, 5 Jul 2016 18:42:35 +0900 Subject: [PATCH 199/414] Add comment to prompt changing of pepper --- synapse/config/password.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/config/password.py b/synapse/config/password.py index ea822f2bb5..7c5cb5f0e1 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -31,6 +31,7 @@ class PasswordConfig(Config): password_config: enabled: true # Uncomment for extra security for your passwords. + # Change to a secret random string. # DO NOT CHANGE THIS AFTER INITIAL SETUP! #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9" """ \ No newline at end of file From 1ee258430724618c7014bb176186c23b0b5b06f0 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Tue, 5 Jul 2016 19:01:00 +0900 Subject: [PATCH 200/414] Fix pep8 --- synapse/config/password.py | 2 +- synapse/handlers/auth.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/config/password.py b/synapse/config/password.py index 7c5cb5f0e1..058a3a5346 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -34,4 +34,4 @@ class PasswordConfig(Config): # Change to a secret random string. # DO NOT CHANGE THIS AFTER INITIAL SETUP! #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9" - """ \ No newline at end of file + """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index fd5fadf73d..be46681c64 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -750,7 +750,8 @@ class AuthHandler(BaseHandler): Returns: Hashed password (str). """ - return bcrypt.hashpw(password + self.hs.config.password_config.pepper, bcrypt.gensalt(self.bcrypt_rounds)) + return bcrypt.hashpw(password + self.hs.config.password_config.pepper, + bcrypt.gensalt(self.bcrypt_rounds)) def validate_hash(self, password, stored_hash): """Validates that self.hash(password) == stored_hash. 
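Taken together, the pepper patches above reduce to a small scheme that is worth seeing in one place. The following is an illustrative standalone sketch, not code from the series: the function names and the explicit ``pepper`` argument are invented for clarity, and it assumes the ``bcrypt`` library (recent versions of which require ``bytes`` input, hence the explicit encode)::

    import bcrypt

    def hash_password(password, pepper, rounds=12):
        # gensalt() embeds the work factor in the returned salt, so the
        # stored hash records how it was produced.
        return bcrypt.hashpw((password + pepper).encode("utf-8"),
                             bcrypt.gensalt(rounds))

    def validate_password(password, pepper, stored_hash):
        # stored_hash is the bytes value returned by hash_password.
        # Hashing again with the stored hash as the salt reproduces the
        # stored hash exactly iff the password (plus pepper) matches.
        if not stored_hash:
            return False
        return bcrypt.hashpw((password + pepper).encode("utf-8"),
                             stored_hash) == stored_hash

Because the pepper is mixed into the hash input rather than stored beside the hash, changing it invalidates every existing password hash, which is why the generated config warns "DO NOT CHANGE THIS AFTER INITIAL SETUP!".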
From 14362bf3590eb95a50201a84c8e16d5626b86249 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Tue, 5 Jul 2016 19:12:53 +0900 Subject: [PATCH 201/414] Fix password config --- synapse/config/password.py | 2 +- synapse/handlers/auth.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/config/password.py b/synapse/config/password.py index 058a3a5346..00b1ea3df9 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -23,7 +23,7 @@ class PasswordConfig(Config): def read_config(self, config): password_config = config.get("password_config", {}) self.password_enabled = password_config.get("enabled", True) - self.pepper = password_config.get("pepper", "") + self.password_pepper = password_config.get("pepper", "") def default_config(self, config_dir_path, server_name, **kwargs): return """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index be46681c64..e259213a36 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -750,7 +750,7 @@ class AuthHandler(BaseHandler): Returns: Hashed password (str). """ - return bcrypt.hashpw(password + self.hs.config.password_config.pepper, + return bcrypt.hashpw(password + self.hs.config.password_pepper, bcrypt.gensalt(self.bcrypt_rounds)) def validate_hash(self, password, stored_hash): @@ -764,7 +764,7 @@ class AuthHandler(BaseHandler): Whether self.hash(password) == stored_hash (bool). """ if stored_hash: - return bcrypt.hashpw(password + self.hs.config.password_config.pepper, + return bcrypt.hashpw(password + self.hs.config.password_pepper, stored_hash.encode('utf-8')) == stored_hash else: return False From 252ee2d979f8814ff5bd0f9acb76b9ba3ce86b52 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Tue, 5 Jul 2016 19:15:51 +0900 Subject: [PATCH 202/414] Remove default password pepper string --- synapse/config/password.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/config/password.py b/synapse/config/password.py index 00b1ea3df9..66f0d93eea 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py @@ -30,8 +30,7 @@ class PasswordConfig(Config): # Enable password for login. password_config: enabled: true - # Uncomment for extra security for your passwords. # Change to a secret random string. # DO NOT CHANGE THIS AFTER INITIAL SETUP! - #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9" + #pepper: "" """ From b6b0132ac7cac86e8cc5457783311b4db59e5870 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 5 Jul 2016 13:55:18 +0100 Subject: [PATCH 203/414] Make get_events_around more efficient on sqlite3 --- synapse/storage/stream.py | 62 +++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 56304999dc..f18fb63c5e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -40,7 +40,7 @@ from synapse.util.caches.descriptors import cached from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logcontext import preserve_fn -from synapse.storage.engines import PostgresEngine +from synapse.storage.engines import PostgresEngine, Sqlite3Engine import logging @@ -614,19 +614,55 @@ class StreamStore(SQLBaseStore): results["stream_ordering"], ) - query_before = ( - "SELECT topological_ordering, stream_ordering, event_id FROM events" - " WHERE room_id = ? AND %s" - " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" 
- ) % (upper_bound(token, self.database_engine, inclusive=False),) + if isinstance(self.database_engine, Sqlite3Engine): + # SQLite3 doesn't optimise ``(x < a) OR (x = a AND y < b)`` + # So we pass it to SQLite3 as the UNION ALL of the two queries. + query_before = ( + "SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering < ?" + " UNION ALL" + " SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?" + " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" + ) + before_args = ( + room_id, token.topological, + room_id, token.topological, token.stream, + before_limit, + ) + query_after = ( + "SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering > ?" + " UNION ALL" + " SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?" + " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" + ) + after_args = ( + room_id, token.topological, + room_id, token.topological, token.stream, + after_limit, + ) + else: + query_before = ( + "SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND %s" + " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?" + ) % (upper_bound(token, self.database_engine, inclusive=False),) + + before_args = (room_id, before_limit), + + query_after = ( + "SELECT topological_ordering, stream_ordering, event_id FROM events" + " WHERE room_id = ? AND %s" + " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?" + ) % (lower_bound(token, self.database_engine, inclusive=False),) + + after_args = (room_id, after_limit) + + txn.execute(query_before, before_args) rows = self.cursor_to_dict(txn) events_before = [r["event_id"] for r in rows] @@ -642,7 +678,7 @@ class StreamStore(SQLBaseStore): token.stream - 1, )) - txn.execute(query_after, (room_id, after_limit)) + txn.execute(query_after, after_args) rows = self.cursor_to_dict(txn) events_after = [r["event_id"] for r in rows] From dd2ccee27d834107e86cc18f46a5e4d4aa88d3c9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 5 Jul 2016 14:06:07 +0100 Subject: [PATCH 204/414] Fix typo --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index f18fb63c5e..c08c5b9979 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -652,7 +652,7 @@ class StreamStore(SQLBaseStore): " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
) % (upper_bound(token, self.database_engine, inclusive=False),) - before_args = (room_id, before_limit), + before_args = (room_id, before_limit) query_after = ( "SELECT topological_ordering, stream_ordering, event_id FROM events" From 7335f0addae9ff473403eaaffd7d2b02a9f1991f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 14:44:25 +0100 Subject: [PATCH 205/414] Add ReadWriteLock --- synapse/util/async.py | 82 +++++++++++++++++++++++++++++++++++++ tests/util/test_rwlock.py | 85 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 167 insertions(+) create mode 100644 tests/util/test_rwlock.py diff --git a/synapse/util/async.py b/synapse/util/async.py index 40be7fe7e3..c84b23ff46 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -194,3 +194,85 @@ class Linearizer(object): self.key_to_defer.pop(key, None) defer.returnValue(_ctx_manager()) + + +class ReadWriteLock(object): + """A deferred style read write lock. + + Example: + + with (yield read_write_lock.read("test_key")): + # do some work + """ + + # IMPLEMENTATION NOTES + # + # We track the most recent queued reader and writer deferreds (which get + # resolved when they release the lock). + # + # Read: We know it's safe to acquire a read lock when the latest writer has + # been resolved. The new reader is appended to the list of latest readers. + # + # Write: We know it's safe to acquire the write lock when both the latest + # writers and readers have been resolved. The new writer replaces the latest + # writer. + + def __init__(self): + # Latest readers queued + self.key_to_current_readers = {} + + # Latest writer queued + self.key_to_current_writer = {} + + @defer.inlineCallbacks + def read(self, key): + new_defer = defer.Deferred() + + curr_readers = self.key_to_current_readers.setdefault(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) + + curr_readers.add(new_defer) + + # We wait for the latest writer to finish writing. We can safely ignore + # any existing readers... as they're readers. + yield curr_writer + + @contextmanager + def _ctx_manager(): + try: + yield + finally: + new_defer.callback(None) + self.key_to_current_readers.get(key, set()).discard(new_defer) + + defer.returnValue(_ctx_manager()) + + @defer.inlineCallbacks + def write(self, key): + new_defer = defer.Deferred() + + curr_readers = self.key_to_current_readers.get(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) + + # We wait on all latest readers and writer. + to_wait_on = list(curr_readers) + if curr_writer: + to_wait_on.append(curr_writer) + + # We can clear the list of current readers since the new writer waits + # for them to finish. + curr_readers.clear() + self.key_to_current_writer[key] = new_defer + + yield defer.gatherResults(to_wait_on) + + @contextmanager + def _ctx_manager(): + try: + yield + finally: + new_defer.callback(None) + if self.key_to_current_writer[key] == new_defer: + self.key_to_current_writer.pop(key) + + defer.returnValue(_ctx_manager()) diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py new file mode 100644 index 0000000000..1d745ae1a7 --- /dev/null +++ b/tests/util/test_rwlock.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from tests import unittest + +from synapse.util.async import ReadWriteLock + + +class ReadWriteLockTestCase(unittest.TestCase): + + def _assert_called_before_not_after(self, lst, first_false): + for i, d in enumerate(lst[:first_false]): + self.assertTrue(d.called, msg="%d was unexpectedly false" % i) + + for i, d in enumerate(lst[first_false:]): + self.assertFalse( + d.called, msg="%d was unexpectedly true" % (i + first_false) + ) + + def test_rwlock(self): + rwlock = ReadWriteLock() + + key = object() + + ds = [ + rwlock.read(key), # 0 + rwlock.read(key), # 1 + rwlock.write(key), # 2 + rwlock.write(key), # 3 + rwlock.read(key), # 4 + rwlock.read(key), # 5 + rwlock.write(key), # 6 + ] + + self._assert_called_before_not_after(ds, 2) + + with ds[0].result: + self._assert_called_before_not_after(ds, 2) + self._assert_called_before_not_after(ds, 2) + + with ds[1].result: + self._assert_called_before_not_after(ds, 2) + self._assert_called_before_not_after(ds, 3) + + with ds[2].result: + self._assert_called_before_not_after(ds, 3) + self._assert_called_before_not_after(ds, 4) + + with ds[3].result: + self._assert_called_before_not_after(ds, 4) + self._assert_called_before_not_after(ds, 6) + + with ds[5].result: + self._assert_called_before_not_after(ds, 6) + self._assert_called_before_not_after(ds, 6) + + with ds[4].result: + self._assert_called_before_not_after(ds, 6) + self._assert_called_before_not_after(ds, 7) + + with ds[6].result: + pass + + d = rwlock.write(key) + self.assertTrue(d.called) + with d.result: + pass + + d = rwlock.read(key) + self.assertTrue(d.called) + with d.result: + pass From 8f8798bc0d572af103274fc07d3adac67ce7f51a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 15:30:25 +0100 Subject: [PATCH 206/414] Add ReadWriteLock for pagination and history prune --- synapse/handlers/message.py | 76 +++++++++++++++++++------------------ synapse/storage/stream.py | 4 +- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 878809d50d..ad2753c1b5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -26,7 +26,7 @@ from synapse.types import ( UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id ) from synapse.util import unwrapFirstError -from synapse.util.async import concurrently_execute, run_on_reactor +from synapse.util.async import concurrently_execute, run_on_reactor, ReadWriteLock from synapse.util.caches.snapshot_cache import SnapshotCache from synapse.util.logcontext import preserve_fn from synapse.visibility import filter_events_for_client @@ -50,6 +50,8 @@ class MessageHandler(BaseHandler): self.validator = EventValidator() self.snapshot_cache = SnapshotCache() + self.pagination_lock = ReadWriteLock() + @defer.inlineCallbacks def purge_history(self, room_id, event_id): event = yield self.store.get_event(event_id) @@ -59,9 +61,8 @@ class MessageHandler(BaseHandler): depth = event.depth - # TODO: Lock. 
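# A minimal usage sketch (not from the patch) of the ReadWriteLock introduced
# above, showing the pairing the next hunk wires up: many paginations may hold
# the read lock for a room concurrently, while a history purge takes the write
# lock and waits for all of them to drain first.
from twisted.internet import defer

from synapse.util.async import ReadWriteLock

pagination_lock = ReadWriteLock()

@defer.inlineCallbacks
def paginate(room_id):
    with (yield pagination_lock.read(room_id)):  # shared: readers never block readers
        pass  # ... load events for the response ...

@defer.inlineCallbacks
def purge(room_id):
    with (yield pagination_lock.write(room_id)):  # exclusive per room_id key
        pass  # ... delete old history ...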
- - yield self.store.delete_old_state(room_id, depth) + with (yield self.pagination_lock.write(room_id)): + yield self.store.delete_old_state(room_id, depth) @defer.inlineCallbacks def get_messages(self, requester, room_id=None, pagin_config=None, @@ -98,42 +99,43 @@ class MessageHandler(BaseHandler): source_config = pagin_config.get_source_config("room") - membership, member_event_id = yield self._check_in_room_or_world_readable( - room_id, user_id - ) - - if source_config.direction == 'b': - # if we're going backwards, we might need to backfill. This - # requires that we have a topo token. - if room_token.topological: - max_topo = room_token.topological - else: - max_topo = yield self.store.get_max_topological_token_for_stream_and_room( - room_id, room_token.stream - ) - - if membership == Membership.LEAVE: - # If they have left the room then clamp the token to be before - # they left the room, to save the effort of loading from the - # database. - leave_token = yield self.store.get_topological_token_for_event( - member_event_id - ) - leave_token = RoomStreamToken.parse(leave_token) - if leave_token.topological < max_topo: - source_config.from_key = str(leave_token) - - yield self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, max_topo + with (yield self.pagination_lock.read(room_id)): + membership, member_event_id = yield self._check_in_room_or_world_readable( + room_id, user_id ) - events, next_key = yield data_source.get_pagination_rows( - requester.user, source_config, room_id - ) + if source_config.direction == 'b': + # if we're going backwards, we might need to backfill. This + # requires that we have a topo token. + if room_token.topological: + max_topo = room_token.topological + else: + max_topo = yield self.store.get_max_topological_token( + room_id, room_token.stream + ) - next_token = pagin_config.from_token.copy_and_replace( - "room_key", next_key - ) + if membership == Membership.LEAVE: + # If they have left the room then clamp the token to be before + # they left the room, to save the effort of loading from the + # database. + leave_token = yield self.store.get_topological_token_for_event( + member_event_id + ) + leave_token = RoomStreamToken.parse(leave_token) + if leave_token.topological < max_topo: + source_config.from_key = str(leave_token) + + yield self.hs.get_handlers().federation_handler.maybe_backfill( + room_id, max_topo + ) + + events, next_key = yield data_source.get_pagination_rows( + requester.user, source_config, room_id + ) + + next_token = pagin_config.from_token.copy_and_replace( + "room_key", next_key + ) if not events: defer.returnValue({ diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index b9ad965fd6..3dda2dab55 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -487,13 +487,13 @@ class StreamStore(SQLBaseStore): row["topological_ordering"], row["stream_ordering"],) ) - def get_max_topological_token_for_stream_and_room(self, room_id, stream_key): + def get_max_topological_token(self, room_id, stream_key): sql = ( "SELECT max(topological_ordering) FROM events" " WHERE room_id = ? AND stream_ordering < ?" 
) return self._execute( - "get_max_topological_token_for_stream_and_room", None, + "get_max_topological_token", None, sql, room_id, stream_key, ).addCallback( lambda r: r[0][0] if r else 0 From caf33b2d9be1b992098a00ee61cf4b4009ee3a09 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 17:18:19 +0100 Subject: [PATCH 207/414] Protect password when registering using shared secret --- scripts/register_new_matrix_user | 11 ++++++++--- synapse/rest/client/v1/register.py | 11 +++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 27a6250b14..6d055fd012 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -25,12 +25,17 @@ import urllib2 import yaml -def request_registration(user, password, server_location, shared_secret): +def request_registration(user, password, server_location, shared_secret, admin=False): mac = hmac.new( key=shared_secret, - msg=user, digestmod=hashlib.sha1, - ).hexdigest() + ) + + mac.update(user) + mac.update(password) + mac.update("admin" if admin else "notadmin") + + mac = mac.hexdigest() data = { "user": user, diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index d791d5e07e..0eb7490e5d 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -324,6 +324,8 @@ class RegisterRestServlet(ClientV1RestServlet): raise SynapseError(400, "Shared secret registration is not enabled") user = register_json["user"].encode("utf-8") + password = register_json["password"].encode("utf-8") + admin = register_json.get("admin", None) # str() because otherwise hmac complains that 'unicode' does not # have the buffer interface @@ -331,11 +333,12 @@ class RegisterRestServlet(ClientV1RestServlet): want_mac = hmac.new( key=self.hs.config.registration_shared_secret, - msg=user, digestmod=sha1, - ).hexdigest() - - password = register_json["password"].encode("utf-8") + ) + want_mac.update(user) + want_mac.update(password) + want_mac.update("admin" if admin else "notadmin") + want_mac = want_mac.hexdigest() if compare_digest(want_mac, got_mac): handler = self.handlers.registration_handler From 651faee698d5ff4806d1e0e7f5cd4c438bf434f1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 17:30:22 +0100 Subject: [PATCH 208/414] Add an admin option to shared secret registration --- scripts/register_new_matrix_user | 19 ++++++++-- synapse/handlers/register.py | 4 +- synapse/rest/client/v1/register.py | 1 + synapse/storage/registration.py | 61 +++++++++++++++++++----------- 4 files changed, 58 insertions(+), 27 deletions(-) diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 6d055fd012..987bf32d1c 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -42,6 +42,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F "password": password, "mac": mac, "type": "org.matrix.login.shared_secret", + "admin": admin, } server_location = server_location.rstrip("/") @@ -73,7 +74,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F sys.exit(1) -def register_new_user(user, password, server_location, shared_secret): +def register_new_user(user, password, server_location, shared_secret, admin): if not user: try: default_user = getpass.getuser() @@ -104,7 +105,14 @@ def register_new_user(user, password, server_location, shared_secret): print "Passwords do not match" 
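# A condensed sketch (not part of the patch) of the MAC both sides now compute:
# HMAC-SHA1, keyed with the shared secret, over the user, password and admin
# flag. Note that with no separator between the fields, ("ab", "c") and
# ("a", "bc") would produce identical digests -- the "Add null separator to
# hmac" patch later in this series closes exactly that ambiguity.
import hashlib
import hmac

def registration_mac(shared_secret, user, password, admin=False):
    mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
    mac.update(user)
    mac.update(password)
    mac.update("admin" if admin else "notadmin")
    return mac.hexdigest()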
sys.exit(1) - request_registration(user, password, server_location, shared_secret) + if not admin: + admin = raw_input("Make admin [no]: ") + if admin in ("y", "yes", "true"): + admin = True + else: + admin = False + + request_registration(user, password, server_location, shared_secret, bool(admin)) if __name__ == "__main__": @@ -124,6 +132,11 @@ if __name__ == "__main__": default=None, help="New password for user. Will prompt if omitted.", ) + parser.add_argument( + "-a", "--admin", + action="store_true", + help="Register new user as an admin. Will prompt if omitted.", + ) group = parser.add_mutually_exclusive_group(required=True) group.add_argument( @@ -156,4 +169,4 @@ if __name__ == "__main__": else: secret = args.shared_secret - register_new_user(args.user, args.password, args.server_url, secret) + register_new_user(args.user, args.password, args.server_url, secret, args.admin) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 88c82ba7d0..8c3381df8a 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -90,7 +90,8 @@ class RegistrationHandler(BaseHandler): password=None, generate_token=True, guest_access_token=None, - make_guest=False + make_guest=False, + admin=False, ): """Registers a new client on the server. @@ -141,6 +142,7 @@ class RegistrationHandler(BaseHandler): # If the user was a guest then they already have a profile None if was_guest else user.localpart ), + admin=admin, ) else: # autogen a sequential user ID diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 0eb7490e5d..25d63a0b0b 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -345,6 +345,7 @@ class RegisterRestServlet(ClientV1RestServlet): user_id, token = yield handler.register( localpart=user, password=password, + admin=bool(admin), ) self._remove_session(session) defer.returnValue({ diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 5c75dbab51..4999175ddb 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -77,7 +77,7 @@ class RegistrationStore(SQLBaseStore): @defer.inlineCallbacks def register(self, user_id, token, password_hash, was_guest=False, make_guest=False, appservice_id=None, - create_profile_with_localpart=None): + create_profile_with_localpart=None, admin=False): """Attempts to register an account. Args: @@ -104,6 +104,7 @@ class RegistrationStore(SQLBaseStore): make_guest, appservice_id, create_profile_with_localpart, + admin ) self.get_user_by_id.invalidate((user_id,)) self.is_guest.invalidate((user_id,)) @@ -118,6 +119,7 @@ class RegistrationStore(SQLBaseStore): make_guest, appservice_id, create_profile_with_localpart, + admin, ): now = int(self.clock.time()) @@ -125,29 +127,42 @@ class RegistrationStore(SQLBaseStore): try: if was_guest: - txn.execute("UPDATE users SET" - " password_hash = ?," - " upgrade_ts = ?," - " is_guest = ?" - " WHERE name = ?", - [password_hash, now, 1 if make_guest else 0, user_id]) + txn.execute( + "UPDATE users SET" + " password_hash = ?," + " upgrade_ts = ?," + " is_guest = ?," + " admin = ?" 
+ " WHERE name = ?", + (password_hash, now, 1 if make_guest else 0, admin, user_id,) + ) + self._simple_update_one_txn( + txn, + "users", + keyvalues={ + "name": user_id, + }, + updatevalues={ + "password_hash": password_hash, + "upgrade_ts": now, + "is_guest": 1 if make_guest else 0, + "appservice_id": appservice_id, + "admin": admin, + } + ) else: - txn.execute("INSERT INTO users " - "(" - " name," - " password_hash," - " creation_ts," - " is_guest," - " appservice_id" - ") " - "VALUES (?,?,?,?,?)", - [ - user_id, - password_hash, - now, - 1 if make_guest else 0, - appservice_id, - ]) + self._simple_insert_txn( + txn, + "users", + values={ + "name": user_id, + "password_hash": password_hash, + "creation_ts": now, + "is_guest": 1 if make_guest else 0, + "appservice_id": appservice_id, + "admin": admin, + } + ) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE From 4adf93e0f743338c929860a1384beabeae9fded8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 17:34:25 +0100 Subject: [PATCH 209/414] Fix for postgres --- synapse/storage/registration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4999175ddb..232dcfd9eb 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -147,7 +147,7 @@ class RegistrationStore(SQLBaseStore): "upgrade_ts": now, "is_guest": 1 if make_guest else 0, "appservice_id": appservice_id, - "admin": admin, + "admin": 1 if admin else 0, } ) else: @@ -160,7 +160,7 @@ class RegistrationStore(SQLBaseStore): "creation_ts": now, "is_guest": 1 if make_guest else 0, "appservice_id": appservice_id, - "admin": admin, + "admin": 1 if admin else 0, } ) except self.database_engine.module.IntegrityError: From be3548f7e14f411b0bb4d176ea0977672ed58252 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Jul 2016 17:46:51 +0100 Subject: [PATCH 210/414] Remove spurious txn --- synapse/storage/registration.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 232dcfd9eb..0a68341494 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -127,15 +127,6 @@ class RegistrationStore(SQLBaseStore): try: if was_guest: - txn.execute( - "UPDATE users SET" - " password_hash = ?," - " upgrade_ts = ?," - " is_guest = ?," - " admin = ?" - " WHERE name = ?", - (password_hash, now, 1 if make_guest else 0, admin, user_id,) - ) self._simple_update_one_txn( txn, "users", From 896bc6cd464c4e2807a6751bd2de8039bbe1fc63 Mon Sep 17 00:00:00 2001 From: Kent Shikama Date: Wed, 6 Jul 2016 12:17:54 +0900 Subject: [PATCH 211/414] Update hash_password script Signed-off-by: Kent Shikama --- scripts/hash_password | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/scripts/hash_password b/scripts/hash_password index e784600989..215ab25cfe 100755 --- a/scripts/hash_password +++ b/scripts/hash_password @@ -1,10 +1,16 @@ #!/usr/bin/env python import argparse + +import sys + import bcrypt import getpass +import yaml + bcrypt_rounds=12 +password_pepper = "" def prompt_for_pass(): password = getpass.getpass("Password: ") @@ -28,12 +34,22 @@ if __name__ == "__main__": default=None, help="New password for user. Will prompt if omitted.", ) + parser.add_argument( + "-c", "--config", + type=argparse.FileType('r'), + help="Path to server config file. 
Used to read in bcrypt_rounds and password_pepper.",
+ )
 args = parser.parse_args()
+ if "config" in args and args.config:
+ config = yaml.safe_load(args.config)
+ bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
+ password_config = config.get("password_config", {})
+ password_pepper = password_config.get("pepper", password_pepper)
 password = args.password
 if not password:
 password = prompt_for_pass()

- print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
+ print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))

From 8d9a884cee0b3ee5b18b0d037592bb9e5c3ae943 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Wed, 6 Jul 2016 12:18:19 +0900
Subject: [PATCH 212/414] Update password config comment

Signed-off-by: Kent Shikama
---
 synapse/config/password.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/config/password.py b/synapse/config/password.py
index 66f0d93eea..a4bd171399 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -30,7 +30,7 @@ class PasswordConfig(Config):
 # Enable password for login.
 password_config:
 enabled: true
- # Change to a secret random string.
+ # Uncomment and change to a secret random string for extra security.
 # DO NOT CHANGE THIS AFTER INITIAL SETUP!
 #pepper: ""
 """

From 0da24cac8bde47961396f7da774d8dc8ed847107 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 11:04:44 +0100
Subject: [PATCH 213/414] Add null separator to hmac

---
 scripts/register_new_matrix_user | 2 ++
 synapse/rest/client/v1/register.py | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 987bf32d1c..12ed20d623 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -32,7 +32,9 @@ def request_registration(user, password, server_location, shared_secret, admin=F
 )
 mac.update(user)
+ mac.update("\x00")
 mac.update(password)
+ mac.update("\x00")
 mac.update("admin" if admin else "notadmin")
 mac = mac.hexdigest()

diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index 25d63a0b0b..83872f5f60 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -336,7 +336,9 @@ class RegisterRestServlet(ClientV1RestServlet):
 digestmod=sha1,
 )
 want_mac.update(user)
+ want_mac.update("\x00")
 want_mac.update(password)
+ want_mac.update("\x00")
 want_mac.update("admin" if admin else "notadmin")
 want_mac = want_mac.hexdigest()

From 76b18df3d95cd881017a9aa5c8473409928faecd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 11:16:10 +0100
Subject: [PATCH 214/414] Check that there are no null bytes in user and password

---
 synapse/rest/client/v1/register.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index 83872f5f60..ce7099b18f 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -327,6 +327,12 @@ class RegisterRestServlet(ClientV1RestServlet):
 password = register_json["password"].encode("utf-8")
 admin = register_json.get("admin", None)
+ # It's important to check as we use null bytes as HMAC field separators
+ if "\x00" in user:
+ raise SynapseError(400, "Invalid user")
+ if "\x00" in password:
+ raise SynapseError(400, "Invalid password")
+
 # str() because otherwise hmac complains that 'unicode' does not
 # have the buffer interface
 got_mac = str(register_json["mac"])

From 67f2c901ea4196d869380c1c5cdd8569934857ed
Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 15:56:59 +0100
Subject: [PATCH 215/414] Add rest servlet. Fix SQL.

---
 synapse/rest/client/v1/admin.py | 1 +
 synapse/storage/events.py | 9 ++++-----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index 71537a7d0b..b0cb31a448 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -124,3 +124,4 @@ def register_servlets(hs, http_server):
 WhoisRestServlet(hs).register(http_server)
 PurgeMediaCacheRestServlet(hs).register(http_server)
 DeactivateAccountRestServlet(hs).register(http_server)
+ PurgeHistoryRestServlet(hs).register(http_server)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index c3b498bb3d..23ebd5d4c5 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1384,10 +1384,6 @@ class EventsStore(SQLBaseStore):
 (event_id,) for event_id, state_key in event_rows
 if state_key is None and not self.hs.is_mine_id(event_id)
 ]
- to_not_delete = [
- (event_id,) for event_id, state_key in event_rows
- if state_key is not None or self.hs.is_mine_id(event_id)
- ]
 for table in (
 "events",
 "event_json",
@@ -1424,7 +1420,10 @@ class EventsStore(SQLBaseStore):
 txn.executemany(
 "UPDATE events SET outlier = ?"
 " WHERE event_id = ?",
- to_not_delete
+ [
+ (True, event_id,) for event_id, state_key in event_rows
+ if state_key is not None or self.hs.is_mine_id(event_id)
+ ]
 )

From c98e1479bd39a64add0456299644e96480151625 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 11:41:07 +0100
Subject: [PATCH 216/414] Return 400 rather than 500

---
 synapse/storage/events.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 23ebd5d4c5..c2136f3fd1 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,6 +23,7 @@ from synapse.util.async import ObservableDeferred
 from synapse.util.logcontext import preserve_fn, PreserveLoggingContext
 from synapse.util.logutils import log_function
 from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
 from canonicaljson import encode_canonical_json
 from collections import deque, namedtuple

@@ -1324,7 +1325,9 @@ class EventsStore(SQLBaseStore):
 max_depth = max(row[0] for row in rows)

 if max_depth <= topological_ordering:
- raise Exception("topological_ordering is greater than forward extremeties")
+ raise SynapseError(
+ 400, "topological_ordering is greater than forward extremities"
+ )

 txn.execute(
 "SELECT event_id, state_key FROM events"

From b92e7955be10209fdd13cdb799b1ac55c981d086 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 11:42:15 +0100
Subject: [PATCH 217/414] Comment

---
 synapse/storage/events.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index c2136f3fd1..b582942164 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1325,6 +1325,9 @@ class EventsStore(SQLBaseStore):
 max_depth = max(row[0] for row in rows)

 if max_depth <= topological_ordering:
+ # We need to ensure we don't delete all the events from the database
+ # otherwise we wouldn't be able to send any events (due to not
+ # having any backwards extremities)
 raise SynapseError(
 400, "topological_ordering is greater than forward extremities"
 )

From 067596d341a661e008195f7f3a6887ade7cafa32 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul
2016 16:11:37 +0100 Subject: [PATCH 218/414] Fix bug where we did not correctly explode when multiple user_ids were set in macaroon --- synapse/api/auth.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 31e1abb964..a4d658a9d0 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -637,17 +637,22 @@ class Auth(object): try: macaroon = pymacaroons.Macaroon.deserialize(macaroon_str) - self.validate_macaroon(macaroon, rights, self.hs.config.expire_access_token) - user_prefix = "user_id = " user = None + user_id = None guest = False for caveat in macaroon.caveats: if caveat.caveat_id.startswith(user_prefix): - user = UserID.from_string(caveat.caveat_id[len(user_prefix):]) + user_id = caveat.caveat_id[len(user_prefix):] + user = UserID.from_string(user_id) elif caveat.caveat_id == "guest = true": guest = True + self.validate_macaroon( + macaroon, rights, self.hs.config.expire_access_token, + user_id=user_id, + ) + if user is None: raise AuthError( self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon", @@ -692,7 +697,7 @@ class Auth(object): errcode=Codes.UNKNOWN_TOKEN ) - def validate_macaroon(self, macaroon, type_string, verify_expiry): + def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): """ validate that a Macaroon is understood by and was signed by this server. @@ -707,7 +712,7 @@ class Auth(object): v = pymacaroons.Verifier() v.satisfy_exact("gen = 1") v.satisfy_exact("type = " + type_string) - v.satisfy_general(lambda c: c.startswith("user_id = ")) + v.satisfy_exact("user_id = %s" % user_id) v.satisfy_exact("guest = true") if verify_expiry: v.satisfy_general(self._verify_expiry) From f90cf150e2b51124bb6848980394c4368e0de73a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Jul 2016 16:33:00 +0100 Subject: [PATCH 219/414] Bump version and changelog --- CHANGES.rst | 8 ++++++++ synapse/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index ecaaa189d0..e1d5e876dc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,11 @@ +Changes in synapse v0.16.1-r1 (2016-07-08) +========================================== + +THIS IS A CRITICAL SECURITY UPDATE. + +This fixes a bug which allowed users' accounts to be accessed by unauthorised +users. + Changes in synapse v0.16.1 (2016-06-20) ======================================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 3cd79b1247..2750ad3f7a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.16.1" +__version__ = "0.16.1-r1" From 10c843fcfbd6c3f6bcc13c5b9c71c9007ee54480 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 Jul 2016 15:15:55 +0100 Subject: [PATCH 220/414] Ensure that the guest user is in the database when upgrading accounts --- synapse/storage/registration.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 0a68341494..3a675e53f6 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -127,11 +127,24 @@ class RegistrationStore(SQLBaseStore): try: if was_guest: + # Ensure that the guest user actually exists + self._simple_select_one_txn( + txn, + "users", + keyvalues={ + "name": user_id, + "is_guest": 1, + }, + retcols=("name",), + allow_none=False, + ) + self._simple_update_one_txn( txn, "users", keyvalues={ "name": user_id, + "is_guest": 1, }, updatevalues={ "password_hash": password_hash, From dfde67a6fe22535558552060820abfca047540f3 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 8 Jul 2016 15:57:06 +0100 Subject: [PATCH 221/414] Add a comment explaining allow_none --- synapse/storage/registration.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 3a675e53f6..d957a629dc 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -128,6 +128,8 @@ class RegistrationStore(SQLBaseStore): try: if was_guest: # Ensure that the guest user actually exists + # ``allow_none=False`` makes this raise an exception + # if the row isn't in the database. self._simple_select_one_txn( txn, "users", From 385aec401015b12b763f630abf48ad2b8b30649c Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 8 Jul 2016 17:42:48 +0100 Subject: [PATCH 222/414] Implement https://github.com/matrix-org/matrix-doc/pull/346/files --- synapse/api/errors.py | 1 + synapse/rest/client/v2_alpha/account.py | 59 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index b219b46a4b..0041646858 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -43,6 +43,7 @@ class Codes(object): EXCLUSIVE = "M_EXCLUSIVE" THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED" THREEPID_IN_USE = "M_THREEPID_IN_USE" + THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND" INVALID_USERNAME = "M_INVALID_USERNAME" SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED" diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 9a84873a5f..1c37f91312 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -36,11 +36,16 @@ class PasswordRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() + self.identity_handler = hs.get_handlers().identity_handler @defer.inlineCallbacks def on_POST(self, request): yield run_on_reactor() + if '/account/password/email/requestToken' in request.path: + ret = yield self.onPasswordEmailTokenRequest(request) + defer.returnValue(ret) + body = parse_json_object_from_request(request) authed, result, params, _ = yield self.auth_handler.check_auth([ @@ -85,6 +90,29 @@ class PasswordRestServlet(RestServlet): defer.returnValue((200, {})) + @defer.inlineCallbacks + def onPasswordEmailTokenRequest(self, request): + body = parse_json_object_from_request(request) + + required = ['id_server', 'client_secret', 'email', 'send_attempt'] + absent = [] + for k in required: + if k not 
in body: + absent.append(k) + + if len(absent) > 0: + raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) + + existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + 'email', body['email'] + ) + + if existingUid is None: + raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) + + ret = yield self.identity_handler.requestEmailToken(**body) + defer.returnValue((200, ret)) + def on_OPTIONS(self, _): return 200, {} @@ -115,6 +143,10 @@ class ThreepidRestServlet(RestServlet): def on_POST(self, request): yield run_on_reactor() + if '/account/3pid/email/requestToken' in request.path: + ret = yield self.onThreepidEmailTokenRequest(request) + defer.returnValue(ret) + body = parse_json_object_from_request(request) threePidCreds = body.get('threePidCreds') @@ -155,6 +187,33 @@ class ThreepidRestServlet(RestServlet): defer.returnValue((200, {})) + @defer.inlineCallbacks + def onThreepidEmailTokenRequest(self, request): + body = parse_json_object_from_request(request) + + logger.error("hi") + + required = ['id_server', 'client_secret', 'email', 'send_attempt'] + absent = [] + for k in required: + if k not in body: + absent.append(k) + + if len(absent) > 0: + raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) + + existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + 'email', body['email'] + ) + + logger.error("existing %r", existingUid) + + if existingUid is not None: + raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) + + ret = yield self.identity_handler.requestEmailToken(**body) + defer.returnValue((200, ret)) + def register_servlets(hs, http_server): PasswordRestServlet(hs).register(http_server) From 9c491366c51b2a0ed23e1f3ead80b7ac4307d46f Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 11 Jul 2016 09:07:40 +0100 Subject: [PATCH 223/414] Oops, remove debug logging --- synapse/rest/client/v2_alpha/account.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 1c37f91312..e2bbfc9d93 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -191,8 +191,6 @@ class ThreepidRestServlet(RestServlet): def onThreepidEmailTokenRequest(self, request): body = parse_json_object_from_request(request) - logger.error("hi") - required = ['id_server', 'client_secret', 'email', 'send_attempt'] absent = [] for k in required: @@ -206,8 +204,6 @@ class ThreepidRestServlet(RestServlet): 'email', body['email'] ) - logger.error("existing %r", existingUid) - if existingUid is not None: raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) From a5db0026ede13159e340db8612bf4cafba8f6ab6 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 11 Jul 2016 09:57:07 +0100 Subject: [PATCH 224/414] Separate out requestTokens to separate handlers --- synapse/rest/client/v2_alpha/account.py | 93 ++++++++++++++---------- synapse/rest/client/v2_alpha/register.py | 65 ++++++++++------- 2 files changed, 93 insertions(+), 65 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e2bbfc9d93..8a53617629 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -28,24 +28,54 @@ import logging logger = logging.getLogger(__name__) +class PasswordRequestTokenRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/account/password/email/requestToken$") + + def 
__init__(self, hs): + super(PasswordRequestTokenRestServlet, self).__init__() + self.hs = hs + self.identity_handler = hs.get_handlers().identity_handler + + @defer.inlineCallbacks + def on_POST(self, request): + body = parse_json_object_from_request(request) + + required = ['id_server', 'client_secret', 'email', 'send_attempt'] + absent = [] + for k in required: + if k not in body: + absent.append(k) + + if len(absent) > 0: + raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) + + existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + 'email', body['email'] + ) + + if existingUid is None: + raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) + + ret = yield self.identity_handler.requestEmailToken(**body) + defer.returnValue((200, ret)) + + def on_OPTIONS(self, _): + return 200, {} + + class PasswordRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password") + PATTERNS = client_v2_patterns("/account/password$") def __init__(self, hs): super(PasswordRestServlet, self).__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - self.identity_handler = hs.get_handlers().identity_handler @defer.inlineCallbacks def on_POST(self, request): yield run_on_reactor() - if '/account/password/email/requestToken' in request.path: - ret = yield self.onPasswordEmailTokenRequest(request) - defer.returnValue(ret) - body = parse_json_object_from_request(request) authed, result, params, _ = yield self.auth_handler.check_auth([ @@ -90,8 +120,20 @@ class PasswordRestServlet(RestServlet): defer.returnValue((200, {})) + def on_OPTIONS(self, _): + return 200, {} + + +class ThreepidRequestTokenRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") + + def __init__(self, hs): + self.hs = hs + super(ThreepidRequestTokenRestServlet, self).__init__() + self.identity_handler = hs.get_handlers().identity_handler + @defer.inlineCallbacks - def onPasswordEmailTokenRequest(self, request): + def on_POST(self, request): body = parse_json_object_from_request(request) required = ['id_server', 'client_secret', 'email', 'send_attempt'] @@ -107,8 +149,10 @@ class PasswordRestServlet(RestServlet): 'email', body['email'] ) - if existingUid is None: - raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) + logger.error("existing %r", existingUid) + + if existingUid is not None: + raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) defer.returnValue((200, ret)) @@ -118,7 +162,7 @@ class PasswordRestServlet(RestServlet): class ThreepidRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid") + PATTERNS = client_v2_patterns("/account/3pid$") def __init__(self, hs): super(ThreepidRestServlet, self).__init__() @@ -143,10 +187,6 @@ class ThreepidRestServlet(RestServlet): def on_POST(self, request): yield run_on_reactor() - if '/account/3pid/email/requestToken' in request.path: - ret = yield self.onThreepidEmailTokenRequest(request) - defer.returnValue(ret) - body = parse_json_object_from_request(request) threePidCreds = body.get('threePidCreds') @@ -187,30 +227,9 @@ class ThreepidRestServlet(RestServlet): defer.returnValue((200, {})) - @defer.inlineCallbacks - def onThreepidEmailTokenRequest(self, request): - body = parse_json_object_from_request(request) - - required = ['id_server', 'client_secret', 'email', 'send_attempt'] - absent = [] - for k in required: - if k not in body: - 
absent.append(k) - - if len(absent) > 0: - raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) - - existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( - 'email', body['email'] - ) - - if existingUid is not None: - raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - - ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) - def register_servlets(hs, http_server): + PasswordRequestTokenRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) + ThreepidRequestTokenRestServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2088c316d1..e5944b99b1 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -41,8 +41,43 @@ else: logger = logging.getLogger(__name__) +class RegisterRequestTokenRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/register/email/requestToken$") + + def __init__(self, hs): + super(RegisterRequestTokenRestServlet, self).__init__() + self.hs = hs + self.identity_handler = hs.get_handlers().identity_handler + + @defer.inlineCallbacks + def on_POST(self, request): + body = parse_json_object_from_request(request) + + required = ['id_server', 'client_secret', 'email', 'send_attempt'] + absent = [] + for k in required: + if k not in body: + absent.append(k) + + if len(absent) > 0: + raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) + + existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( + 'email', body['email'] + ) + + if existingUid is not None: + raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) + + ret = yield self.identity_handler.requestEmailToken(**body) + defer.returnValue((200, ret)) + + def on_OPTIONS(self, _): + return 200, {} + + class RegisterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register") + PATTERNS = client_v2_patterns("/register$") def __init__(self, hs): super(RegisterRestServlet, self).__init__() @@ -70,10 +105,6 @@ class RegisterRestServlet(RestServlet): "Do not understand membership kind: %s" % (kind,) ) - if '/register/email/requestToken' in request.path: - ret = yield self.onEmailTokenRequest(request) - defer.returnValue(ret) - body = parse_json_object_from_request(request) # we do basic sanity checks here because the auth layer will store these @@ -305,29 +336,6 @@ class RegisterRestServlet(RestServlet): "refresh_token": refresh_token, }) - @defer.inlineCallbacks - def onEmailTokenRequest(self, request): - body = parse_json_object_from_request(request) - - required = ['id_server', 'client_secret', 'email', 'send_attempt'] - absent = [] - for k in required: - if k not in body: - absent.append(k) - - if len(absent) > 0: - raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM) - - existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( - 'email', body['email'] - ) - - if existingUid is not None: - raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - - ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) - @defer.inlineCallbacks def _do_guest_registration(self): if not self.hs.config.allow_guest_access: @@ -345,4 +353,5 @@ class RegisterRestServlet(RestServlet): def register_servlets(hs, http_server): + RegisterRequestTokenRestServlet(hs).register(http_server) 
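# The three requestToken servlets above repeat the same required-field check.
# A hypothetical helper (not in this patch series) capturing the pattern in
# the list-comprehension style the "be more pythonic" patch below moves towards:
from synapse.api.errors import Codes, SynapseError

def assert_params_in_body(body, required):
    # Report every missing key at once rather than failing one at a time.
    absent = [k for k in required if k not in body]
    if absent:
        raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)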
RegisterRestServlet(hs).register(http_server)

From 75fa7f6b3ceae5cf1eeda8f28149796eecdcd133 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 12 Jul 2016 14:08:57 +0100
Subject: [PATCH 225/414] Remove other debug logging

---
 synapse/rest/client/v2_alpha/account.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 8a53617629..d85b2d08aa 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -149,8 +149,6 @@ class ThreepidRequestTokenRestServlet(RestServlet):
 'email', body['email']
 )

- logger.error("existing %r", existingUid)
-
 if existingUid is not None:
 raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)

From aaa9d9f0e1d761655c4976a45a76dfba31f067de Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 12 Jul 2016 14:13:14 +0100
Subject: [PATCH 226/414] on_OPTIONS isn't necessary

---
 synapse/rest/client/v2_alpha/account.py | 8 +-------
 synapse/rest/client/v2_alpha/register.py | 3 ---
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index d85b2d08aa..64e9ae0c45 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -46,7 +46,7 @@ class PasswordRequestTokenRestServlet(RestServlet):
 if k not in body:
 absent.append(k)

- if len(absent) > 0:
+ if absent:
 raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)

 existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
@@ -59,9 +59,6 @@ class PasswordRequestTokenRestServlet(RestServlet):
 ret = yield self.identity_handler.requestEmailToken(**body)
 defer.returnValue((200, ret))

- def on_OPTIONS(self, _):
- return 200, {}
-

 class PasswordRestServlet(RestServlet):
 PATTERNS = client_v2_patterns("/account/password$")
@@ -155,9 +152,6 @@ class ThreepidRequestTokenRestServlet(RestServlet):
 ret = yield self.identity_handler.requestEmailToken(**body)
 defer.returnValue((200, ret))

- def on_OPTIONS(self, _):
- return 200, {}
-

 class ThreepidRestServlet(RestServlet):
 PATTERNS = client_v2_patterns("/account/3pid$")

diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index e5944b99b1..7c6d2942dc 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -72,9 +72,6 @@ class RegisterRequestTokenRestServlet(RestServlet):
 ret = yield self.identity_handler.requestEmailToken(**body)
 defer.returnValue((200, ret))

- def on_OPTIONS(self, _):
- return 200, {}
-

 class RegisterRestServlet(RestServlet):
 PATTERNS = client_v2_patterns("/register$")

From c55ad2e3755487727e8760e7aab2fc21182d5948 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 12 Jul 2016 14:15:10 +0100
Subject: [PATCH 227/414] be more pythonic

---
 synapse/rest/client/v2_alpha/account.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 64e9ae0c45..47f78eba8c 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -139,7 +139,7 @@ class ThreepidRequestTokenRestServlet(RestServlet):
 if k not in body:
 absent.append(k)

- if len(absent) > 0:
+ if absent:
 raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)

 existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(

From 560c71c7352946f70f58d6fc3d0c459084127b21 Mon
Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Jul 2016 13:07:19 +0100 Subject: [PATCH 228/414] Check creation event's room_id domain matches sender's --- synapse/api/auth.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a4d658a9d0..29b4ac456c 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -86,6 +86,13 @@ class Auth(object): return True if event.type == EventTypes.Create: + room_id_domain = get_domain_from_id(event.room_id) + sender_domain = get_domain_from_id(event.sender) + if room_id_domain != sender_domain: + raise AuthError( + 403, + "Creation event's room_id domain does not match sender's" + ) # FIXME return True From 2cb758ac75e529d9d093122a207ec43bcfa5f067 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Jul 2016 13:12:25 +0100 Subject: [PATCH 229/414] Check if alias event's state_key matches sender's domain --- synapse/api/auth.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 29b4ac456c..e05defd7d8 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -115,6 +115,17 @@ class Auth(object): # FIXME: Temp hack if event.type == EventTypes.Aliases: + if not event.state_key: + raise AuthError( + 403, + "Alias event must have non-empty state_key" + ) + sender_domain = get_domain_from_id(event.sender) + if event.state_key != sender_domain: + raise AuthError( + 403, + "Alias event's state_key does not match sender's domain" + ) return True logger.debug( From 0136a522b18a734db69171d60566f501c0ced663 Mon Sep 17 00:00:00 2001 From: Negar Fazeli Date: Fri, 8 Jul 2016 16:53:18 +0200 Subject: [PATCH 230/414] Bug fix: expire invalid access tokens --- synapse/api/auth.py | 3 +++ synapse/handlers/auth.py | 5 +++-- synapse/handlers/register.py | 6 +++--- synapse/rest/client/v1/register.py | 2 +- tests/api/test_auth.py | 31 +++++++++++++++++++++++++++++- tests/handlers/test_register.py | 4 ++-- 6 files changed, 42 insertions(+), 9 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a4d658a9d0..521a52e001 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -629,7 +629,10 @@ class Auth(object): except AuthError: # TODO(daniel): Remove this fallback when all existing access tokens # have been re-issued as macaroons. 
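# A condensed sketch (not from the patches) of the two federation auth checks
# added above, assuming get_domain_from_id() returns the server-name part of a
# Matrix ID:
from synapse.api.errors import AuthError
from synapse.types import get_domain_from_id

def check_create_and_alias_domains(event):
    if event.type == "m.room.create":
        # A room may only be created by a user on the same server as the room ID.
        if get_domain_from_id(event.room_id) != get_domain_from_id(event.sender):
            raise AuthError(403, "Creation event's room_id domain does not match sender's")
    elif event.type == "m.room.aliases":
        # A server may only publish aliases under its own domain.
        if event.state_key != get_domain_from_id(event.sender):
            raise AuthError(403, "Alias event's state_key does not match sender's domain")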
+ if self.hs.config.expire_access_token: + raise ret = yield self._look_up_user_by_access_token(token) + defer.returnValue(ret) @defer.inlineCallbacks diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index e259213a36..5a0ed9d6b9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -637,12 +637,13 @@ class AuthHandler(BaseHandler): yield self.store.add_refresh_token_to_user(user_id, refresh_token) defer.returnValue(refresh_token) - def generate_access_token(self, user_id, extra_caveats=None): + def generate_access_token(self, user_id, extra_caveats=None, + duration_in_ms=(60 * 60 * 1000)): extra_caveats = extra_caveats or [] macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = access") now = self.hs.get_clock().time_msec() - expiry = now + (60 * 60 * 1000) + expiry = now + duration_in_ms macaroon.add_first_party_caveat("time < %d" % (expiry,)) for caveat in extra_caveats: macaroon.add_first_party_caveat(caveat) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 8c3381df8a..6b33b27149 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -360,7 +360,7 @@ class RegistrationHandler(BaseHandler): defer.returnValue(data) @defer.inlineCallbacks - def get_or_create_user(self, localpart, displayname, duration_seconds, + def get_or_create_user(self, localpart, displayname, duration_in_ms, password_hash=None): """Creates a new user if the user does not exist, else revokes all previous access tokens and generates a new one. @@ -390,8 +390,8 @@ class RegistrationHandler(BaseHandler): user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - token = self.auth_handler().generate_short_term_login_token( - user_id, duration_seconds) + token = self.auth_handler().generate_access_token( + user_id, None, duration_in_ms) if need_register: yield self.store.register( diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index ce7099b18f..8e1f1b7845 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -429,7 +429,7 @@ class CreateUserRestServlet(ClientV1RestServlet): user_id, token = yield handler.get_or_create_user( localpart=localpart, displayname=displayname, - duration_seconds=duration_seconds, + duration_in_ms=(duration_seconds * 1000), password_hash=password_hash ) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ad269af0ec..960c23d631 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -281,7 +281,7 @@ class AuthTestCase(unittest.TestCase): macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user,)) - macaroon.add_first_party_caveat("time < 1") # ms + macaroon.add_first_party_caveat("time < -2000") # ms self.hs.clock.now = 5000 # seconds self.hs.config.expire_access_token = True @@ -293,3 +293,32 @@ class AuthTestCase(unittest.TestCase): yield self.auth.get_user_from_macaroon(macaroon.serialize()) self.assertEqual(401, cm.exception.code) self.assertIn("Invalid macaroon", cm.exception.msg) + + @defer.inlineCallbacks + def test_get_user_from_macaroon_with_valid_duration(self): + # TODO(danielwh): Remove this mock when we remove the + # get_user_by_access_token fallback. 
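# A sketch (not from the patch) of the access macaroon the handler now mints:
# the expiry becomes a first-party "time <" caveat derived from duration_in_ms,
# which validate_macaroon() later checks alongside the now-exact user_id caveat.
import pymacaroons

def make_access_macaroon(server_name, secret_key, user_id, duration_in_ms, now_ms):
    macaroon = pymacaroons.Macaroon(
        location=server_name,
        identifier="key",
        key=secret_key,
    )
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("type = access")
    macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
    macaroon.add_first_party_caveat("time < %d" % (now_ms + duration_in_ms,))
    return macaroon.serialize()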
+ self.store.get_user_by_access_token = Mock( + return_value={"name": "@baldrick:matrix.org"} + ) + + self.store.get_user_by_access_token = Mock( + return_value={"name": "@baldrick:matrix.org"} + ) + + user_id = "@baldrick:matrix.org" + macaroon = pymacaroons.Macaroon( + location=self.hs.config.server_name, + identifier="key", + key=self.hs.config.macaroon_secret_key) + macaroon.add_first_party_caveat("gen = 1") + macaroon.add_first_party_caveat("type = access") + macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) + macaroon.add_first_party_caveat("time < 900000000") # ms + + self.hs.clock.now = 5000 # seconds + self.hs.config.expire_access_token = True + + user_info = yield self.auth.get_user_from_macaroon(macaroon.serialize()) + user = user_info["user"] + self.assertEqual(UserID.from_string(user_id), user) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 69a5e5b1d4..a7de3c7c17 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -42,12 +42,12 @@ class RegistrationTestCase(unittest.TestCase): http_client=None, expire_access_token=True) self.auth_handler = Mock( - generate_short_term_login_token=Mock(return_value='secret')) + generate_access_token=Mock(return_value='secret')) self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.hs.get_handlers().profile_handler = Mock() self.mock_handler = Mock(spec=[ - "generate_short_term_login_token", + "generate_access_token", ]) self.hs.get_auth_handler = Mock(return_value=self.auth_handler) From d543b72562a376258bc898e6cc16832431dfd527 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 09:56:53 +0100 Subject: [PATCH 231/414] Add an /account/deactivate endpoint --- synapse/rest/client/v2_alpha/account.py | 55 +++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 47f78eba8c..d0412122a7 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -121,6 +121,60 @@ class PasswordRestServlet(RestServlet): return 200, {} +class DeactivateAccountRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/account/deactivate$") + + def __init__(self, hs): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.auth_handler = hs.get_auth_handler() + super(DeactivateAccountRestServlet, self).__init__() + + @defer.inlineCallbacks + def on_POST(self, request): + body = parse_json_object_from_request(request) + + authed, result, params, _ = yield self.auth_handler.check_auth([ + [LoginType.PASSWORD], + [LoginType.EMAIL_IDENTITY] + ], body, self.hs.get_ip_from_request(request)) + + if not authed: + defer.returnValue((401, result)) + + user_id = None + requester = None + + if LoginType.PASSWORD in result: + # if using password, they should also be logged in + requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() + if user_id != result[LoginType.PASSWORD]: + raise LoginError(400, "", Codes.UNKNOWN) + elif LoginType.EMAIL_IDENTITY in result: + threepid = result[LoginType.EMAIL_IDENTITY] + if 'medium' not in threepid or 'address' not in threepid: + raise SynapseError(500, "Malformed threepid") + # if using email, we must know about the email they're authing with! 
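# For reference (not part of the patch), a client would drive the new endpoint
# with a user-interactive auth body along these lines; the path prefix follows
# the client_v2_patterns convention and the exact shape shown is illustrative:
deactivate_body = {
    "auth": {
        "type": "m.login.password",  # LoginType.PASSWORD
        "user": "@baldrick:matrix.org",
        "password": "correct horse battery staple",
    },
}
# POST /_matrix/client/v2_alpha/account/deactivate with this JSON body; on
# success the server revokes access tokens, deletes threepids and clears the
# password hash, as the handler above shows.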
+ threepid_user_id = yield self.hs.get_datastore().get_user_id_by_threepid( + threepid['medium'], threepid['address'] + ) + if not threepid_user_id: + raise SynapseError(404, "Email address not found", Codes.NOT_FOUND) + user_id = threepid_user_id + else: + logger.error("Auth succeeded but no known type!", result.keys()) + raise SynapseError(500, "", Codes.UNKNOWN) + + # FIXME: Theoretically there is a race here wherein user resets password + # using threepid. + yield self.store.user_delete_access_tokens(user_id) + yield self.store.user_delete_threepids(user_id) + yield self.store.user_set_password_hash(user_id, None) + + defer.returnValue((200, {})) + + class ThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") @@ -223,5 +277,6 @@ class ThreepidRestServlet(RestServlet): def register_servlets(hs, http_server): PasswordRequestTokenRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) + DeactivateAccountRestServlet(hs).register(http_server) ThreepidRequestTokenRestServlet(hs).register(http_server) ThreepidRestServlet(hs).register(http_server) From b55c7702717987aa7b50469b4f099fa3a29976d5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 10:00:38 +0100 Subject: [PATCH 232/414] Only accept password auth --- synapse/rest/client/v2_alpha/account.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index d0412122a7..366f1ec158 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -136,7 +136,6 @@ class DeactivateAccountRestServlet(RestServlet): authed, result, params, _ = yield self.auth_handler.check_auth([ [LoginType.PASSWORD], - [LoginType.EMAIL_IDENTITY] ], body, self.hs.get_ip_from_request(request)) if not authed: @@ -151,17 +150,6 @@ class DeactivateAccountRestServlet(RestServlet): user_id = requester.user.to_string() if user_id != result[LoginType.PASSWORD]: raise LoginError(400, "", Codes.UNKNOWN) - elif LoginType.EMAIL_IDENTITY in result: - threepid = result[LoginType.EMAIL_IDENTITY] - if 'medium' not in threepid or 'address' not in threepid: - raise SynapseError(500, "Malformed threepid") - # if using email, we must know about the email they're authing with! 
- threepid_user_id = yield self.hs.get_datastore().get_user_id_by_threepid( - threepid['medium'], threepid['address'] - ) - if not threepid_user_id: - raise SynapseError(404, "Email address not found", Codes.NOT_FOUND) - user_id = threepid_user_id else: logger.error("Auth succeeded but no known type!", result.keys()) raise SynapseError(500, "", Codes.UNKNOWN) From 848d3bf2e136c0be5d68ad95ee5ab901ab59ee3d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 10:25:52 +0100 Subject: [PATCH 233/414] Add hs object --- synapse/rest/client/v2_alpha/account.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 366f1ec158..eb49ad62e9 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -125,6 +125,7 @@ class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_v2_patterns("/account/deactivate$") def __init__(self, hs): + self.hs = hs self.store = hs.get_datastore() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() From b64aa6d68761d66dbdc58a93573f548d45cc230e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 15:15:22 +0100 Subject: [PATCH 234/414] Add sender and contains_url field to events table --- synapse/storage/events.py | 82 +++++++++++++++++++ .../storage/schema/delta/33/event_fields.py | 60 ++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 synapse/storage/schema/delta/33/event_fields.py diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b582942164..91462495ab 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -152,6 +152,7 @@ _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) class EventsStore(SQLBaseStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" def __init__(self, hs): super(EventsStore, self).__init__(hs) @@ -159,6 +160,10 @@ class EventsStore(SQLBaseStore): self.register_background_update_handler( self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) self._event_persist_queue = _EventPeristenceQueue() @@ -576,6 +581,11 @@ class EventsStore(SQLBaseStore): "content": encode_json(event.content).decode("UTF-8"), "origin_server_ts": int(event.origin_server_ts), "received_ts": self._clock.time_msec(), + "sender": event.sender, + "contains_url": ( + "url" in event.content + and isinstance(event.content["url"], basestring) + ), } for event, _ in events_and_contexts ], @@ -1115,6 +1125,78 @@ class EventsStore(SQLBaseStore): ret = yield self.runInteraction("count_messages", _count_messages) defer.returnValue(ret) + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" 
+ ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], basestring) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = ( + "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" + ) + + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index:index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows) + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + @defer.inlineCallbacks def _background_reindex_origin_server_ts(self, progress, batch_size): target_min_stream_id = progress["target_min_stream_id_inclusive"] diff --git a/synapse/storage/schema/delta/33/event_fields.py b/synapse/storage/schema/delta/33/event_fields.py new file mode 100644 index 0000000000..83066cccc9 --- /dev/null +++ b/synapse/storage/schema/delta/33/event_fields.py @@ -0,0 +1,60 @@ +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.storage.prepare_database import get_statements + +import logging +import ujson + +logger = logging.getLogger(__name__) + + +ALTER_TABLE = """ +ALTER TABLE events ADD COLUMN sender TEXT; +ALTER TABLE events ADD COLUMN contains_url BOOLEAN; +""" + + +def run_create(cur, database_engine, *args, **kwargs): + for statement in get_statements(ALTER_TABLE.splitlines()): + cur.execute(statement) + + cur.execute("SELECT MIN(stream_ordering) FROM events") + rows = cur.fetchall() + min_stream_id = rows[0][0] + + cur.execute("SELECT MAX(stream_ordering) FROM events") + rows = cur.fetchall() + max_stream_id = rows[0][0] + + if min_stream_id is not None and max_stream_id is not None: + progress = { + "target_min_stream_id_inclusive": min_stream_id, + "max_stream_id_exclusive": max_stream_id + 1, + "rows_inserted": 0, + } + progress_json = ujson.dumps(progress) + + sql = ( + "INSERT into background_updates (update_name, progress_json)" + " VALUES (?, ?)" + ) + + sql = database_engine.convert_param_style(sql) + + cur.execute(sql, ("event_fields_sender_url", progress_json)) + + +def run_upgrade(cur, database_engine, *args, **kwargs): + pass From e5142f65a69c666c88077d828c01a98258881e7b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 15:15:52 +0100 Subject: [PATCH 235/414] Add 'contains_url' to filter --- synapse/api/filtering.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 4f5a4281fa..3b3ef70750 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -191,6 +191,17 @@ class Filter(object): def __init__(self, filter_json): self.filter_json = filter_json + self.types = self.filter_json.get("types", None) + self.not_types = self.filter_json.get("not_types", []) + + self.rooms = self.filter_json.get("rooms", None) + self.not_rooms = self.filter_json.get("not_rooms", []) + + self.senders = self.filter_json.get("senders", None) + self.not_senders = self.filter_json.get("not_senders", []) + + self.contains_url = self.filter_json.get("contains_url", None) + def check(self, event): """Checks whether the filter matches the given event. @@ -209,9 +220,10 @@ class Filter(object): event.get("room_id", None), sender, event.get("type", None), + "url" in event.get("content", {}) ) - def check_fields(self, room_id, sender, event_type): + def check_fields(self, room_id, sender, event_type, contains_url): """Checks whether the filter matches the given event fields. 
Returns: @@ -225,15 +237,20 @@ class Filter(object): for name, match_func in literal_keys.items(): not_name = "not_%s" % (name,) - disallowed_values = self.filter_json.get(not_name, []) + disallowed_values = getattr(self, not_name) if any(map(match_func, disallowed_values)): return False - allowed_values = self.filter_json.get(name, None) + allowed_values = getattr(self, name) if allowed_values is not None: if not any(map(match_func, allowed_values)): return False + contains_url_filter = self.filter_json.get("contains_url") + if contains_url_filter is not None: + if contains_url_filter != contains_url: + return False + return True def filter_rooms(self, room_ids): From d554ca5e1de97d2b65420d78e266208d9efe8e31 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 15:35:27 +0100 Subject: [PATCH 236/414] Add support for filters in paginate_room_events --- synapse/storage/stream.py | 52 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c33ac5a8d7..3516636dd1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -95,6 +95,50 @@ def upper_bound(token, engine, inclusive=True): ) +def filter_to_clause(event_filter): + if not event_filter: + return "", [] + + clauses = [] + args = [] + + if event_filter.types: + clauses.append( + "(%s)" % " OR ".join("type = ?" for _ in event_filter.types) + ) + args.extend(event_filter.types) + + for typ in event_filter.not_types: + clauses.append("type != ?") + args.append(typ) + + if event_filter.senders: + clauses.append( + "(%s)" % " OR ".join("sender = ?" for _ in event_filter.senders) + ) + args.extend(event_filter.senders) + + for sender in event_filter.not_senders: + clauses.append("sender != ?") + args.append(sender) + + if event_filter.rooms: + clauses.append( + "(%s)" % " OR ".join("room_id = ?" for _ in event_filter.rooms) + ) + args.extend(event_filter.rooms) + + for room_id in event_filter.not_rooms: + clauses.append("room_id != ?") + args.append(room_id) + + if event_filter.contains_url: + clauses.append("contains_url = ?") + args.append(event_filter.contains_url) + + return " AND ".join(clauses), args + + class StreamStore(SQLBaseStore): @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): @@ -320,7 +364,7 @@ class StreamStore(SQLBaseStore): @defer.inlineCallbacks def paginate_room_events(self, room_id, from_key, to_key=None, - direction='b', limit=-1): + direction='b', limit=-1, event_filter=None): # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. Hence # we have a bit of asymmetry when it comes to equalities. @@ -344,6 +388,12 @@ class StreamStore(SQLBaseStore): RoomStreamToken.parse(to_key), self.database_engine )) + filter_clause, filter_args = filter_to_clause(event_filter) + + if filter_clause: + bounds += " AND " + filter_clause + args.extend(filter_args) + if int(limit) > 0: args.append(int(limit)) limit_str = " LIMIT ?" 
From a98d2152049b0a61426ed3d8b6ac872a9ca3f535 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 15:59:25 +0100 Subject: [PATCH 237/414] Add filter param to /messages API --- synapse/handlers/message.py | 16 ++++++++++++---- synapse/rest/client/v1/room.py | 11 ++++++++++- tests/storage/event_injector.py | 1 + tests/storage/test_events.py | 12 ++++++------ 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ad2753c1b5..dc76d34a52 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -66,7 +66,7 @@ class MessageHandler(BaseHandler): @defer.inlineCallbacks def get_messages(self, requester, room_id=None, pagin_config=None, - as_client_event=True): + as_client_event=True, event_filter=None): """Get messages in a room. Args: @@ -75,11 +75,11 @@ class MessageHandler(BaseHandler): pagin_config (synapse.api.streams.PaginationConfig): The pagination config rules to apply, if any. as_client_event (bool): True to get events in client-server format. + event_filter (Filter): Filter to apply to results or None Returns: dict: Pagination API results """ user_id = requester.user.to_string() - data_source = self.hs.get_event_sources().sources["room"] if pagin_config.from_token: room_token = pagin_config.from_token.room_key @@ -129,8 +129,13 @@ class MessageHandler(BaseHandler): room_id, max_topo ) - events, next_key = yield data_source.get_pagination_rows( - requester.user, source_config, room_id + events, next_key = yield self.store.paginate_room_events( + room_id=room_id, + from_key=source_config.from_key, + to_key=source_config.to_key, + direction=source_config.direction, + limit=source_config.limit, + event_filter=event_filter, ) next_token = pagin_config.from_token.copy_and_replace( @@ -144,6 +149,9 @@ class MessageHandler(BaseHandler): "end": next_token.to_string(), }) + if event_filter: + events = event_filter.filter(events) + events = yield filter_events_for_client( self.store, user_id, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 86fbe2747d..866a1e9120 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -20,12 +20,14 @@ from .base import ClientV1RestServlet, client_path_patterns from synapse.api.errors import SynapseError, Codes, AuthError from synapse.streams.config import PaginationConfig from synapse.api.constants import EventTypes, Membership +from synapse.api.filtering import Filter from synapse.types import UserID, RoomID, RoomAlias from synapse.events.utils import serialize_event from synapse.http.servlet import parse_json_object_from_request import logging import urllib +import ujson as json logger = logging.getLogger(__name__) @@ -327,12 +329,19 @@ class RoomMessageListRestServlet(ClientV1RestServlet): request, default_limit=10, ) as_client_event = "raw" not in request.args + filter_bytes = request.args.get("filter", None) + if filter_bytes: + filter_json = urllib.unquote(filter_bytes[-1]).decode("UTF-8") + event_filter = Filter(json.loads(filter_json)) + else: + event_filter = None handler = self.handlers.message_handler msgs = yield handler.get_messages( room_id=room_id, requester=requester, pagin_config=pagination_config, - as_client_event=as_client_event + as_client_event=as_client_event, + event_filter=event_filter, ) defer.returnValue((200, msgs)) diff --git a/tests/storage/event_injector.py b/tests/storage/event_injector.py index f22ba8db89..38556da9a7 100644 --- a/tests/storage/event_injector.py +++ 
b/tests/storage/event_injector.py @@ -30,6 +30,7 @@ class EventInjector: def create_room(self, room): builder = self.event_builder_factory.new({ "type": EventTypes.Create, + "sender": "", "room_id": room.to_string(), "content": {}, }) diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 18a6cff0c7..3762b38e37 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -37,7 +37,7 @@ class EventsStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_count_daily_messages(self): - self.db_pool.runQuery("DELETE FROM stats_reporting") + yield self.db_pool.runQuery("DELETE FROM stats_reporting") self.hs.clock.now = 100 @@ -60,7 +60,7 @@ class EventsStoreTestCase(unittest.TestCase): # it isn't old enough. count = yield self.store.count_daily_messages() self.assertIsNone(count) - self._assert_stats_reporting(1, self.hs.clock.now) + yield self._assert_stats_reporting(1, self.hs.clock.now) # Already reported yesterday, two new events from today. yield self.event_injector.inject_message(room, user, "Yeah they are!") @@ -68,21 +68,21 @@ class EventsStoreTestCase(unittest.TestCase): self.hs.clock.now += 60 * 60 * 24 count = yield self.store.count_daily_messages() self.assertEqual(2, count) # 2 since yesterday - self._assert_stats_reporting(3, self.hs.clock.now) # 3 ever + yield self._assert_stats_reporting(3, self.hs.clock.now) # 3 ever # Last reported too recently. yield self.event_injector.inject_message(room, user, "Who could disagree?") self.hs.clock.now += 60 * 60 * 22 count = yield self.store.count_daily_messages() self.assertIsNone(count) - self._assert_stats_reporting(4, self.hs.clock.now) + yield self._assert_stats_reporting(4, self.hs.clock.now) # Last reported too long ago yield self.event_injector.inject_message(room, user, "No one.") self.hs.clock.now += 60 * 60 * 26 count = yield self.store.count_daily_messages() self.assertIsNone(count) - self._assert_stats_reporting(5, self.hs.clock.now) + yield self._assert_stats_reporting(5, self.hs.clock.now) # And now let's actually report something yield self.event_injector.inject_message(room, user, "Indeed.") @@ -92,7 +92,7 @@ class EventsStoreTestCase(unittest.TestCase): self.hs.clock.now += (60 * 60 * 24) + 50 count = yield self.store.count_daily_messages() self.assertEqual(3, count) - self._assert_stats_reporting(8, self.hs.clock.now) + yield self._assert_stats_reporting(8, self.hs.clock.now) @defer.inlineCallbacks def _get_last_stream_token(self): From ebdafd8114d1aed631a3497ad142f79efa9face7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 14 Jul 2016 16:49:37 +0100 Subject: [PATCH 238/414] Check sender signed event --- synapse/api/auth.py | 10 ++++++++-- synapse/handlers/federation.py | 4 ++-- synapse/state.py | 4 ++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e05defd7d8..e2f40ee65a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -63,7 +63,7 @@ class Auth(object): "user_id = ", ]) - def check(self, event, auth_events): + def check(self, event, auth_events, do_sig_check=True): """ Checks if this event is correctly authed. 
Args: @@ -79,6 +79,13 @@ class Auth(object): if not hasattr(event, "room_id"): raise AuthError(500, "Event has no room_id: %s" % event) + + sender_domain = get_domain_from_id(event.sender) + + # Check the sender's domain has signed the event + if do_sig_check and not event.signatures.get(sender_domain): + raise AuthError(403, "Event not signed by sending server") + if auth_events is None: # Oh, we don't know what the state of the room was, so we # are trusting that this is allowed (at least for now) @@ -87,7 +94,6 @@ class Auth(object): if event.type == EventTypes.Create: room_id_domain = get_domain_from_id(event.room_id) - sender_domain = get_domain_from_id(event.sender) if room_id_domain != sender_domain: raise AuthError( 403, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 351b218247..4e8ffa8f7b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -688,7 +688,7 @@ class FederationHandler(BaseHandler): logger.warn("Failed to create join %r because %s", event, e) raise e - self.auth.check(event, auth_events=context.current_state) + self.auth.check(event, auth_events=context.current_state, do_sig_check=False) defer.returnValue(event) @@ -918,7 +918,7 @@ class FederationHandler(BaseHandler): ) try: - self.auth.check(event, auth_events=context.current_state) + self.auth.check(event, auth_events=context.current_state, do_sig_check=False) except AuthError as e: logger.warn("Failed to create new leave %r because %s", event, e) raise e diff --git a/synapse/state.py b/synapse/state.py index d0f76dc4f5..d7d08570c9 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -379,7 +379,7 @@ class StateHandler(object): try: # FIXME: hs.get_auth() is bad style, but we need to do it to # get around circular deps. - self.hs.get_auth().check(event, auth_events) + self.hs.get_auth().check(event, auth_events, do_sig_check=False) prev_event = event except AuthError: return prev_event @@ -391,7 +391,7 @@ class StateHandler(object): try: # FIXME: hs.get_auth() is bad style, but we need to do it to # get around circular deps. - self.hs.get_auth().check(event, auth_events) + self.hs.get_auth().check(event, auth_events, do_sig_check=False) return event except AuthError: pass From 9e1b43bcbf46c38510cd8348b7df3eb5f6374e81 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2016 09:29:54 +0100 Subject: [PATCH 239/414] Comment --- synapse/handlers/federation.py | 4 ++++ synapse/state.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4e8ffa8f7b..7622962d46 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -688,6 +688,8 @@ class FederationHandler(BaseHandler): logger.warn("Failed to create join %r because %s", event, e) raise e + # The remote hasn't signed it yet, obviously. We'll do the full checks + # when we get the event back in `on_send_join_request` self.auth.check(event, auth_events=context.current_state, do_sig_check=False) defer.returnValue(event) @@ -918,6 +920,8 @@ class FederationHandler(BaseHandler): ) try: + # The remote hasn't signed it yet, obviously. 
We'll do the full checks + # when we get the event back in `on_send_leave_request` self.auth.check(event, auth_events=context.current_state, do_sig_check=False) except AuthError as e: logger.warn("Failed to create new leave %r because %s", event, e) diff --git a/synapse/state.py b/synapse/state.py index d7d08570c9..ef1bc470be 100644 --- a/synapse/state.py +++ b/synapse/state.py @@ -379,6 +379,7 @@ class StateHandler(object): try: # FIXME: hs.get_auth() is bad style, but we need to do it to # get around circular deps. + # The signatures have already been checked at this point self.hs.get_auth().check(event, auth_events, do_sig_check=False) prev_event = event except AuthError: @@ -391,6 +392,7 @@ class StateHandler(object): try: # FIXME: hs.get_auth() is bad style, but we need to do it to # get around circular deps. + # The signatures have already been checked at this point self.hs.get_auth().check(event, auth_events, do_sig_check=False) return event except AuthError: From eec9609e962c7fa623e9d09827ad3ffafeca1874 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2016 10:22:09 +0100 Subject: [PATCH 240/414] event_backwards_extremeties may not be empty --- synapse/storage/events.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b582942164..59f8061333 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1411,11 +1411,21 @@ class EventsStore(SQLBaseStore): to_delete ) + txn.execute( + "SELECT event_id FROM event_backward_extremities WHERE room_id = ?", + (room_id,) + ) + + cur_back_event_ids = [event_id for event_id, in txn.fetchall()] + # Update backward extremeties txn.executemany( "INSERT INTO event_backward_extremities (room_id, event_id)" " VALUES (?, ?)", - [(room_id, event_id) for event_id, in new_backwards_extrems] + [ + (room_id, event_id) for event_id, in new_backwards_extrems + if event_id not in cur_back_event_ids + ] ) txn.executemany( From 978fa53cc2a34dbf5e274416bae5228a42c454c5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2016 10:22:30 +0100 Subject: [PATCH 241/414] Pull out min stream_ordering from ex_outlier_stream --- synapse/storage/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index e93c3de66c..1c93e18f9d 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -92,7 +92,8 @@ class DataStore(RoomMemberStore, RoomStore, extra_tables=[("local_invites", "stream_id")] ) self._backfill_id_gen = StreamIdGenerator( - db_conn, "events", "stream_ordering", step=-1 + db_conn, "events", "stream_ordering", step=-1, + extra_tables=[("ex_outlier_stream", "event_stream_ordering")] ) self._receipts_id_gen = StreamIdGenerator( db_conn, "receipts_linearized", "stream_id" From f52565de50ee311ac154d138f8234950aeeed309 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Jul 2016 14:23:15 +0100 Subject: [PATCH 242/414] Fix /purge_history bug This was caused by trying to insert duplicate backward extremeties --- synapse/storage/events.py | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 59f8061333..9d74fd159d 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1343,7 +1343,7 @@ class EventsStore(SQLBaseStore): # We calculate the new entries for the backward extremeties by finding # all events that point 
to events that are to be purged txn.execute( - "SELECT e.event_id FROM events as e" + "SELECT DISTINCT e.event_id FROM events as e" " INNER JOIN event_edges as ed ON e.event_id = ed.prev_event_id" " INNER JOIN events as e2 ON e2.event_id = ed.event_id" " WHERE e.room_id = ? AND e.topological_ordering < ?" @@ -1352,6 +1352,20 @@ class EventsStore(SQLBaseStore): ) new_backwards_extrems = txn.fetchall() + txn.execute( + "DELETE FROM event_backward_extremities WHERE room_id = ?", + (room_id,) + ) + + # Update backward extremeties + txn.executemany( + "INSERT INTO event_backward_extremities (room_id, event_id)" + " VALUES (?, ?)", + [ + (room_id, event_id) for event_id, in new_backwards_extrems + ] + ) + # Get all state groups that are only referenced by events that are # to be deleted. txn.execute( @@ -1404,30 +1418,12 @@ class EventsStore(SQLBaseStore): "event_search", "event_signatures", "rejections", - "event_backward_extremities", ): txn.executemany( "DELETE FROM %s WHERE event_id = ?" % (table,), to_delete ) - txn.execute( - "SELECT event_id FROM event_backward_extremities WHERE room_id = ?", - (room_id,) - ) - - cur_back_event_ids = [event_id for event_id, in txn.fetchall()] - - # Update backward extremeties - txn.executemany( - "INSERT INTO event_backward_extremities (room_id, event_id)" - " VALUES (?, ?)", - [ - (room_id, event_id) for event_id, in new_backwards_extrems - if event_id not in cur_back_event_ids - ] - ) - txn.executemany( "DELETE FROM events WHERE event_id = ?", to_delete From d137e03231fbe29d7100cd2e3e8683d7681e2208 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 15 Jul 2016 18:58:25 +0100 Subject: [PATCH 243/414] Fix 500 ISE when sending alias event without a state_key --- synapse/api/auth.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 3d1ce4e09e..be67ab4f4d 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -121,6 +121,11 @@ class Auth(object): # FIXME: Temp hack if event.type == EventTypes.Aliases: + if not event.is_state(): + raise AuthError( + 403, + "Alias event must be a state event", + ) if not event.state_key: raise AuthError( 403, From e885e2a623b6a5803b43023978b843c2002bc677 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Sat, 16 Jul 2016 15:40:21 +0100 Subject: [PATCH 244/414] Fall back to 'username' if 'user' is not given for appservice reg. --- synapse/rest/client/v2_alpha/register.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 7c6d2942dc..8da73a8615 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -134,9 +134,11 @@ class RegisterRestServlet(RestServlet): # fallback to 'username' if they gave one. 
if isinstance(body.get("user"), basestring): desired_username = body["user"] - result = yield self._do_appservice_registration( - desired_username, request.args["access_token"][0] - ) + + if isinstance(desired_username, basestring): + result = yield self._do_appservice_registration( + desired_username, request.args["access_token"][0] + ) defer.returnValue((200, result)) # we throw for non 200 responses return From 511a52afc8325504a69a3680e38b300dae7fd089 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Sat, 16 Jul 2016 18:37:34 +0100 Subject: [PATCH 245/414] Use body.get to check for 'user' --- synapse/rest/client/v2_alpha/register.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 8da73a8615..e8d34b06b0 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -132,8 +132,7 @@ class RegisterRestServlet(RestServlet): # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. - if isinstance(body.get("user"), basestring): - desired_username = body["user"] + desired_username = body.get("user", desired_username) if isinstance(desired_username, basestring): result = yield self._do_appservice_registration( From 6344db659f0d4c57551f1da6456dcaa724d5beb2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 18 Jul 2016 09:47:33 +0100 Subject: [PATCH 246/414] Fix a doc-comment The `store` in a handler is a generic DataStore, not just an events.StateStore. --- synapse/handlers/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index c904c6c500..d00685c389 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -31,7 +31,7 @@ class BaseHandler(object): Common base class for the event handlers. Attributes: - store (synapse.storage.events.StateStore): + store (synapse.storage.DataStore): state_handler (synapse.state.StateHandler): """ From dcfd71aa4c4a1d3d71356fd2f5d854fb1db8fafa Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 15 Jul 2016 12:34:23 +0100 Subject: [PATCH 247/414] Refactor login flow Make sure that we have the canonical user_id *before* calling get_login_tuple_for_user_id. Replace login_with_password with a method which just validates the password, and have the caller call get_login_tuple_for_user_id. This brings the password flow into line with the other flows, and will give us a place to register the device_id if necessary. 
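As a sketch only, the calling pattern this refactor aims at looks roughly like
the following; it assumes the validate_password_login and
get_login_tuple_for_user_id methods from the diff below, the wrapper function
name is invented for illustration, and servlet plumbing and error handling are
elided:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def password_login(auth_handler, user_id, password):
        # Step 1: validate the password. This returns the *canonical*
        # @user:id (raising LoginError on failure), which brings the
        # password flow into line with the token/CAS flows.
        canonical_user_id = yield auth_handler.validate_password_login(
            user_id=user_id,
            password=password,
        )
        # Step 2: only now mint the tokens, for the canonical user id.
        access_token, refresh_token = (
            yield auth_handler.get_login_tuple_for_user_id(canonical_user_id)
        )
        defer.returnValue((canonical_user_id, access_token, refresh_token))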
--- synapse/handlers/auth.py | 106 ++++++++++++++++++-------------- synapse/rest/client/v1/login.py | 41 ++++++------ 2 files changed, 82 insertions(+), 65 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 5a0ed9d6b9..983994fa95 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -230,7 +230,6 @@ class AuthHandler(BaseHandler): sess = self._get_session_info(session_id) return sess.setdefault('serverdict', {}).get(key, default) - @defer.inlineCallbacks def _check_password_auth(self, authdict, _): if "user" not in authdict or "password" not in authdict: raise LoginError(400, "", Codes.MISSING_PARAM) @@ -240,11 +239,7 @@ class AuthHandler(BaseHandler): if not user_id.startswith('@'): user_id = UserID.create(user_id, self.hs.hostname).to_string() - if not (yield self._check_password(user_id, password)): - logger.warn("Failed password login for user %s", user_id) - raise LoginError(403, "", errcode=Codes.FORBIDDEN) - - defer.returnValue(user_id) + return self._check_password(user_id, password) @defer.inlineCallbacks def _check_recaptcha(self, authdict, clientip): @@ -348,67 +343,66 @@ class AuthHandler(BaseHandler): return self.sessions[session_id] - @defer.inlineCallbacks - def login_with_password(self, user_id, password): + def validate_password_login(self, user_id, password): """ Authenticates the user with their username and password. Used only by the v1 login API. Args: - user_id (str): User ID + user_id (str): complete @user:id password (str): Password Returns: - A tuple of: - The user's ID. - The access token for the user's session. - The refresh token for the user's session. + defer.Deferred: (str) canonical user id Raises: - StoreError if there was a problem storing the token. + StoreError if there was a problem accessing the database LoginError if there was an authentication problem. """ - - if not (yield self._check_password(user_id, password)): - logger.warn("Failed password login for user %s", user_id) - raise LoginError(403, "", errcode=Codes.FORBIDDEN) - - logger.info("Logging in user %s", user_id) - access_token = yield self.issue_access_token(user_id) - refresh_token = yield self.issue_refresh_token(user_id) - defer.returnValue((user_id, access_token, refresh_token)) + return self._check_password(user_id, password) @defer.inlineCallbacks def get_login_tuple_for_user_id(self, user_id): """ Gets login tuple for the user with the given user ID. + + Creates a new access/refresh token for the user. + The user is assumed to have been authenticated by some other - machanism (e.g. CAS) + machanism (e.g. CAS), and the user_id converted to the canonical case. Args: - user_id (str): User ID + user_id (str): canonical User ID Returns: A tuple of: - The user's ID. The access token for the user's session. The refresh token for the user's session. Raises: StoreError if there was a problem storing the token. LoginError if there was an authentication problem. """ - user_id, ignored = yield self._find_user_id_and_pwd_hash(user_id) - logger.info("Logging in user %s", user_id) access_token = yield self.issue_access_token(user_id) refresh_token = yield self.issue_refresh_token(user_id) - defer.returnValue((user_id, access_token, refresh_token)) + defer.returnValue((access_token, refresh_token)) @defer.inlineCallbacks - def does_user_exist(self, user_id): + def check_user_exists(self, user_id): + """ + Checks to see if a user with the given id exists. Will check case + insensitively, but return None if there are multiple inexact matches. 
+ + Args: + (str) user_id: complete @user:id + + Returns: + defer.Deferred: (str) canonical_user_id, or None if zero or + multiple matches + """ try: - yield self._find_user_id_and_pwd_hash(user_id) - defer.returnValue(True) + res = yield self._find_user_id_and_pwd_hash(user_id) + defer.returnValue(res[0]) except LoginError: - defer.returnValue(False) + defer.returnValue(None) @defer.inlineCallbacks def _find_user_id_and_pwd_hash(self, user_id): @@ -438,27 +432,45 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def _check_password(self, user_id, password): - """ + """Authenticate a user against the LDAP and local databases. + + user_id is checked case insensitively against the local database, but + will throw if there are multiple inexact matches. + + Args: + user_id (str): complete @user:id Returns: - True if the user_id successfully authenticated + (str) the canonical_user_id + Raises: + LoginError if the password was incorrect """ valid_ldap = yield self._check_ldap_password(user_id, password) if valid_ldap: - defer.returnValue(True) + defer.returnValue(user_id) - valid_local_password = yield self._check_local_password(user_id, password) - if valid_local_password: - defer.returnValue(True) - - defer.returnValue(False) + result = yield self._check_local_password(user_id, password) + defer.returnValue(result) @defer.inlineCallbacks def _check_local_password(self, user_id, password): - try: - user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id) - defer.returnValue(self.validate_hash(password, password_hash)) - except LoginError: - defer.returnValue(False) + """Authenticate a user against the local password database. + + user_id is checked case insensitively, but will throw if there are + multiple inexact matches. + + Args: + user_id (str): complete @user:id + Returns: + (str) the canonical_user_id + Raises: + LoginError if the password was incorrect + """ + user_id, password_hash = yield self._find_user_id_and_pwd_hash(user_id) + result = self.validate_hash(password, password_hash) + if not result: + logger.warn("Failed password login for user %s", user_id) + raise LoginError(403, "", errcode=Codes.FORBIDDEN) + defer.returnValue(user_id) @defer.inlineCallbacks def _check_ldap_password(self, user_id, password): @@ -570,7 +582,7 @@ class AuthHandler(BaseHandler): ) # check for existing account, if none exists, create one - if not (yield self.does_user_exist(user_id)): + if not (yield self.check_user_exists(user_id)): # query user metadata for account creation query = "({prop}={value})".format( prop=self.ldap_attributes['uid'], diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 8df9d10efa..a1f2ba8773 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -145,10 +145,13 @@ class LoginRestServlet(ClientV1RestServlet): ).to_string() auth_handler = self.auth_handler - user_id, access_token, refresh_token = yield auth_handler.login_with_password( + user_id = yield auth_handler.validate_password_login( user_id=user_id, - password=login_submission["password"]) - + password=login_submission["password"], + ) + access_token, refresh_token = ( + yield auth_handler.get_login_tuple_for_user_id(user_id) + ) result = { "user_id": user_id, # may have changed "access_token": access_token, @@ -165,7 +168,7 @@ class LoginRestServlet(ClientV1RestServlet): user_id = ( yield auth_handler.validate_short_term_login_token_and_get_user_id(token) ) - user_id, access_token, refresh_token = ( + access_token, refresh_token = ( 
yield auth_handler.get_login_tuple_for_user_id(user_id) ) result = { @@ -196,13 +199,15 @@ class LoginRestServlet(ClientV1RestServlet): user_id = UserID.create(user, self.hs.hostname).to_string() auth_handler = self.auth_handler - user_exists = yield auth_handler.does_user_exist(user_id) - if user_exists: - user_id, access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id) + registered_user_id = yield auth_handler.check_user_exists(user_id) + if registered_user_id: + access_token, refresh_token = ( + yield auth_handler.get_login_tuple_for_user_id( + registered_user_id + ) ) result = { - "user_id": user_id, # may have changed + "user_id": registered_user_id, # may have changed "access_token": access_token, "refresh_token": refresh_token, "home_server": self.hs.hostname, @@ -245,13 +250,13 @@ class LoginRestServlet(ClientV1RestServlet): user_id = UserID.create(user, self.hs.hostname).to_string() auth_handler = self.auth_handler - user_exists = yield auth_handler.does_user_exist(user_id) - if user_exists: - user_id, access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id) + registered_user_id = yield auth_handler.check_user_exists(user_id) + if registered_user_id: + access_token, refresh_token = ( + yield auth_handler.get_login_tuple_for_user_id(registered_user_id) ) result = { - "user_id": user_id, # may have changed + "user_id": registered_user_id, "access_token": access_token, "refresh_token": refresh_token, "home_server": self.hs.hostname, @@ -414,13 +419,13 @@ class CasTicketServlet(ClientV1RestServlet): user_id = UserID.create(user, self.hs.hostname).to_string() auth_handler = self.auth_handler - user_exists = yield auth_handler.does_user_exist(user_id) - if not user_exists: - user_id, _ = ( + registered_user_id = yield auth_handler.check_user_exists(user_id) + if not registered_user_id: + registered_user_id, _ = ( yield self.handlers.registration_handler.register(localpart=user) ) - login_token = auth_handler.generate_short_term_login_token(user_id) + login_token = auth_handler.generate_short_term_login_token(registered_user_id) redirect_url = self.add_login_token_to_redirect_url(client_redirect_url, login_token) request.redirect(redirect_url) From f863a52ceacf69ab19b073383be80603a2f51c0a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 15 Jul 2016 13:19:07 +0100 Subject: [PATCH 248/414] Add device_id support to /login Add a 'devices' table to the storage, as well as a 'device_id' column to refresh_tokens. Allow the client to pass a device_id, and initial_device_display_name, to /login. If login is successful, then register the device in the devices table if it wasn't known already. If no device_id was supplied, make one up. Associate the device_id with the access token and refresh token, so that we can get at it again later. Ensure that the device_id is copied from the refresh token to the access_token when the token is refreshed. 
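A minimal sketch of the device-registration step that /login performs under
this change, assuming the DeviceHandler interface and the login_submission
shape shown in the diffs below (the wrapper function name is invented for
illustration):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def register_login_device(device_handler, user_id, login_submission):
        # device_id may be None here; in that case the handler generates
        # a random one and returns it.
        device_id = yield device_handler.check_device_registered(
            user_id=user_id,
            device_id=login_submission.get("device_id"),
            initial_device_display_name=login_submission.get(
                "initial_device_display_name"),
        )
        # The returned device_id is then associated with the access and
        # refresh tokens issued for this login.
        defer.returnValue(device_id)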
--- synapse/handlers/auth.py | 19 +++-- synapse/handlers/device.py | 71 +++++++++++++++++ synapse/rest/client/v1/login.py | 39 +++++++++- synapse/rest/client/v2_alpha/tokenrefresh.py | 10 ++- synapse/server.py | 5 ++ synapse/storage/__init__.py | 3 + synapse/storage/devices.py | 77 +++++++++++++++++++ synapse/storage/registration.py | 28 ++++--- synapse/storage/schema/delta/33/devices.sql | 21 +++++ .../schema/delta/33/refreshtoken_device.sql | 16 ++++ tests/handlers/test_device.py | 75 ++++++++++++++++++ tests/storage/test_registration.py | 21 +++-- 12 files changed, 354 insertions(+), 31 deletions(-) create mode 100644 synapse/handlers/device.py create mode 100644 synapse/storage/devices.py create mode 100644 synapse/storage/schema/delta/33/devices.sql create mode 100644 synapse/storage/schema/delta/33/refreshtoken_device.sql create mode 100644 tests/handlers/test_device.py diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 983994fa95..ce9bc18849 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -361,7 +361,7 @@ class AuthHandler(BaseHandler): return self._check_password(user_id, password) @defer.inlineCallbacks - def get_login_tuple_for_user_id(self, user_id): + def get_login_tuple_for_user_id(self, user_id, device_id=None): """ Gets login tuple for the user with the given user ID. @@ -372,6 +372,7 @@ class AuthHandler(BaseHandler): Args: user_id (str): canonical User ID + device_id (str): the device ID to associate with the access token Returns: A tuple of: The access token for the user's session. @@ -380,9 +381,9 @@ class AuthHandler(BaseHandler): StoreError if there was a problem storing the token. LoginError if there was an authentication problem. """ - logger.info("Logging in user %s", user_id) - access_token = yield self.issue_access_token(user_id) - refresh_token = yield self.issue_refresh_token(user_id) + logger.info("Logging in user %s on device %s", user_id, device_id) + access_token = yield self.issue_access_token(user_id, device_id) + refresh_token = yield self.issue_refresh_token(user_id, device_id) defer.returnValue((access_token, refresh_token)) @defer.inlineCallbacks @@ -638,15 +639,17 @@ class AuthHandler(BaseHandler): defer.returnValue(False) @defer.inlineCallbacks - def issue_access_token(self, user_id): + def issue_access_token(self, user_id, device_id=None): access_token = self.generate_access_token(user_id) - yield self.store.add_access_token_to_user(user_id, access_token) + yield self.store.add_access_token_to_user(user_id, access_token, + device_id) defer.returnValue(access_token) @defer.inlineCallbacks - def issue_refresh_token(self, user_id): + def issue_refresh_token(self, user_id, device_id=None): refresh_token = self.generate_refresh_token(user_id) - yield self.store.add_refresh_token_to_user(user_id, refresh_token) + yield self.store.add_refresh_token_to_user(user_id, refresh_token, + device_id) defer.returnValue(refresh_token) def generate_access_token(self, user_id, extra_caveats=None, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py new file mode 100644 index 0000000000..8d7d9874f8 --- /dev/null +++ b/synapse/handlers/device.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.api.errors import StoreError +from synapse.util import stringutils +from twisted.internet import defer +from ._base import BaseHandler + +import logging + +logger = logging.getLogger(__name__) + + +class DeviceHandler(BaseHandler): + def __init__(self, hs): + super(DeviceHandler, self).__init__(hs) + + @defer.inlineCallbacks + def check_device_registered(self, user_id, device_id, + initial_device_display_name): + """ + If the given device has not been registered, register it with the + supplied display name. + + If no device_id is supplied, we make one up. + + Args: + user_id (str): @user:id + device_id (str | None): device id supplied by client + initial_device_display_name (str | None): device display name from + client + Returns: + str: device id (generated if none was supplied) + """ + if device_id is not None: + yield self.store.store_device( + user_id=user_id, + device_id=device_id, + initial_device_display_name=initial_device_display_name, + ignore_if_known=True, + ) + defer.returnValue(device_id) + + # if the device id is not specified, we'll autogen one, but loop a few + # times in case of a clash. + attempts = 0 + while attempts < 5: + try: + device_id = stringutils.random_string_with_symbols(16) + yield self.store.store_device( + user_id=user_id, + device_id=device_id, + initial_device_display_name=initial_device_display_name, + ignore_if_known=False, + ) + defer.returnValue(device_id) + except StoreError: + attempts += 1 + + raise StoreError(500, "Couldn't generate a device ID.") diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index a1f2ba8773..e8b791519c 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -59,6 +59,7 @@ class LoginRestServlet(ClientV1RestServlet): self.servername = hs.config.server_name self.http_client = hs.get_simple_http_client() self.auth_handler = self.hs.get_auth_handler() + self.device_handler = self.hs.get_device_handler() def on_GET(self, request): flows = [] @@ -149,14 +150,16 @@ class LoginRestServlet(ClientV1RestServlet): user_id=user_id, password=login_submission["password"], ) + device_id = yield self._register_device(user_id, login_submission) access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id) + yield auth_handler.get_login_tuple_for_user_id(user_id, device_id) ) result = { "user_id": user_id, # may have changed "access_token": access_token, "refresh_token": refresh_token, "home_server": self.hs.hostname, + "device_id": device_id, } defer.returnValue((200, result)) @@ -168,14 +171,16 @@ class LoginRestServlet(ClientV1RestServlet): user_id = ( yield auth_handler.validate_short_term_login_token_and_get_user_id(token) ) + device_id = yield self._register_device(user_id, login_submission) access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id) + yield auth_handler.get_login_tuple_for_user_id(user_id, device_id) ) result = { "user_id": user_id, # may have changed "access_token": access_token, "refresh_token": refresh_token, "home_server": self.hs.hostname, + "device_id": device_id, } 
defer.returnValue((200, result)) @@ -252,8 +257,13 @@ class LoginRestServlet(ClientV1RestServlet): auth_handler = self.auth_handler registered_user_id = yield auth_handler.check_user_exists(user_id) if registered_user_id: + device_id = yield self._register_device( + registered_user_id, login_submission + ) access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(registered_user_id) + yield auth_handler.get_login_tuple_for_user_id( + registered_user_id, device_id + ) ) result = { "user_id": registered_user_id, @@ -262,6 +272,9 @@ class LoginRestServlet(ClientV1RestServlet): "home_server": self.hs.hostname, } else: + # TODO: we should probably check that the register isn't going + # to fonx/change our user_id before registering the device + device_id = yield self._register_device(user_id, login_submission) user_id, access_token = ( yield self.handlers.registration_handler.register(localpart=user) ) @@ -300,6 +313,26 @@ class LoginRestServlet(ClientV1RestServlet): return (user, attributes) + def _register_device(self, user_id, login_submission): + """Register a device for a user. + + This is called after the user's credentials have been validated, but + before the access token has been issued. + + Args: + (str) user_id: full canonical @user:id + (object) login_submission: dictionary supplied to /login call, from + which we pull device_id and initial_device_name + Returns: + defer.Deferred: (str) device_id + """ + device_id = login_submission.get("device_id") + initial_display_name = login_submission.get( + "initial_device_display_name") + return self.device_handler.check_device_registered( + user_id, device_id, initial_display_name + ) + class SAML2RestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/login/saml2", releases=()) diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 8270e8787f..0d312c91d4 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -39,9 +39,13 @@ class TokenRefreshRestServlet(RestServlet): try: old_refresh_token = body["refresh_token"] auth_handler = self.hs.get_auth_handler() - (user_id, new_refresh_token) = yield self.store.exchange_refresh_token( - old_refresh_token, auth_handler.generate_refresh_token) - new_access_token = yield auth_handler.issue_access_token(user_id) + refresh_result = yield self.store.exchange_refresh_token( + old_refresh_token, auth_handler.generate_refresh_token + ) + (user_id, new_refresh_token, device_id) = refresh_result + new_access_token = yield auth_handler.issue_access_token( + user_id, device_id + ) defer.returnValue((200, { "access_token": new_access_token, "refresh_token": new_refresh_token, diff --git a/synapse/server.py b/synapse/server.py index d49a1a8a96..e8b166990d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -25,6 +25,7 @@ from twisted.enterprise import adbapi from synapse.appservice.scheduler import ApplicationServiceScheduler from synapse.appservice.api import ApplicationServiceApi from synapse.federation import initialize_http_replication +from synapse.handlers.device import DeviceHandler from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory from synapse.notifier import Notifier from synapse.api.auth import Auth @@ -92,6 +93,7 @@ class HomeServer(object): 'typing_handler', 'room_list_handler', 'auth_handler', + 'device_handler', 'application_service_api', 'application_service_scheduler', 'application_service_handler', @@ -197,6 
+199,9 @@ class HomeServer(object): def build_auth_handler(self): return AuthHandler(self) + def build_device_handler(self): + return DeviceHandler(self) + def build_application_service_api(self): return ApplicationServiceApi(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 1c93e18f9d..73fb334dd6 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -14,6 +14,8 @@ # limitations under the License. from twisted.internet import defer + +from synapse.storage.devices import DeviceStore from .appservice import ( ApplicationServiceStore, ApplicationServiceTransactionStore ) @@ -80,6 +82,7 @@ class DataStore(RoomMemberStore, RoomStore, EventPushActionsStore, OpenIdStore, ClientIpStore, + DeviceStore, ): def __init__(self, db_conn, hs): diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py new file mode 100644 index 0000000000..9065e96d28 --- /dev/null +++ b/synapse/storage/devices.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from twisted.internet import defer + +from synapse.api.errors import StoreError +from ._base import SQLBaseStore + +logger = logging.getLogger(__name__) + + +class DeviceStore(SQLBaseStore): + @defer.inlineCallbacks + def store_device(self, user_id, device_id, + initial_device_display_name, + ignore_if_known=True): + """Ensure the given device is known; add it to the store if not + + Args: + user_id (str): id of user associated with the device + device_id (str): id of device + initial_device_display_name (str): initial displayname of the + device + ignore_if_known (bool): ignore integrity errors which mean the + device is already known + Returns: + defer.Deferred + Raises: + StoreError: if ignore_if_known is False and the device was already + known + """ + try: + yield self._simple_insert( + "devices", + values={ + "user_id": user_id, + "device_id": device_id, + "display_name": initial_device_display_name + }, + desc="store_device", + or_ignore=ignore_if_known, + ) + except Exception as e: + logger.error("store_device with device_id=%s failed: %s", + device_id, e) + raise StoreError(500, "Problem storing device.") + + def get_device(self, user_id, device_id): + """Retrieve a device. 
+ + Args: + user_id (str): The ID of the user which owns the device + device_id (str): The ID of the device to retrieve + Returns: + defer.Deferred for a namedtuple containing the device information + Raises: + StoreError: if the device is not found + """ + return self._simple_select_one( + table="devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + retcols=("user_id", "device_id", "display_name"), + desc="get_device", + ) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index d957a629dc..26ef1cfd8a 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -31,12 +31,14 @@ class RegistrationStore(SQLBaseStore): self.clock = hs.get_clock() @defer.inlineCallbacks - def add_access_token_to_user(self, user_id, token): + def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. Args: user_id (str): The user ID. token (str): The new access token to add. + device_id (str): ID of the device to associate with the access + token Raises: StoreError if there was a problem adding this. """ @@ -47,18 +49,21 @@ class RegistrationStore(SQLBaseStore): { "id": next_id, "user_id": user_id, - "token": token + "token": token, + "device_id": device_id, }, desc="add_access_token_to_user", ) @defer.inlineCallbacks - def add_refresh_token_to_user(self, user_id, token): + def add_refresh_token_to_user(self, user_id, token, device_id=None): """Adds a refresh token for the given user. Args: user_id (str): The user ID. token (str): The new refresh token to add. + device_id (str): ID of the device to associate with the access + token Raises: StoreError if there was a problem adding this. """ @@ -69,7 +74,8 @@ class RegistrationStore(SQLBaseStore): { "id": next_id, "user_id": user_id, - "token": token + "token": token, + "device_id": device_id, }, desc="add_refresh_token_to_user", ) @@ -291,18 +297,18 @@ class RegistrationStore(SQLBaseStore): ) def exchange_refresh_token(self, refresh_token, token_generator): - """Exchange a refresh token for a new access token and refresh token. + """Exchange a refresh token for a new one. Doing so invalidates the old refresh token - refresh tokens are single use. Args: - token (str): The refresh token of a user. + refresh_token (str): The refresh token of a user. token_generator (fn: str -> str): Function which, when given a user ID, returns a unique refresh token for that user. This function must never return the same value twice. Returns: - tuple of (user_id, refresh_token) + tuple of (user_id, new_refresh_token, device_id) Raises: StoreError if no user was found with that refresh token. """ @@ -314,12 +320,13 @@ class RegistrationStore(SQLBaseStore): ) def _exchange_refresh_token(self, txn, old_token, token_generator): - sql = "SELECT user_id FROM refresh_tokens WHERE token = ?" + sql = "SELECT user_id, device_id FROM refresh_tokens WHERE token = ?" txn.execute(sql, (old_token,)) rows = self.cursor_to_dict(txn) if not rows: raise StoreError(403, "Did not recognize refresh token") user_id = rows[0]["user_id"] + device_id = rows[0]["device_id"] # TODO(danielwh): Maybe perform a validation on the macaroon that # macaroon.user_id == user_id. @@ -328,7 +335,7 @@ class RegistrationStore(SQLBaseStore): sql = "UPDATE refresh_tokens SET token = ? WHERE token = ?" 
txn.execute(sql, (new_token, old_token,)) - return user_id, new_token + return user_id, new_token, device_id @defer.inlineCallbacks def is_server_admin(self, user): @@ -356,7 +363,8 @@ class RegistrationStore(SQLBaseStore): def _query_for_auth(self, txn, token): sql = ( - "SELECT users.name, users.is_guest, access_tokens.id as token_id" + "SELECT users.name, users.is_guest, access_tokens.id as token_id," + " access_tokens.device_id" " FROM users" " INNER JOIN access_tokens on users.name = access_tokens.user_id" " WHERE token = ?" diff --git a/synapse/storage/schema/delta/33/devices.sql b/synapse/storage/schema/delta/33/devices.sql new file mode 100644 index 0000000000..eca7268d82 --- /dev/null +++ b/synapse/storage/schema/delta/33/devices.sql @@ -0,0 +1,21 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE devices ( + user_id TEXT NOT NULL, + device_id TEXT NOT NULL, + display_name TEXT, + CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) +); diff --git a/synapse/storage/schema/delta/33/refreshtoken_device.sql b/synapse/storage/schema/delta/33/refreshtoken_device.sql new file mode 100644 index 0000000000..b21da00dde --- /dev/null +++ b/synapse/storage/schema/delta/33/refreshtoken_device.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE refresh_tokens ADD COLUMN device_id BIGINT; diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py new file mode 100644 index 0000000000..cc6512ccc7 --- /dev/null +++ b/tests/handlers/test_device.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.internet import defer + +from synapse.handlers.device import DeviceHandler +from tests import unittest +from tests.utils import setup_test_homeserver + + +class DeviceHandlers(object): + def __init__(self, hs): + self.device_handler = DeviceHandler(hs) + + +class DeviceTestCase(unittest.TestCase): + @defer.inlineCallbacks + def setUp(self): + self.hs = yield setup_test_homeserver(handlers=None) + self.hs.handlers = handlers = DeviceHandlers(self.hs) + self.handler = handlers.device_handler + + @defer.inlineCallbacks + def test_device_is_created_if_doesnt_exist(self): + res = yield self.handler.check_device_registered( + user_id="boris", + device_id="fco", + initial_device_display_name="display name" + ) + self.assertEqual(res, "fco") + + dev = yield self.handler.store.get_device("boris", "fco") + self.assertEqual(dev["display_name"], "display name") + + @defer.inlineCallbacks + def test_device_is_preserved_if_exists(self): + res1 = yield self.handler.check_device_registered( + user_id="boris", + device_id="fco", + initial_device_display_name="display name" + ) + self.assertEqual(res1, "fco") + + res2 = yield self.handler.check_device_registered( + user_id="boris", + device_id="fco", + initial_device_display_name="new display name" + ) + self.assertEqual(res2, "fco") + + dev = yield self.handler.store.get_device("boris", "fco") + self.assertEqual(dev["display_name"], "display name") + + @defer.inlineCallbacks + def test_device_id_is_made_up_if_unspecified(self): + device_id = yield self.handler.check_device_registered( + user_id="theresa", + device_id=None, + initial_device_display_name="display" + ) + + dev = yield self.handler.store.get_device("theresa", device_id) + self.assertEqual(dev["display_name"], "display") diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index b8384c98d8..b03ca303a2 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -38,6 +38,7 @@ class RegistrationStoreTestCase(unittest.TestCase): "BcDeFgHiJkLmNoPqRsTuVwXyZa" ] self.pwhash = "{xx1}123456789" + self.device_id = "akgjhdjklgshg" @defer.inlineCallbacks def test_register(self): @@ -64,13 +65,15 @@ class RegistrationStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_add_tokens(self): yield self.store.register(self.user_id, self.tokens[0], self.pwhash) - yield self.store.add_access_token_to_user(self.user_id, self.tokens[1]) + yield self.store.add_access_token_to_user(self.user_id, self.tokens[1], + self.device_id) result = yield self.store.get_user_by_access_token(self.tokens[1]) self.assertDictContainsSubset( { "name": self.user_id, + "device_id": self.device_id, }, result ) @@ -80,20 +83,24 @@ class RegistrationStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_exchange_refresh_token_valid(self): uid = stringutils.random_string(32) + device_id = stringutils.random_string(16) generator = TokenGenerator() last_token = generator.generate(uid) self.db_pool.runQuery( - "INSERT INTO refresh_tokens(user_id, token) VALUES(?,?)", - (uid, last_token,)) + "INSERT INTO refresh_tokens(user_id, token, device_id) " + "VALUES(?,?,?)", + (uid, last_token, device_id)) - (found_user_id, refresh_token) = yield self.store.exchange_refresh_token( - last_token, generator.generate) + (found_user_id, refresh_token, device_id) = \ + yield self.store.exchange_refresh_token(last_token, + generator.generate) self.assertEqual(uid, found_user_id) rows = yield self.db_pool.runQuery( - "SELECT token FROM refresh_tokens WHERE 
user_id = ?", (uid, )) - self.assertEqual([(refresh_token,)], rows) + "SELECT token, device_id FROM refresh_tokens WHERE user_id = ?", + (uid, )) + self.assertEqual([(refresh_token, device_id)], rows) # We issued token 1, then exchanged it for token 2 expected_refresh_token = u"%s-%d" % (uid, 2,) self.assertEqual(expected_refresh_token, refresh_token) From 7e554aac86144ebde529aae259cd0895d4078f23 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 19 Jul 2016 10:18:40 +0100 Subject: [PATCH 249/414] Update docstring on Handlers. To indicate it is deprecated. --- synapse/handlers/__init__.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index d28e07f0d9..c512077cb5 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -31,10 +31,22 @@ from .search import SearchHandler class Handlers(object): - """ A collection of all the event handlers. + """ + Deprecated. - There's no need to lazily create these; we'll just make them all eagerly - at construction time. + At some point most of the classes whose name ended "Handler" were + accessed through this class. + + However this makes it painful to unit test the handlers and to run cut + down versions of synapse that only use specific handlers because using a + single handler required creating all of the handlers. So some of the + handlers have been lifted out of the Handlers object and are now accessed + directly through the homeserver object itself. + + Any new handlers should follow the new pattern of being accessed through + the homeserver object and should not be added to the Handlers object. + + The remaining handlers should be moved out of the handlers object. """ def __init__(self, hs): From c41d52a04221d478220ede7ab389299918f113ca Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 19 Jul 2016 10:28:27 +0100 Subject: [PATCH 250/414] Summary line --- synapse/handlers/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py index c512077cb5..1a50a2ec98 100644 --- a/synapse/handlers/__init__.py +++ b/synapse/handlers/__init__.py @@ -31,8 +31,7 @@ from .search import SearchHandler class Handlers(object): - """ - Deprecated. + """ Deprecated. A collection of handlers. At some point most of the classes whose name ended "Handler" were accessed through this class. From 022b9176fe44c15ce3f795d019bd93dd184ca945 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 11:44:05 +0100 Subject: [PATCH 251/414] schema fix device_id should be text, not bigint. --- synapse/storage/schema/delta/33/refreshtoken_device.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/33/refreshtoken_device.sql b/synapse/storage/schema/delta/33/refreshtoken_device.sql index b21da00dde..290bd6da86 100644 --- a/synapse/storage/schema/delta/33/refreshtoken_device.sql +++ b/synapse/storage/schema/delta/33/refreshtoken_device.sql @@ -13,4 +13,4 @@ * limitations under the License. */ -ALTER TABLE refresh_tokens ADD COLUMN device_id BIGINT; +ALTER TABLE refresh_tokens ADD COLUMN device_id TEXT; From 0da0d0a29d807c481152b1580acbbe36f24cf771 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 13:12:22 +0100 Subject: [PATCH 252/414] rest/client/v2_alpha/register.py: Refactor flow somewhat. 
This is meant to be an *almost* non-functional change, with the exception that it fixes what looks a lot like a bug in that it only calls `auth_handler.add_threepid` and `add_pusher` once instead of three times. The idea is to move the generation of the `access_token` out of `registration_handler.register`, because `access_token`s now require a device_id, and we only want to generate a device_id once registration has been successful. --- synapse/rest/client/v2_alpha/register.py | 177 +++++++++++--------- tests/rest/client/v2_alpha/test_register.py | 3 +- 2 files changed, 104 insertions(+), 76 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index e8d34b06b0..707bde0f34 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -199,92 +199,55 @@ class RegisterRestServlet(RestServlet): "Already registered user ID %r for this session", registered_user_id ) - access_token = yield self.auth_handler.issue_access_token(registered_user_id) - refresh_token = yield self.auth_handler.issue_refresh_token( - registered_user_id + # don't re-register the email address + add_email = False + else: + # NB: This may be from the auth handler and NOT from the POST + if 'password' not in params: + raise SynapseError(400, "Missing password.", + Codes.MISSING_PARAM) + + desired_username = params.get("username", None) + new_password = params.get("password", None) + guest_access_token = params.get("guest_access_token", None) + + (registered_user_id, _) = yield self.registration_handler.register( + localpart=desired_username, + password=new_password, + guest_access_token=guest_access_token, + generate_token=False, ) - defer.returnValue((200, { - "user_id": registered_user_id, - "access_token": access_token, - "home_server": self.hs.hostname, - "refresh_token": refresh_token, - })) - # NB: This may be from the auth handler and NOT from the POST - if 'password' not in params: - raise SynapseError(400, "Missing password.", Codes.MISSING_PARAM) + # remember that we've now registered that user account, and with + # what user ID (since the user may not have specified) + self.auth_handler.set_session_data( + session_id, "registered_user_id", registered_user_id + ) - desired_username = params.get("username", None) - new_password = params.get("password", None) - guest_access_token = params.get("guest_access_token", None) + add_email = True - (user_id, token) = yield self.registration_handler.register( - localpart=desired_username, - password=new_password, - guest_access_token=guest_access_token, + access_token = yield self.auth_handler.issue_access_token( + registered_user_id ) - # remember that we've now registered that user account, and with what - # user ID (since the user may not have specified) - self.auth_handler.set_session_data( - session_id, "registered_user_id", user_id - ) - - if result and LoginType.EMAIL_IDENTITY in result: + if add_email and result and LoginType.EMAIL_IDENTITY in result: threepid = result[LoginType.EMAIL_IDENTITY] - - for reqd in ['medium', 'address', 'validated_at']: - if reqd not in threepid: - logger.info("Can't add incomplete 3pid") - else: - yield self.auth_handler.add_threepid( - user_id, - threepid['medium'], - threepid['address'], - threepid['validated_at'], - ) - - # And we add an email pusher for them by default, but only - # if email notifications are enabled (so people don't start - # getting mail spam where they weren't before if email - # notifs are set up on a home server) - 
if ( - self.hs.config.email_enable_notifs and - self.hs.config.email_notif_for_new_users - ): - # Pull the ID of the access token back out of the db - # It would really make more sense for this to be passed - # up when the access token is saved, but that's quite an - # invasive change I'd rather do separately. - user_tuple = yield self.store.get_user_by_access_token( - token - ) - - yield self.hs.get_pusherpool().add_pusher( - user_id=user_id, - access_token=user_tuple["token_id"], - kind="email", - app_id="m.email", - app_display_name="Email Notifications", - device_display_name=threepid["address"], - pushkey=threepid["address"], - lang=None, # We don't know a user's language here - data={}, - ) - - if 'bind_email' in params and params['bind_email']: + reqd = ('medium', 'address', 'validated_at') + if all(x in threepid for x in reqd): + yield self._register_email_threepid( + registered_user_id, threepid, access_token + ) + # XXX why is bind_email not protected by this? + else: + logger.info("Can't add incomplete 3pid") + if params.get("bind_email"): logger.info("bind_email specified: binding") - - emailThreepid = result[LoginType.EMAIL_IDENTITY] - threepid_creds = emailThreepid['threepid_creds'] - logger.debug("Binding emails %s to %s" % ( - emailThreepid, user_id - )) - yield self.identity_handler.bind_threepid(threepid_creds, user_id) + yield self._bind_email(registered_user_id, threepid) else: logger.info("bind_email not specified: not binding email") - result = yield self._create_registration_details(user_id, token) + result = yield self._create_registration_details(registered_user_id, + access_token) defer.returnValue((200, result)) def on_OPTIONS(self, _): @@ -324,6 +287,70 @@ class RegisterRestServlet(RestServlet): ) defer.returnValue((yield self._create_registration_details(user_id, token))) + @defer.inlineCallbacks + def _register_email_threepid(self, user_id, threepid, token): + """Add an email address as a 3pid identifier + + Also adds an email pusher for the email address, if configured in the + HS config + + Args: + user_id (str): id of user + threepid (object): m.login.email.identity auth response + token (str): access_token for the user + Returns: + defer.Deferred: + """ + yield self.auth_handler.add_threepid( + user_id, + threepid['medium'], + threepid['address'], + threepid['validated_at'], + ) + + # And we add an email pusher for them by default, but only + # if email notifications are enabled (so people don't start + # getting mail spam where they weren't before if email + # notifs are set up on a home server) + if (self.hs.config.email_enable_notifs and + self.hs.config.email_notif_for_new_users): + # Pull the ID of the access token back out of the db + # It would really make more sense for this to be passed + # up when the access token is saved, but that's quite an + # invasive change I'd rather do separately. 
+ user_tuple = yield self.store.get_user_by_access_token( + token + ) + token_id = user_tuple["token_id"] + + yield self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="email", + app_id="m.email", + app_display_name="Email Notifications", + device_display_name=threepid["address"], + pushkey=threepid["address"], + lang=None, # We don't know a user's language here + data={}, + ) + defer.returnValue() + + def _bind_email(self, user_id, email_threepid): + """Bind emails to the given user_id on the identity server + + Args: + user_id (str): user id to bind the emails to + email_threepid (object): m.login.email.identity auth response + Returns: + defer.Deferred: + """ + threepid_creds = email_threepid['threepid_creds'] + logger.debug("Binding emails %s to %s" % ( + email_threepid, user_id + )) + return self.identity_handler.bind_threepid(threepid_creds, user_id) + @defer.inlineCallbacks def _create_registration_details(self, user_id, token): refresh_token = yield self.auth_handler.issue_refresh_token(user_id) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index cda0a2b27c..9a4215fef7 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -114,7 +114,8 @@ class RegisterRestServletTestCase(unittest.TestCase): "username": "kermit", "password": "monkey" }, None) - self.registration_handler.register = Mock(return_value=(user_id, token)) + self.registration_handler.register = Mock(return_value=(user_id, None)) + self.auth_handler.issue_access_token = Mock(return_value=token) (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) From 8f6281ab0cace7a1cbf47533ad87387fc003b190 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 15:50:01 +0100 Subject: [PATCH 253/414] Don't bind email unless threepid contains expected fields --- synapse/rest/client/v2_alpha/register.py | 55 +++++++++++------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 707bde0f34..5db953a1e3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -232,19 +232,10 @@ class RegisterRestServlet(RestServlet): if add_email and result and LoginType.EMAIL_IDENTITY in result: threepid = result[LoginType.EMAIL_IDENTITY] - reqd = ('medium', 'address', 'validated_at') - if all(x in threepid for x in reqd): - yield self._register_email_threepid( - registered_user_id, threepid, access_token - ) - # XXX why is bind_email not protected by this? 
- else: - logger.info("Can't add incomplete 3pid") - if params.get("bind_email"): - logger.info("bind_email specified: binding") - yield self._bind_email(registered_user_id, threepid) - else: - logger.info("bind_email not specified: not binding email") + yield self._register_email_threepid( + registered_user_id, threepid, access_token, + params.get("bind_email") + ) result = yield self._create_registration_details(registered_user_id, access_token) @@ -288,19 +279,28 @@ class RegisterRestServlet(RestServlet): defer.returnValue((yield self._create_registration_details(user_id, token))) @defer.inlineCallbacks - def _register_email_threepid(self, user_id, threepid, token): + def _register_email_threepid(self, user_id, threepid, token, bind_email): """Add an email address as a 3pid identifier Also adds an email pusher for the email address, if configured in the HS config + Also optionally binds emails to the given user_id on the identity server + Args: user_id (str): id of user threepid (object): m.login.email.identity auth response token (str): access_token for the user + bind_email (bool): true if the client requested the email to be + bound at the identity server Returns: defer.Deferred: """ + reqd = ('medium', 'address', 'validated_at') + if any(x not in threepid for x in reqd): + logger.info("Can't add incomplete 3pid") + defer.returnValue() + yield self.auth_handler.add_threepid( user_id, threepid['medium'], @@ -334,23 +334,20 @@ class RegisterRestServlet(RestServlet): lang=None, # We don't know a user's language here data={}, ) + + if bind_email: + logger.info("bind_email specified: binding") + logger.debug("Binding emails %s to %s" % ( + threepid, user_id + )) + yield self.identity_handler.bind_threepid( + threepid['threepid_creds'], user_id + ) + else: + logger.info("bind_email not specified: not binding email") + defer.returnValue() - def _bind_email(self, user_id, email_threepid): - """Bind emails to the given user_id on the identity server - - Args: - user_id (str): user id to bind the emails to - email_threepid (object): m.login.email.identity auth response - Returns: - defer.Deferred: - """ - threepid_creds = email_threepid['threepid_creds'] - logger.debug("Binding emails %s to %s" % ( - email_threepid, user_id - )) - return self.identity_handler.bind_threepid(threepid_creds, user_id) - @defer.inlineCallbacks def _create_registration_details(self, user_id, token): refresh_token = yield self.auth_handler.issue_refresh_token(user_id) From 40cbffb2d2ca0166f1377ac4ec5988046ea4ca10 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 18:46:19 +0100 Subject: [PATCH 254/414] Further registration refactoring * `RegistrationHandler.appservice_register` no longer issues an access token: instead it is left for the caller to do it. (There are two of these, one in `synapse/rest/client/v1/register.py`, which now simply calls `AuthHandler.issue_access_token`, and the other in `synapse/rest/client/v2_alpha/register.py`, which is covered below). * In `synapse/rest/client/v2_alpha/register.py`, move the generation of access_tokens into `_create_registration_details`. This means that the normal flow no longer needs to call `AuthHandler.issue_access_token`; the shared-secret flow can tell `RegistrationHandler.register` not to generate a token; and the appservice flow continues to work despite the above change. 
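To make the resulting control flow concrete, all three entry points now share
the same shape. The sketch below is illustrative only: the wrapper name
`register_and_login` is invented for the example, while the calls inside it
match the patch.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def register_and_login(servlet, username, password):
        # 1. Create the account without minting any token: generate_token=False
        #    stops RegistrationHandler.register from issuing a legacy token.
        (user_id, _) = yield servlet.registration_handler.register(
            localpart=username, password=password, generate_token=False,
        )
        # 2. Only once registration has succeeded are the tokens minted; this
        #    is what _create_registration_details now does internally.
        result = yield servlet._create_registration_details(user_id)
        # result: user_id, access_token, refresh_token, home_server
        defer.returnValue(result)
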
--- synapse/handlers/register.py | 13 +++--- synapse/rest/client/v1/register.py | 4 +- synapse/rest/client/v2_alpha/register.py | 50 +++++++++++++++------ synapse/storage/registration.py | 6 ++- tests/rest/client/v2_alpha/test_register.py | 6 ++- 5 files changed, 57 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 6b33b27149..94b19d0cb0 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -99,8 +99,13 @@ class RegistrationHandler(BaseHandler): localpart : The local part of the user ID to register. If None, one will be generated. password (str) : The password to assign to this user so they can - login again. This can be None which means they cannot login again - via a password (e.g. the user is an application service user). + login again. This can be None which means they cannot login again + via a password (e.g. the user is an application service user). + generate_token (bool): Whether a new access token should be + generated. Having this be True should be considered deprecated, + since it offers no means of associating a device_id with the + access_token. Instead you should call auth_handler.issue_access_token + after registration. Returns: A tuple of (user_id, access_token). Raises: @@ -196,15 +201,13 @@ class RegistrationHandler(BaseHandler): user_id, allowed_appservice=service ) - token = self.auth_handler().generate_access_token(user_id) yield self.store.register( user_id=user_id, - token=token, password_hash="", appservice_id=service_id, create_profile_with_localpart=user.localpart, ) - defer.returnValue((user_id, token)) + defer.returnValue(user_id) @defer.inlineCallbacks def check_recaptcha(self, ip, private_key, challenge, response): diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 8e1f1b7845..28b59952c3 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -60,6 +60,7 @@ class RegisterRestServlet(ClientV1RestServlet): # TODO: persistent storage self.sessions = {} self.enable_registration = hs.config.enable_registration + self.auth_handler = hs.get_auth_handler() def on_GET(self, request): if self.hs.config.enable_registration_captcha: @@ -299,9 +300,10 @@ class RegisterRestServlet(ClientV1RestServlet): user_localpart = register_json["user"].encode("utf-8") handler = self.handlers.registration_handler - (user_id, token) = yield handler.appservice_register( + user_id = yield handler.appservice_register( user_localpart, as_token ) + token = yield self.auth_handler.issue_access_token(user_id) self._remove_session(session) defer.returnValue({ "user_id": user_id, diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5db953a1e3..04004cfbbd 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -226,19 +226,17 @@ class RegisterRestServlet(RestServlet): add_email = True - access_token = yield self.auth_handler.issue_access_token( + result = yield self._create_registration_details( registered_user_id ) if add_email and result and LoginType.EMAIL_IDENTITY in result: threepid = result[LoginType.EMAIL_IDENTITY] yield self._register_email_threepid( - registered_user_id, threepid, access_token, + registered_user_id, threepid, result["access_token"], params.get("bind_email") ) - result = yield self._create_registration_details(registered_user_id, - access_token) defer.returnValue((200, result)) def on_OPTIONS(self, _): @@ -246,10 
+244,10 @@ class RegisterRestServlet(RestServlet): @defer.inlineCallbacks def _do_appservice_registration(self, username, as_token): - (user_id, token) = yield self.registration_handler.appservice_register( + user_id = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue((yield self._create_registration_details(user_id, token))) + defer.returnValue((yield self._create_registration_details(user_id))) @defer.inlineCallbacks def _do_shared_secret_registration(self, username, password, mac): @@ -273,10 +271,12 @@ class RegisterRestServlet(RestServlet): 403, "HMAC incorrect", ) - (user_id, token) = yield self.registration_handler.register( - localpart=username, password=password + (user_id, _) = yield self.registration_handler.register( + localpart=username, password=password, generate_token=False, ) - defer.returnValue((yield self._create_registration_details(user_id, token))) + + result = yield self._create_registration_details(user_id) + defer.returnValue(result) @defer.inlineCallbacks def _register_email_threepid(self, user_id, threepid, token, bind_email): @@ -349,11 +349,31 @@ class RegisterRestServlet(RestServlet): defer.returnValue() @defer.inlineCallbacks - def _create_registration_details(self, user_id, token): - refresh_token = yield self.auth_handler.issue_refresh_token(user_id) + def _create_registration_details(self, user_id): + """Complete registration of newly-registered user + + Issues access_token and refresh_token, and builds the success response + body. + + Args: + (str) user_id: full canonical @user:id + + + Returns: + defer.Deferred: (object) dictionary for response from /register + """ + + access_token = yield self.auth_handler.issue_access_token( + user_id + ) + + refresh_token = yield self.auth_handler.issue_refresh_token( + user_id + ) + defer.returnValue({ "user_id": user_id, - "access_token": token, + "access_token": access_token, "home_server": self.hs.hostname, "refresh_token": refresh_token, }) @@ -366,7 +386,11 @@ class RegisterRestServlet(RestServlet): generate_token=False, make_guest=True ) - access_token = self.auth_handler.generate_access_token(user_id, ["guest = true"]) + access_token = self.auth_handler.generate_access_token( + user_id, ["guest = true"] + ) + # XXX the "guest" caveat is not copied by /tokenrefresh. That's ok + # so long as we don't return a refresh_token here. defer.returnValue((200, { "user_id": user_id, "access_token": access_token, diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 26ef1cfd8a..9a92b35361 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -81,14 +81,16 @@ class RegistrationStore(SQLBaseStore): ) @defer.inlineCallbacks - def register(self, user_id, token, password_hash, + def register(self, user_id, token=None, password_hash=None, was_guest=False, make_guest=False, appservice_id=None, create_profile_with_localpart=None, admin=False): """Attempts to register an account. Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. + token (str): The desired access token to use for this user. If this + is not None, the given access token is associated with the user + id. password_hash (str): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. 
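The storage layer can therefore be driven in two ways. A short sketch with
hypothetical user IDs; the keyword arguments come from the signature above,
and `issue_access_token` from the rest of this patch.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def _illustrate_register_modes(store, auth_handler, pwhash):
        # Old style: create the account and bind an access token in one call.
        yield store.register(
            user_id="@alice:example.com",
            token="alices_access_token",  # associated with the user at once
            password_hash=pwhash,
        )
        # New style: create the account only; a token is issued afterwards,
        # which (later in this series) lets it carry a device_id.
        yield store.register(
            user_id="@bob:example.com",
            password_hash=pwhash,
        )
        token = yield auth_handler.issue_access_token("@bob:example.com")
        defer.returnValue(token)
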
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 9a4215fef7..ccbb8776d3 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -61,8 +61,10 @@ class RegisterRestServletTestCase(unittest.TestCase): "id": "1234" } self.registration_handler.appservice_register = Mock( - return_value=(user_id, token) + return_value=user_id ) + self.auth_handler.issue_access_token = Mock(return_value=token) + (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) det_data = { @@ -126,6 +128,8 @@ class RegisterRestServletTestCase(unittest.TestCase): } self.assertDictContainsSubset(det_data, result) self.assertIn("refresh_token", result) + self.auth_handler.issue_access_token.assert_called_once_with( + user_id) def test_POST_disabled_registration(self): self.hs.config.enable_registration = False From 3413f1e284593aa63723cdcd52f443d63771ef62 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 10:21:42 +0100 Subject: [PATCH 255/414] Type annotations Add some type annotations to help PyCharm (in particular) to figure out the types of a bunch of things. --- synapse/handlers/_base.py | 4 ++++ synapse/handlers/auth.py | 4 ++++ synapse/rest/client/v1/base.py | 4 ++++ synapse/rest/client/v1/register.py | 4 ++++ synapse/rest/client/v2_alpha/register.py | 9 +++++++++ synapse/server.pyi | 21 +++++++++++++++++++++ 6 files changed, 46 insertions(+) create mode 100644 synapse/server.pyi diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index d00685c389..6264aa0d9a 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -36,6 +36,10 @@ class BaseHandler(object): """ def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): + """ self.store = hs.get_datastore() self.auth = hs.get_auth() self.notifier = hs.get_notifier() diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index ce9bc18849..8f83923ddb 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -45,6 +45,10 @@ class AuthHandler(BaseHandler): SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): + """ super(AuthHandler, self).__init__(hs) self.checkers = { LoginType.PASSWORD: self._check_password_auth, diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py index 1c020b7e2c..96b49b01f2 100644 --- a/synapse/rest/client/v1/base.py +++ b/synapse/rest/client/v1/base.py @@ -52,6 +52,10 @@ class ClientV1RestServlet(RestServlet): """ def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): + """ self.hs = hs self.handlers = hs.get_handlers() self.builder_factory = hs.get_event_builder_factory() diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py index 8e1f1b7845..efe796c65f 100644 --- a/synapse/rest/client/v1/register.py +++ b/synapse/rest/client/v1/register.py @@ -52,6 +52,10 @@ class RegisterRestServlet(ClientV1RestServlet): PATTERNS = client_path_patterns("/register$", releases=(), include_in_unstable=False) def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ super(RegisterRestServlet, self).__init__(hs) # sessions are stored as: # self.sessions = { diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5db953a1e3..2722a58e3e 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py 
@@ -45,6 +45,10 @@ class RegisterRequestTokenRestServlet(RestServlet): PATTERNS = client_v2_patterns("/register/email/requestToken$") def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ super(RegisterRequestTokenRestServlet, self).__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler @@ -77,7 +81,12 @@ class RegisterRestServlet(RestServlet): PATTERNS = client_v2_patterns("/register$") def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ super(RegisterRestServlet, self).__init__() + self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() diff --git a/synapse/server.pyi b/synapse/server.pyi new file mode 100644 index 0000000000..902f725c06 --- /dev/null +++ b/synapse/server.pyi @@ -0,0 +1,21 @@ +import synapse.handlers +import synapse.handlers.auth +import synapse.handlers.device +import synapse.storage +import synapse.state + +class HomeServer(object): + def get_auth_handler(self) -> synapse.handlers.auth.AuthHandler: + pass + + def get_datastore(self) -> synapse.storage.DataStore: + pass + + def get_device_handler(self) -> synapse.handlers.device.DeviceHandler: + pass + + def get_handlers(self) -> synapse.handlers.Handlers: + pass + + def get_state_handler(self) -> synapse.state.StateHandler: + pass From 4430b1ceb39e125fc6316b37a9767d5c1cb8de7b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 19:01:20 +0100 Subject: [PATCH 256/414] MANIFEST.in: Add *.pyi --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index dfb7c9d28d..216df265b5 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -14,6 +14,7 @@ recursive-include docs * recursive-include res * recursive-include scripts * recursive-include scripts-dev * +recursive-include synapse *.pyi recursive-include tests *.py recursive-include synapse/static *.css From f91faf09b30bf3d6b2997d3a147df23d4460b7fa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2016 10:18:09 +0100 Subject: [PATCH 257/414] Comment --- synapse/storage/stream.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3516636dd1..862c5c3ea1 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -96,6 +96,10 @@ def upper_bound(token, engine, inclusive=True): def filter_to_clause(event_filter): + # NB: This may create SQL clauses that don't optimise well (and we don't + # have indices on all possible clauses). E.g. it may create + # "room_id == X AND room_id != X", which postgres doesn't optimise. + if not event_filter: return "", [] From 57dca356923f220026d31fbb58fcf37ae9b27c8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2016 13:25:06 +0100 Subject: [PATCH 258/414] Don't notify pusher pool for backfilled events --- synapse/handlers/federation.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7622962d46..3f138daf17 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1118,11 +1118,12 @@ class FederationHandler(BaseHandler): backfilled=backfilled, ) - # this intentionally does not yield: we don't care about the result - # and don't need to wait for it. 
- preserve_fn(self.hs.get_pusherpool().on_new_notifications)( - event_stream_id, max_stream_id - ) + if not backfilled: + # this intentionally does not yield: we don't care about the result + # and don't need to wait for it. + preserve_fn(self.hs.get_pusherpool().on_new_notifications)( + event_stream_id, max_stream_id + ) defer.returnValue((context, event_stream_id, max_stream_id)) From 66868119dc3c42c3cc6ea0b41ade81285ef1c9de Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2016 15:47:28 +0100 Subject: [PATCH 259/414] Add metrics for psutil derived memory usage --- synapse/app/homeserver.py | 3 +++ synapse/metrics/__init__.py | 9 +++++++- synapse/metrics/metric.py | 38 ++++++++++++++++++++++++++++++++++ synapse/python_dependencies.py | 1 + 4 files changed, 50 insertions(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 9c2dd32953..fe68ceb07c 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -51,6 +51,7 @@ from synapse.api.urls import ( from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.util.logcontext import LoggingContext +from synapse.metrics import register_memory_metrics from synapse.metrics.resource import MetricsResource, METRICS_PREFIX from synapse.replication.resource import ReplicationResource, REPLICATION_PREFIX from synapse.federation.transport.server import TransportLayerServer @@ -335,6 +336,8 @@ def setup(config_options): hs.get_datastore().start_doing_background_updates() hs.get_replication_layer().start_get_pdu_cache() + register_memory_metrics(hs) + reactor.callWhenRunning(start) return hs diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index bdd7292a30..cce3dba47c 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -27,7 +27,8 @@ import gc from twisted.internet import reactor from .metric import ( - CounterMetric, CallbackMetric, DistributionMetric, CacheMetric + CounterMetric, CallbackMetric, DistributionMetric, CacheMetric, + MemoryUsageMetric, ) @@ -66,6 +67,12 @@ class Metrics(object): return self._register(CacheMetric, *args, **kwargs) +def register_memory_metrics(hs): + metric = MemoryUsageMetric(hs) + all_metrics.append(metric) + return metric + + def get_metrics_for(pkg_name): """ Returns a Metrics instance for conveniently creating metrics namespaced with the given name prefix. """ diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index 341043952a..d100841a7f 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -16,6 +16,8 @@ from itertools import chain +import psutil + # TODO(paul): I can't believe Python doesn't have one of these def map_concat(func, items): @@ -153,3 +155,39 @@ class CacheMetric(object): """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total), """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size), ] + + +class MemoryUsageMetric(object): + """Keeps track of the current memory usage, using psutil. 
+ + The class will keep the current min/max/sum/counts of rss over the last + WINDOW_SIZE_SEC, by polling UPDATE_HZ times per second + """ + + UPDATE_HZ = 2 # number of times to get memory per second + WINDOW_SIZE_SEC = 30 # the size of the window in seconds + + def __init__(self, hs): + clock = hs.get_clock() + self.memory_snapshots = [] + self.process = psutil.Process() + + clock.looping_call(self._update_curr_values, 1000 / self.UPDATE_HZ) + + def _update_curr_values(self): + max_size = self.UPDATE_HZ * self.WINDOW_SIZE_SEC + self.memory_snapshots.append(self.process.memory_info().rss) + self.memory_snapshots[:] = self.memory_snapshots[-max_size:] + + def render(self): + max_rss = max(self.memory_snapshots) + min_rss = min(self.memory_snapshots) + sum_rss = sum(self.memory_snapshots) + len_rss = len(self.memory_snapshots) + + return [ + "process_psutil_rss:max %d" % max_rss, + "process_psutil_rss:min %d" % min_rss, + "process_psutil_rss:total %d" % sum_rss, + "process_psutil_rss:count %d" % len_rss, + ] diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index e024cec0a2..799d35da5e 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -36,6 +36,7 @@ REQUIREMENTS = { "blist": ["blist"], "pysaml2>=3.0.0,<4.0.0": ["saml2>=3.0.0,<4.0.0"], "pymacaroons-pynacl": ["pymacaroons"], + "psutil>=2.0.0": ["psutil>=2.0.0"], } CONDITIONAL_REQUIREMENTS = { "web_client": { From 499e3281e6dd2c566b98c72857bb7944ca9b3831 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2016 16:09:59 +0100 Subject: [PATCH 260/414] Make jenkins install deps on unit tests --- jenkins-unittests.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh index 104d511994..6b0c296cff 100755 --- a/jenkins-unittests.sh +++ b/jenkins-unittests.sh @@ -22,4 +22,8 @@ export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished w rm .coverage* || echo "No coverage files to remove" +tox --notest -e py27 +TOX_BIN=$WORKSPACE/.tox/py27/bin +python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install + tox -e py27 From b97a1356b149f62e5b2c28b09818d74b445cc635 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 19 Jul 2016 18:38:26 +0100 Subject: [PATCH 261/414] Register a device_id in the /v2/register flow. This doesn't cover *all* of the registration flows, but it does cover the most common ones: in particular: shared_secret registration, appservice registration, and normal user/pass registration. Pull device_id from the registration parameters. Register the device in the devices table. Associate the device with the returned access and refresh tokens. Profit. 
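From the client's perspective the change looks roughly like the following
request/response pair. This is a hypothetical sketch: the field names are
those used in the patch below, and "kermit"/"frogfone" echo its unit tests,
but the concrete values are invented.

    register_body = {
        "username": "kermit",
        "password": "monkey",
        # both of these are optional; the server makes up a device_id
        # if none is supplied
        "device_id": "frogfone",
        "initial_device_display_name": "Kermit's phone",
    }

    expected_response = {
        "user_id": "@kermit:muppet",
        "home_server": "muppet",
        "access_token": "...",    # now associated with the device_id
        "refresh_token": "...",   # likewise
        "device_id": "frogfone",  # echoed back for the client to reuse
    }
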
--- synapse/rest/client/v2_alpha/register.py | 54 +++++++++++++++------ tests/rest/client/v2_alpha/test_register.py | 13 +++-- 2 files changed, 49 insertions(+), 18 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b7e03ea9d1..d401722224 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -93,6 +93,7 @@ class RegisterRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_handlers().registration_handler self.identity_handler = hs.get_handlers().identity_handler + self.device_handler = hs.get_device_handler() @defer.inlineCallbacks def on_POST(self, request): @@ -145,7 +146,7 @@ class RegisterRestServlet(RestServlet): if isinstance(desired_username, basestring): result = yield self._do_appservice_registration( - desired_username, request.args["access_token"][0] + desired_username, request.args["access_token"][0], body ) defer.returnValue((200, result)) # we throw for non 200 responses return @@ -155,7 +156,7 @@ class RegisterRestServlet(RestServlet): # FIXME: Should we really be determining if this is shared secret # auth based purely on the 'mac' key? result = yield self._do_shared_secret_registration( - desired_username, desired_password, body["mac"] + desired_username, desired_password, body ) defer.returnValue((200, result)) # we throw for non 200 responses return @@ -236,7 +237,7 @@ class RegisterRestServlet(RestServlet): add_email = True result = yield self._create_registration_details( - registered_user_id + registered_user_id, body ) if add_email and result and LoginType.EMAIL_IDENTITY in result: @@ -252,14 +253,14 @@ class RegisterRestServlet(RestServlet): return 200, {} @defer.inlineCallbacks - def _do_appservice_registration(self, username, as_token): + def _do_appservice_registration(self, username, as_token, body): user_id = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue((yield self._create_registration_details(user_id))) + defer.returnValue((yield self._create_registration_details(user_id, body))) @defer.inlineCallbacks - def _do_shared_secret_registration(self, username, password, mac): + def _do_shared_secret_registration(self, username, password, body): if not self.hs.config.registration_shared_secret: raise SynapseError(400, "Shared secret registration is not enabled") @@ -267,7 +268,7 @@ class RegisterRestServlet(RestServlet): # str() because otherwise hmac complains that 'unicode' does not # have the buffer interface - got_mac = str(mac) + got_mac = str(body["mac"]) want_mac = hmac.new( key=self.hs.config.registration_shared_secret, @@ -284,7 +285,7 @@ class RegisterRestServlet(RestServlet): localpart=username, password=password, generate_token=False, ) - result = yield self._create_registration_details(user_id) + result = yield self._create_registration_details(user_id, body) defer.returnValue(result) @defer.inlineCallbacks @@ -358,35 +359,58 @@ class RegisterRestServlet(RestServlet): defer.returnValue() @defer.inlineCallbacks - def _create_registration_details(self, user_id): + def _create_registration_details(self, user_id, body): """Complete registration of newly-registered user - Issues access_token and refresh_token, and builds the success response - body. + Allocates device_id if one was not given; also creates access_token + and refresh_token. 
Args: (str) user_id: full canonical @user:id - + (object) body: dictionary supplied to /register call, from + which we pull device_id and initial_device_name Returns: defer.Deferred: (object) dictionary for response from /register """ + device_id = yield self._register_device(user_id, body) access_token = yield self.auth_handler.issue_access_token( - user_id + user_id, device_id=device_id ) refresh_token = yield self.auth_handler.issue_refresh_token( - user_id + user_id, device_id=device_id ) - defer.returnValue({ "user_id": user_id, "access_token": access_token, "home_server": self.hs.hostname, "refresh_token": refresh_token, + "device_id": device_id, }) + def _register_device(self, user_id, body): + """Register a device for a user. + + This is called after the user's credentials have been validated, but + before the access token has been issued. + + Args: + (str) user_id: full canonical @user:id + (object) body: dictionary supplied to /register call, from + which we pull device_id and initial_device_name + Returns: + defer.Deferred: (str) device_id + """ + # register the user's device + device_id = body.get("device_id") + initial_display_name = body.get("initial_device_display_name") + device_id = self.device_handler.check_device_registered( + user_id, device_id, initial_display_name + ) + return device_id + @defer.inlineCallbacks def _do_guest_registration(self): if not self.hs.config.allow_guest_access: diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index ccbb8776d3..3bd7065e32 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -30,6 +30,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.registration_handler = Mock() self.identity_handler = Mock() self.login_handler = Mock() + self.device_handler = Mock() # do the dance to hook it up to the hs global self.handlers = Mock( @@ -42,6 +43,7 @@ class RegisterRestServletTestCase(unittest.TestCase): self.hs.get_auth = Mock(return_value=self.auth) self.hs.get_handlers = Mock(return_value=self.handlers) self.hs.get_auth_handler = Mock(return_value=self.auth_handler) + self.hs.get_device_handler = Mock(return_value=self.device_handler) self.hs.config.enable_registration = True # init the thing we're testing @@ -107,9 +109,11 @@ class RegisterRestServletTestCase(unittest.TestCase): def test_POST_user_valid(self): user_id = "@kermit:muppet" token = "kermits_access_token" + device_id = "frogfone" self.request_data = json.dumps({ "username": "kermit", - "password": "monkey" + "password": "monkey", + "device_id": device_id, }) self.registration_handler.check_username = Mock(return_value=True) self.auth_result = (True, None, { @@ -118,18 +122,21 @@ class RegisterRestServletTestCase(unittest.TestCase): }, None) self.registration_handler.register = Mock(return_value=(user_id, None)) self.auth_handler.issue_access_token = Mock(return_value=token) + self.device_handler.check_device_registered = \ + Mock(return_value=device_id) (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) det_data = { "user_id": user_id, "access_token": token, - "home_server": self.hs.hostname + "home_server": self.hs.hostname, + "device_id": device_id, } self.assertDictContainsSubset(det_data, result) self.assertIn("refresh_token", result) self.auth_handler.issue_access_token.assert_called_once_with( - user_id) + user_id, device_id=device_id) def test_POST_disabled_registration(self): self.hs.config.enable_registration = 
False From 053e83dafb8d66b010a087d57b0aac108d68036e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Jul 2016 15:12:42 +0100 Subject: [PATCH 262/414] More doc-comments Fix some more comments on some things --- synapse/api/auth.py | 5 ++--- synapse/storage/_base.py | 19 ++++++++++++------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index be67ab4f4d..ff7d816cfc 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -566,9 +566,8 @@ class Auth(object): Args: request - An HTTP request with an access_token query parameter. Returns: - tuple of: - UserID (str) - Access token ID (str) + defer.Deferred: resolves to a namedtuple including "user" (UserID) + "access_token_id" (int), "is_guest" (bool) Raises: AuthError if no user by that token exists or the token is invalid. """ diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d766a30299..0117fdc639 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -597,10 +597,13 @@ class SQLBaseStore(object): more rows, returning the result as a list of dicts. Args: - table : string giving the table name - keyvalues : dict of column names and values to select the rows with, - or None to not apply a WHERE clause. - retcols : list of strings giving the names of the columns to return + table (str): the table name + keyvalues (dict[str, Any] | None): + column names and values to select the rows with, or None to not + apply a WHERE clause. + retcols (iterable[str]): the names of the columns to return + Returns: + defer.Deferred: resolves to list[dict[str, Any]] """ return self.runInteraction( desc, @@ -615,9 +618,11 @@ class SQLBaseStore(object): Args: txn : Transaction object - table : string giving the table name - keyvalues : dict of column names and values to select the rows with - retcols : list of strings giving the names of the columns to return + table (str): the table name + keyvalues (dict[str, T] | None): + column names and values to select the rows with, or None to not + apply a WHERE clause. + retcols (iterable[str]): the names of the columns to return """ if keyvalues: sql = "SELECT %s FROM %s WHERE %s" % ( From ec041b335ecb20008609c8603338ab8c586615be Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Jul 2016 15:25:40 +0100 Subject: [PATCH 263/414] Record device_id in client_ips Record the device_id when we add a client ip; it's somewhat redundant as we could get it via the access_token, but it will make querying rather easier. --- synapse/api/auth.py | 29 +++++++++++++++++++++++------ synapse/storage/client_ips.py | 3 ++- tests/api/test_auth.py | 10 +++++++++- 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index ff7d816cfc..eca8513905 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -586,6 +586,10 @@ class Auth(object): token_id = user_info["token_id"] is_guest = user_info["is_guest"] + # device_id may not be present if get_user_by_access_token has been + # stubbed out. 
+ device_id = user_info.get("device_id") + ip_addr = self.hs.get_ip_from_request(request) user_agent = request.requestHeaders.getRawHeaders( "User-Agent", @@ -597,7 +601,8 @@ class Auth(object): user=user, access_token=access_token, ip=ip_addr, - user_agent=user_agent + user_agent=user_agent, + device_id=device_id, ) if is_guest and not allow_guest: @@ -695,6 +700,7 @@ class Auth(object): "user": user, "is_guest": True, "token_id": None, + "device_id": None, } elif rights == "delete_pusher": # We don't store these tokens in the database @@ -702,13 +708,20 @@ class Auth(object): "user": user, "is_guest": False, "token_id": None, + "device_id": None, } else: - # This codepath exists so that we can actually return a - # token ID, because we use token IDs in place of device - # identifiers throughout the codebase. - # TODO(daniel): Remove this fallback when device IDs are - # properly implemented. + # This codepath exists for several reasons: + # * so that we can actually return a token ID, which is used + # in some parts of the schema (where we probably ought to + # use device IDs instead) + # * the only way we currently have to invalidate an + # access_token is by removing it from the database, so we + # have to check here that it is still in the db + # * some attributes (notably device_id) aren't stored in the + # macaroon. They probably should be. + # TODO: build the dictionary from the macaroon once the + # above are fixed ret = yield self._look_up_user_by_access_token(macaroon_str) if ret["user"] != user: logger.error( @@ -782,10 +795,14 @@ class Auth(object): self.TOKEN_NOT_FOUND_HTTP_STATUS, "Unrecognised access token.", errcode=Codes.UNKNOWN_TOKEN ) + # we use ret.get() below because *lots* of unit tests stub out + # get_user_by_access_token in a way where it only returns a couple of + # the fields. user_info = { "user": UserID.from_string(ret.get("name")), "token_id": ret.get("token_id", None), "is_guest": False, + "device_id": ret.get("device_id"), } defer.returnValue(user_info) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index a90990e006..74330a8ddf 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -35,7 +35,7 @@ class ClientIpStore(SQLBaseStore): super(ClientIpStore, self).__init__(hs) @defer.inlineCallbacks - def insert_client_ip(self, user, access_token, ip, user_agent): + def insert_client_ip(self, user, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) key = (user.to_string(), access_token, ip) @@ -59,6 +59,7 @@ class ClientIpStore(SQLBaseStore): "access_token": access_token, "ip": ip, "user_agent": user_agent, + "device_id": device_id, }, values={ "last_seen": now, diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 960c23d631..e91723ca3d 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -45,6 +45,7 @@ class AuthTestCase(unittest.TestCase): user_info = { "name": self.test_user, "token_id": "ditto", + "device_id": "device", } self.store.get_user_by_access_token = Mock(return_value=user_info) @@ -143,7 +144,10 @@ class AuthTestCase(unittest.TestCase): # TODO(danielwh): Remove this mock when we remove the # get_user_by_access_token fallback. 
self.store.get_user_by_access_token = Mock( - return_value={"name": "@baldrick:matrix.org"} + return_value={ + "name": "@baldrick:matrix.org", + "device_id": "device", + } ) user_id = "@baldrick:matrix.org" @@ -158,6 +162,10 @@ class AuthTestCase(unittest.TestCase): user = user_info["user"] self.assertEqual(UserID.from_string(user_id), user) + # TODO: device_id should come from the macaroon, but currently comes + # from the db. + self.assertEqual(user_info["device_id"], "device") + @defer.inlineCallbacks def test_get_guest_user_from_macaroon(self): user_id = "@baldrick:matrix.org" From bc8f265f0a8443e918b17a94f4b2fa319e70a21f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Jul 2016 16:34:00 +0100 Subject: [PATCH 264/414] GET /devices endpoint implement a GET /devices endpoint which lists all of the user's devices. It also returns the last IP where we saw that device, so there is some dancing to fish that out of the user_ips table. --- synapse/handlers/device.py | 27 +++++++ synapse/rest/__init__.py | 2 + synapse/rest/client/v2_alpha/_base.py | 13 +++- synapse/rest/client/v2_alpha/devices.py | 51 ++++++++++++ synapse/storage/client_ips.py | 72 +++++++++++++++++ synapse/storage/devices.py | 22 +++++- .../schema/delta/33/user_ips_index.sql | 16 ++++ tests/handlers/test_device.py | 78 ++++++++++++++++--- tests/storage/test_client_ips.py | 62 +++++++++++++++ tests/storage/test_devices.py | 71 +++++++++++++++++ 10 files changed, 397 insertions(+), 17 deletions(-) create mode 100644 synapse/rest/client/v2_alpha/devices.py create mode 100644 synapse/storage/schema/delta/33/user_ips_index.sql create mode 100644 tests/storage/test_client_ips.py create mode 100644 tests/storage/test_devices.py diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 8d7d9874f8..6bbbf59e52 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -69,3 +69,30 @@ class DeviceHandler(BaseHandler): attempts += 1 raise StoreError(500, "Couldn't generate a device ID.") + + @defer.inlineCallbacks + def get_devices_by_user(self, user_id): + """ + Retrieve the given user's devices + + Args: + user_id (str): + Returns: + defer.Deferred: dict[str, dict[str, X]]: map from device_id to + info on the device + """ + + devices = yield self.store.get_devices_by_user(user_id) + + ips = yield self.store.get_last_client_ip_by_device( + devices=((user_id, device_id) for device_id in devices.keys()) + ) + + for device_id in devices.keys(): + ip = ips.get((user_id, device_id), {}) + devices[device_id].update({ + "last_seen_ts": ip.get("last_seen"), + "last_seen_ip": ip.get("ip"), + }) + + defer.returnValue(devices) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 8b223e032b..14227f1cdb 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -46,6 +46,7 @@ from synapse.rest.client.v2_alpha import ( account_data, report_event, openid, + devices, ) from synapse.http.server import JsonResource @@ -90,3 +91,4 @@ class ClientRestResource(JsonResource): account_data.register_servlets(hs, client_resource) report_event.register_servlets(hs, client_resource) openid.register_servlets(hs, client_resource) + devices.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index b6faa2b0e6..20e765f48f 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -25,7 +25,9 @@ import logging logger = logging.getLogger(__name__) -def 
client_v2_patterns(path_regex, releases=(0,)): +def client_v2_patterns(path_regex, releases=(0,), + v2_alpha=True, + unstable=True): """Creates a regex compiled client path with the correct client path prefix. @@ -35,9 +37,12 @@ def client_v2_patterns(path_regex, releases=(0,)): Returns: SRE_Pattern """ - patterns = [re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex)] - unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable") - patterns.append(re.compile("^" + unstable_prefix + path_regex)) + patterns = [] + if v2_alpha: + patterns.append(re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex)) + if unstable: + unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable") + patterns.append(re.compile("^" + unstable_prefix + path_regex)) for release in releases: new_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/r%d" % release) patterns.append(re.compile("^" + new_prefix + path_regex)) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py new file mode 100644 index 0000000000..5cf8bd1afa --- /dev/null +++ b/synapse/rest/client/v2_alpha/devices.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from synapse.http.servlet import RestServlet + +from ._base import client_v2_patterns + +import logging + + +logger = logging.getLogger(__name__) + + +class DevicesRestServlet(RestServlet): + PATTERNS = client_v2_patterns("/devices$", releases=[], v2_alpha=False) + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(DevicesRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + @defer.inlineCallbacks + def on_GET(self, request): + requester = yield self.auth.get_user_by_req(request) + devices = yield self.device_handler.get_devices_by_user( + requester.user.to_string() + ) + defer.returnValue((200, {"devices": devices})) + + +def register_servlets(hs, http_server): + DevicesRestServlet(hs).register(http_server) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index a90990e006..07161496ca 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from ._base import SQLBaseStore, Cache from twisted.internet import defer +logger = logging.getLogger(__name__) # Number of msec of granularity to store the user IP 'last seen' time. 
Smaller # times give more inserts into the database even for readonly API hits @@ -66,3 +69,72 @@ class ClientIpStore(SQLBaseStore): desc="insert_client_ip", lock=False, ) + + @defer.inlineCallbacks + def get_last_client_ip_by_device(self, devices): + """For each device_id listed, give the user_ip it was last seen on + + Args: + devices (iterable[(str, str)]): list of (user_id, device_id) pairs + + Returns: + defer.Deferred: resolves to a dict, where the keys + are (user_id, device_id) tuples. The values are also dicts, with + keys giving the column names + """ + + res = yield self.runInteraction( + "get_last_client_ip_by_device", + self._get_last_client_ip_by_device_txn, + retcols=( + "user_id", + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ), + devices=devices + ) + + ret = {(d["user_id"], d["device_id"]): d for d in res} + defer.returnValue(ret) + + @classmethod + def _get_last_client_ip_by_device_txn(cls, txn, devices, retcols): + def where_clause_for_device(d): + return + + where_clauses = [] + bindings = [] + for (user_id, device_id) in devices: + if device_id is None: + where_clauses.append("(user_id = ? AND device_id IS NULL)") + bindings.extend((user_id, )) + else: + where_clauses.append("(user_id = ? AND device_id = ?)") + bindings.extend((user_id, device_id)) + + inner_select = ( + "SELECT MAX(last_seen) mls, user_id, device_id FROM user_ips " + "WHERE %(where)s " + "GROUP BY user_id, device_id" + ) % { + "where": " OR ".join(where_clauses), + } + + sql = ( + "SELECT %(retcols)s FROM user_ips " + "JOIN (%(inner_select)s) ips ON" + " user_ips.last_seen = ips.mls AND" + " user_ips.user_id = ips.user_id AND" + " (user_ips.device_id = ips.device_id OR" + " (user_ips.device_id IS NULL AND ips.device_id IS NULL)" + " )" + ) % { + "retcols": ",".join("user_ips." + c for c in retcols), + "inner_select": inner_select, + } + + txn.execute(sql, bindings) + return cls.cursor_to_dict(txn) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 9065e96d28..1cc6e07f2b 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -65,7 +65,7 @@ class DeviceStore(SQLBaseStore): user_id (str): The ID of the user which owns the device device_id (str): The ID of the device to retrieve Returns: - defer.Deferred for a namedtuple containing the device information + defer.Deferred for a dict containing the device information Raises: StoreError: if the device is not found """ @@ -75,3 +75,23 @@ class DeviceStore(SQLBaseStore): retcols=("user_id", "device_id", "display_name"), desc="get_device", ) + + @defer.inlineCallbacks + def get_devices_by_user(self, user_id): + """Retrieve all of a user's registered devices. + + Args: + user_id (str): + Returns: + defer.Deferred: resolves to a dict from device_id to a dict + containing "device_id", "user_id" and "display_name" for each + device. 
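+                (hypothetical example entry, for illustration only:
+                 {"ABC": {"device_id": "ABC", "user_id": "@user:example.com",
+                          "display_name": "my phone"}})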
+ """ + devices = yield self._simple_select_list( + table="devices", + keyvalues={"user_id": user_id}, + retcols=("user_id", "device_id", "display_name"), + desc="get_devices_by_user" + ) + + defer.returnValue({d["device_id"]: d for d in devices}) diff --git a/synapse/storage/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/delta/33/user_ips_index.sql new file mode 100644 index 0000000000..8a05677d42 --- /dev/null +++ b/synapse/storage/schema/delta/33/user_ips_index.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE INDEX user_ips_device_id ON user_ips(user_id, device_id, last_seen); diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index cc6512ccc7..c2e12135d6 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -12,25 +12,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from synapse import types from twisted.internet import defer -from synapse.handlers.device import DeviceHandler -from tests import unittest -from tests.utils import setup_test_homeserver - - -class DeviceHandlers(object): - def __init__(self, hs): - self.device_handler = DeviceHandler(hs) +import synapse.handlers.device +import synapse.storage +from tests import unittest, utils class DeviceTestCase(unittest.TestCase): + def __init__(self, *args, **kwargs): + super(DeviceTestCase, self).__init__(*args, **kwargs) + self.store = None # type: synapse.storage.DataStore + self.handler = None # type: device.DeviceHandler + self.clock = None # type: utils.MockClock + @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(handlers=None) - self.hs.handlers = handlers = DeviceHandlers(self.hs) - self.handler = handlers.device_handler + hs = yield utils.setup_test_homeserver(handlers=None) + self.handler = synapse.handlers.device.DeviceHandler(hs) + self.store = hs.get_datastore() + self.clock = hs.get_clock() @defer.inlineCallbacks def test_device_is_created_if_doesnt_exist(self): @@ -73,3 +75,55 @@ class DeviceTestCase(unittest.TestCase): dev = yield self.handler.store.get_device("theresa", device_id) self.assertEqual(dev["display_name"], "display") + + @defer.inlineCallbacks + def test_get_devices_by_user(self): + # check this works for both devices which have a recorded client_ip, + # and those which don't. 
+ user1 = "@boris:aaa" + user2 = "@theresa:bbb" + yield self._record_user(user1, "xyz", "display 0") + yield self._record_user(user1, "fco", "display 1", "token1", "ip1") + yield self._record_user(user1, "abc", "display 2", "token2", "ip2") + yield self._record_user(user1, "abc", "display 2", "token3", "ip3") + + yield self._record_user(user2, "def", "dispkay", "token4", "ip4") + + res = yield self.handler.get_devices_by_user(user1) + self.assertEqual(3, len(res.keys())) + self.assertDictContainsSubset({ + "user_id": user1, + "device_id": "xyz", + "display_name": "display 0", + "last_seen_ip": None, + "last_seen_ts": None, + }, res["xyz"]) + self.assertDictContainsSubset({ + "user_id": user1, + "device_id": "fco", + "display_name": "display 1", + "last_seen_ip": "ip1", + "last_seen_ts": 1000000, + }, res["fco"]) + self.assertDictContainsSubset({ + "user_id": user1, + "device_id": "abc", + "display_name": "display 2", + "last_seen_ip": "ip3", + "last_seen_ts": 3000000, + }, res["abc"]) + + @defer.inlineCallbacks + def _record_user(self, user_id, device_id, display_name, + access_token=None, ip=None): + device_id = yield self.handler.check_device_registered( + user_id=user_id, + device_id=device_id, + initial_device_display_name=display_name + ) + + if ip is not None: + yield self.store.insert_client_ip( + types.UserID.from_string(user_id), + access_token, ip, "user_agent", device_id) + self.clock.advance_time(1000) \ No newline at end of file diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py new file mode 100644 index 0000000000..1f0c0e7c37 --- /dev/null +++ b/tests/storage/test_client_ips.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.internet import defer + +import synapse.server +import synapse.storage +import synapse.types +import tests.unittest +import tests.utils + + +class ClientIpStoreTestCase(tests.unittest.TestCase): + def __init__(self, *args, **kwargs): + super(ClientIpStoreTestCase, self).__init__(*args, **kwargs) + self.store = None # type: synapse.storage.DataStore + self.clock = None # type: tests.utils.MockClock + + @defer.inlineCallbacks + def setUp(self): + hs = yield tests.utils.setup_test_homeserver() + self.store = hs.get_datastore() + self.clock = hs.get_clock() + + @defer.inlineCallbacks + def test_insert_new_client_ip(self): + self.clock.now = 12345678 + user_id = "@user:id" + yield self.store.insert_client_ip( + synapse.types.UserID.from_string(user_id), + "access_token", "ip", "user_agent", "device_id", + ) + + # deliberately use an iterable here to make sure that the lookup + # method doesn't iterate it twice + device_list = iter(((user_id, "device_id"),)) + result = yield self.store.get_last_client_ip_by_device(device_list) + + r = result[(user_id, "device_id")] + self.assertDictContainsSubset( + { + "user_id": user_id, + "device_id": "device_id", + "access_token": "access_token", + "ip": "ip", + "user_agent": "user_agent", + "last_seen": 12345678000, + }, + r + ) diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py new file mode 100644 index 0000000000..d3e9d97a9a --- /dev/null +++ b/tests/storage/test_devices.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.internet import defer + +import synapse.server +import synapse.types +import tests.unittest +import tests.utils + + +class DeviceStoreTestCase(tests.unittest.TestCase): + def __init__(self, *args, **kwargs): + super(DeviceStoreTestCase, self).__init__(*args, **kwargs) + self.store = None # type: synapse.storage.DataStore + + @defer.inlineCallbacks + def setUp(self): + hs = yield tests.utils.setup_test_homeserver() + + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def test_store_new_device(self): + yield self.store.store_device( + "user_id", "device_id", "display_name" + ) + + res = yield self.store.get_device("user_id", "device_id") + self.assertDictContainsSubset({ + "user_id": "user_id", + "device_id": "device_id", + "display_name": "display_name", + }, res) + + @defer.inlineCallbacks + def test_get_devices_by_user(self): + yield self.store.store_device( + "user_id", "device1", "display_name 1" + ) + yield self.store.store_device( + "user_id", "device2", "display_name 2" + ) + yield self.store.store_device( + "user_id2", "device3", "display_name 3" + ) + + res = yield self.store.get_devices_by_user("user_id") + self.assertEqual(2, len(res.keys())) + self.assertDictContainsSubset({ + "user_id": "user_id", + "device_id": "device1", + "display_name": "display_name 1", + }, res["device1"]) + self.assertDictContainsSubset({ + "user_id": "user_id", + "device_id": "device2", + "display_name": "display_name 2", + }, res["device2"]) From d36b1d849d5d896967ab2ade7c206513e502d94f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Jul 2016 16:59:52 +0100 Subject: [PATCH 265/414] Don't explode if we have no snapshots yet --- synapse/metrics/metric.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py index d100841a7f..7becbe0491 100644 --- a/synapse/metrics/metric.py +++ b/synapse/metrics/metric.py @@ -180,6 +180,9 @@ class MemoryUsageMetric(object): self.memory_snapshots[:] = self.memory_snapshots[-max_size:] def render(self): + if not self.memory_snapshots: + return [] + max_rss = max(self.memory_snapshots) min_rss = min(self.memory_snapshots) sum_rss = sum(self.memory_snapshots) From 40a1c96617fd5926b53f8993bb93af159af4d674 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 20 Jul 2016 18:06:28 +0100 Subject: [PATCH 266/414] Fix PEP8 errors --- tests/handlers/test_device.py | 2 +- tests/storage/test_devices.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index c2e12135d6..b05aa9bb55 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -126,4 +126,4 @@ class DeviceTestCase(unittest.TestCase): yield self.store.insert_client_ip( types.UserID.from_string(user_id), access_token, ip, "user_agent", device_id) - self.clock.advance_time(1000) \ No newline at end of file + self.clock.advance_time(1000) diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index d3e9d97a9a..a6ce993375 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -15,8 +15,6 @@ from twisted.internet import defer -import synapse.server -import synapse.types import tests.unittest import tests.utils From 248e6770ca0faadf574cfd62f72d8e200cb5b57a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2016 10:30:12 +0100 Subject: [PATCH 267/414] Cache federation state responses --- synapse/federation/federation_server.py | 68 +++++++++++++++++-------- 
synapse/handlers/federation.py | 7 +--
 synapse/handlers/room.py | 4 +-
 synapse/handlers/sync.py | 2 +-
 synapse/util/caches/response_cache.py | 13 ++++-
 5 files changed, 61 insertions(+), 33 deletions(-)

diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 85f5e752fe..d15c7e1b40 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -21,10 +21,11 @@ from .units import Transaction, Edu
 
 from synapse.util.async import Linearizer
 from synapse.util.logutils import log_function
+from synapse.util.caches.response_cache import ResponseCache
 from synapse.events import FrozenEvent
 import synapse.metrics
 
-from synapse.api.errors import FederationError, SynapseError
+from synapse.api.errors import AuthError, FederationError, SynapseError
 
 from synapse.crypto.event_signing import compute_event_signature
 
@@ -48,9 +49,15 @@ class FederationServer(FederationBase):
     def __init__(self, hs):
         super(FederationServer, self).__init__(hs)
 
+        self.auth = hs.get_auth()
+
         self._room_pdu_linearizer = Linearizer()
         self._server_linearizer = Linearizer()
 
+        # We cache responses to state queries, as they take a while and often
+        # come in waves.
+        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
+
     def set_handler(self, handler):
         """Sets the handler that the replication layer will use to communicate
         receipt of new PDUs from other home servers. The required methods are
@@ -188,28 +195,45 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_context_state_request(self, origin, room_id, event_id):
-        with (yield self._server_linearizer.queue((origin, room_id))):
-            if event_id:
-                pdus = yield self.handler.get_state_for_pdu(
-                    origin, room_id, event_id,
-                )
-                auth_chain = yield self.store.get_auth_chain(
-                    [pdu.event_id for pdu in pdus]
-                )
+        if not event_id:
+            raise NotImplementedError("Specify an event")
 
-                for event in auth_chain:
-                    # We sign these again because there was a bug where we
-                    # incorrectly signed things the first time round
-                    if self.hs.is_mine_id(event.event_id):
-                        event.signatures.update(
-                            compute_event_signature(
-                                event,
-                                self.hs.hostname,
-                                self.hs.config.signing_key[0]
-                            )
-                        )
-            else:
-                raise NotImplementedError("Specify an event")
+        in_room = yield self.auth.check_host_in_room(room_id, origin)
+        if not in_room:
+            raise AuthError(403, "Host not in room.")
+
+        result = self._state_resp_cache.get((room_id, event_id))
+        if not result:
+            with (yield self._server_linearizer.queue((origin, room_id))):
+                resp = yield self._state_resp_cache.set(
+                    (room_id, event_id),
+                    self._on_context_state_request_compute(room_id, event_id)
+                )
+        else:
+            resp = yield result
+
+        defer.returnValue((200, resp))
+
+    @defer.inlineCallbacks
+    def _on_context_state_request_compute(self, room_id, event_id):
+        pdus = yield self.handler.get_state_for_pdu(
+            room_id, event_id,
+        )
+        auth_chain = yield self.store.get_auth_chain(
+            [pdu.event_id for pdu in pdus]
+        )
+
+        for event in auth_chain:
+            # We sign these again because there was a bug where we
+            # incorrectly signed things the first time round
+            if self.hs.is_mine_id(event.event_id):
+                event.signatures.update(
+                    compute_event_signature(
+                        event,
+                        self.hs.hostname,
+                        self.hs.config.signing_key[0]
+                    )
+                )
 
         defer.returnValue((200, {
             "pdus": [pdu.get_pdu_json() for pdu in pdus],
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 3f138daf17..fcad41d7b6 100644
--- a/synapse/handlers/federation.py
+++
b/synapse/handlers/federation.py @@ -991,14 +991,9 @@ class FederationHandler(BaseHandler): defer.returnValue(None) @defer.inlineCallbacks - def get_state_for_pdu(self, origin, room_id, event_id, do_auth=True): + def get_state_for_pdu(self, room_id, event_id): yield run_on_reactor() - if do_auth: - in_room = yield self.auth.check_host_in_room(room_id, origin) - if not in_room: - raise AuthError(403, "Host not in room.") - state_groups = yield self.store.get_state_groups( room_id, [event_id] ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ae44c7a556..bf6b1c1535 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -345,8 +345,8 @@ class RoomCreationHandler(BaseHandler): class RoomListHandler(BaseHandler): def __init__(self, hs): super(RoomListHandler, self).__init__(hs) - self.response_cache = ResponseCache() - self.remote_list_request_cache = ResponseCache() + self.response_cache = ResponseCache(hs) + self.remote_list_request_cache = ResponseCache(hs) self.remote_list_cache = {} self.fetch_looping_call = hs.get_clock().looping_call( self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index be26a491ff..0ee4ebe504 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -138,7 +138,7 @@ class SyncHandler(object): self.presence_handler = hs.get_presence_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() - self.response_cache = ResponseCache() + self.response_cache = ResponseCache(hs) def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0, full_state=False): diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 36686b479e..00af539880 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -24,9 +24,12 @@ class ResponseCache(object): used rather than trying to compute a new response. """ - def __init__(self): + def __init__(self, hs, timeout_ms=0): self.pending_result_cache = {} # Requests that haven't finished yet. + self.clock = hs.get_clock() + self.timeout_sec = timeout_ms / 1000. 
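+        # (a timeout_ms of 0 keeps the previous behaviour: the entry is
+        # evicted as soon as the wrapped deferred completes; see the
+        # remove() callback in set() below)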
+ def get(self, key): result = self.pending_result_cache.get(key) if result is not None: @@ -39,7 +42,13 @@ class ResponseCache(object): self.pending_result_cache[key] = result def remove(r): - self.pending_result_cache.pop(key, None) + if self.timeout_sec: + self.clock.call_later( + self.timeout_sec, + self.pending_result_cache.pop, key, None, + ) + else: + self.pending_result_cache.pop(key, None) return r result.addBoth(remove) From 081e5d55e68b1a55d4f52ef062084d9126ce2231 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2016 11:14:54 +0100 Subject: [PATCH 268/414] Send the correct host header when fetching keys --- synapse/crypto/keyclient.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 54b83da9d8..4fca215c97 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -79,8 +79,7 @@ class SynapseKeyClientProtocol(HTTPClient): self.host = None def connectionMade(self): - self.host = self.transport.getHost() - logger.debug("Connected to %s", self.host) + logger.debug("Connected to %s", self.transport.getPeer()) self.sendCommand(b"GET", self.path) if self.host: self.sendHeader(b"Host", self.host) @@ -124,7 +123,10 @@ class SynapseKeyClientProtocol(HTTPClient): self.timer.cancel() def on_timeout(self): - logger.debug("Timeout waiting for response from %s", self.host) + logger.debug( + "Timeout waiting for response from %s: %s", + self.host, self.transport.getPeer(), + ) self.errback(IOError("Timeout waiting for response")) self.transport.abortConnection() @@ -133,4 +135,5 @@ class SynapseKeyClientFactory(Factory): def protocol(self): protocol = SynapseKeyClientProtocol() protocol.path = self.path + protocol.path = self.host return protocol From 1a64dffb00287a30c2d4992944836122bd4d8923 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jul 2016 11:34:16 +0100 Subject: [PATCH 269/414] Preserve device_id from first call to /register device_id may only be passed in the first call to /register, so make sure we fish it out of the register `params` rather than the body of the final call. 
--- synapse/rest/client/v2_alpha/register.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index d401722224..c8c9395fc6 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -237,7 +237,7 @@ class RegisterRestServlet(RestServlet): add_email = True result = yield self._create_registration_details( - registered_user_id, body + registered_user_id, params ) if add_email and result and LoginType.EMAIL_IDENTITY in result: @@ -359,7 +359,7 @@ class RegisterRestServlet(RestServlet): defer.returnValue() @defer.inlineCallbacks - def _create_registration_details(self, user_id, body): + def _create_registration_details(self, user_id, params): """Complete registration of newly-registered user Allocates device_id if one was not given; also creates access_token @@ -367,13 +367,12 @@ class RegisterRestServlet(RestServlet): Args: (str) user_id: full canonical @user:id - (object) body: dictionary supplied to /register call, from - which we pull device_id and initial_device_name - + (object) params: registration parameters, from which we pull + device_id and initial_device_name Returns: defer.Deferred: (object) dictionary for response from /register """ - device_id = yield self._register_device(user_id, body) + device_id = yield self._register_device(user_id, params) access_token = yield self.auth_handler.issue_access_token( user_id, device_id=device_id @@ -390,7 +389,7 @@ class RegisterRestServlet(RestServlet): "device_id": device_id, }) - def _register_device(self, user_id, body): + def _register_device(self, user_id, params): """Register a device for a user. This is called after the user's credentials have been validated, but @@ -398,14 +397,14 @@ class RegisterRestServlet(RestServlet): Args: (str) user_id: full canonical @user:id - (object) body: dictionary supplied to /register call, from - which we pull device_id and initial_device_name + (object) params: registration parameters, from which we pull + device_id and initial_device_name Returns: defer.Deferred: (str) device_id """ # register the user's device - device_id = body.get("device_id") - initial_display_name = body.get("initial_device_display_name") + device_id = params.get("device_id") + initial_display_name = params.get("initial_device_display_name") device_id = self.device_handler.check_device_registered( user_id, device_id, initial_display_name ) From cf94a78872397fd97465b4704465a2d03d27d41e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2016 11:45:53 +0100 Subject: [PATCH 270/414] Set host not path --- synapse/crypto/keyclient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 4fca215c97..1d85990369 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -135,5 +135,5 @@ class SynapseKeyClientFactory(Factory): def protocol(self): protocol = SynapseKeyClientProtocol() protocol.path = self.path - protocol.path = self.host + protocol.host = self.host return protocol From c445f5fec7ed9e9228022be0cccc82f4bf028016 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jul 2016 11:58:47 +0100 Subject: [PATCH 271/414] storage/client_ips: remove some dead code --- synapse/storage/client_ips.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 365f08650d..e31fa53c3f 100644 --- 
a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -103,9 +103,6 @@ class ClientIpStore(SQLBaseStore):
 
     @classmethod
     def _get_last_client_ip_by_device_txn(cls, txn, devices, retcols):
-        def where_clause_for_device(d):
-            return
-
         where_clauses = []
         bindings = []
         for (user_id, device_id) in devices:

From 406f7aa0f6ca7433e52433485824e80b79930498 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 20 Jul 2016 17:58:44 +0100
Subject: [PATCH 272/414] Implement GET /device/{deviceId}

---
 synapse/handlers/device.py | 46 ++++++++++++++++++++-----
 synapse/rest/client/v2_alpha/devices.py | 25 ++++++++++++++
 tests/handlers/test_device.py | 37 ++++++++++++++------
 3 files changed, 89 insertions(+), 19 deletions(-)

diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 6bbbf59e52..3c88be0679 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -12,7 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from synapse.api.errors import StoreError
+
+from synapse.api import errors
 from synapse.util import stringutils
 from twisted.internet import defer
 from ._base import BaseHandler
@@ -65,10 +66,10 @@ class DeviceHandler(BaseHandler):
                     ignore_if_known=False,
                 )
                 defer.returnValue(device_id)
-            except StoreError:
+            except errors.StoreError:
                 attempts += 1
 
-        raise StoreError(500, "Couldn't generate a device ID.")
+        raise errors.StoreError(500, "Couldn't generate a device ID.")
 
     @defer.inlineCallbacks
     def get_devices_by_user(self, user_id):
@@ -88,11 +89,38 @@ class DeviceHandler(BaseHandler):
             devices=((user_id, device_id) for device_id in devices.keys())
         )
 
-        for device_id in devices.keys():
-            ip = ips.get((user_id, device_id), {})
-            devices[device_id].update({
-                "last_seen_ts": ip.get("last_seen"),
-                "last_seen_ip": ip.get("ip"),
-            })
+        for device in devices.values():
+            _update_device_from_client_ips(device, ips)
 
         defer.returnValue(devices)
+
+    @defer.inlineCallbacks
+    def get_device(self, user_id, device_id):
+        """ Retrieve the given device
+
+        Args:
+            user_id (str):
+            device_id (str)
+
+        Returns:
+            defer.Deferred: dict[str, X]: info on the device
+        Raises:
+            errors.NotFoundError: if the device was not found
+        """
+        try:
+            device = yield self.store.get_device(user_id, device_id)
+        except errors.StoreError, e:
+            raise errors.NotFoundError
+        ips = yield self.store.get_last_client_ip_by_device(
+            devices=((user_id, device_id),)
+        )
+        _update_device_from_client_ips(device, ips)
+        defer.returnValue(device)
+
+
+def _update_device_from_client_ips(device, client_ips):
+    ip = client_ips.get((device["user_id"], device["device_id"]), {})
+    device.update({
+        "last_seen_ts": ip.get("last_seen"),
+        "last_seen_ip": ip.get("ip"),
+    })
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 5cf8bd1afa..8b9ab4f674 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -47,5 +47,30 @@ class DevicesRestServlet(RestServlet):
         defer.returnValue((200, {"devices": devices}))
 
 
+class DeviceRestServlet(RestServlet):
+    PATTERNS = client_v2_patterns("/devices/(?P<device_id>[^/]*)$",
+                                  releases=[], v2_alpha=False)
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        super(DeviceRestServlet, self).__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.device_handler = hs.get_device_handler()
+
+    @defer.inlineCallbacks
def on_GET(self, request, device_id): + requester = yield self.auth.get_user_by_req(request) + device = yield self.device_handler.get_device( + requester.user.to_string(), + device_id, + ) + defer.returnValue((200, device)) + + def register_servlets(hs, http_server): DevicesRestServlet(hs).register(http_server) + DeviceRestServlet(hs).register(http_server) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index b05aa9bb55..73f09874d8 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -19,6 +19,8 @@ import synapse.handlers.device import synapse.storage from tests import unittest, utils +user1 = "@boris:aaa" +user2 = "@theresa:bbb" class DeviceTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): @@ -78,16 +80,7 @@ class DeviceTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_devices_by_user(self): - # check this works for both devices which have a recorded client_ip, - # and those which don't. - user1 = "@boris:aaa" - user2 = "@theresa:bbb" - yield self._record_user(user1, "xyz", "display 0") - yield self._record_user(user1, "fco", "display 1", "token1", "ip1") - yield self._record_user(user1, "abc", "display 2", "token2", "ip2") - yield self._record_user(user1, "abc", "display 2", "token3", "ip3") - - yield self._record_user(user2, "def", "dispkay", "token4", "ip4") + yield self._record_users() res = yield self.handler.get_devices_by_user(user1) self.assertEqual(3, len(res.keys())) @@ -113,6 +106,30 @@ class DeviceTestCase(unittest.TestCase): "last_seen_ts": 3000000, }, res["abc"]) + @defer.inlineCallbacks + def test_get_device(self): + yield self._record_users() + + res = yield self.handler.get_device(user1, "abc") + self.assertDictContainsSubset({ + "user_id": user1, + "device_id": "abc", + "display_name": "display 2", + "last_seen_ip": "ip3", + "last_seen_ts": 3000000, + }, res) + + @defer.inlineCallbacks + def _record_users(self): + # check this works for both devices which have a recorded client_ip, + # and those which don't. 
+ yield self._record_user(user1, "xyz", "display 0") + yield self._record_user(user1, "fco", "display 1", "token1", "ip1") + yield self._record_user(user1, "abc", "display 2", "token2", "ip2") + yield self._record_user(user1, "abc", "display 2", "token3", "ip3") + + yield self._record_user(user2, "def", "dispkay", "token4", "ip4") + @defer.inlineCallbacks def _record_user(self, user_id, device_id, display_name, access_token=None, ip=None): From 1c3c202b969d6a7e5e4af2b2dca370f053b92c9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jul 2016 13:15:15 +0100 Subject: [PATCH 273/414] Fix PEP8 errors --- synapse/handlers/device.py | 2 +- tests/handlers/test_device.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 3c88be0679..110f5fbb5c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -109,7 +109,7 @@ class DeviceHandler(BaseHandler): """ try: device = yield self.store.get_device(user_id, device_id) - except errors.StoreError, e: + except errors.StoreError: raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device( devices=((user_id, device_id),) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 73f09874d8..87c3c75aea 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -22,6 +22,7 @@ from tests import unittest, utils user1 = "@boris:aaa" user2 = "@theresa:bbb" + class DeviceTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(DeviceTestCase, self).__init__(*args, **kwargs) From 55abbe1850efff95efe9935873b666e5fc4bf0e9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jul 2016 15:55:13 +0100 Subject: [PATCH 274/414] make /devices return a list Turns out I specced this to return a list of devices rather than a dict of them --- synapse/handlers/device.py | 10 +++++----- tests/handlers/test_device.py | 11 +++++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 110f5fbb5c..1f9e15c33c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -79,17 +79,17 @@ class DeviceHandler(BaseHandler): Args: user_id (str): Returns: - defer.Deferred: dict[str, dict[str, X]]: map from device_id to - info on the device + defer.Deferred: list[dict[str, X]]: info on each device """ - devices = yield self.store.get_devices_by_user(user_id) + device_map = yield self.store.get_devices_by_user(user_id) ips = yield self.store.get_last_client_ip_by_device( - devices=((user_id, device_id) for device_id in devices.keys()) + devices=((user_id, device_id) for device_id in device_map.keys()) ) - for device in devices.values(): + devices = device_map.values() + for device in devices: _update_device_from_client_ips(device, ips) defer.returnValue(devices) diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 87c3c75aea..331aa13fed 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -84,28 +84,31 @@ class DeviceTestCase(unittest.TestCase): yield self._record_users() res = yield self.handler.get_devices_by_user(user1) - self.assertEqual(3, len(res.keys())) + self.assertEqual(3, len(res)) + device_map = { + d["device_id"]: d for d in res + } self.assertDictContainsSubset({ "user_id": user1, "device_id": "xyz", "display_name": "display 0", "last_seen_ip": None, "last_seen_ts": None, - }, res["xyz"]) + }, device_map["xyz"]) 
self.assertDictContainsSubset({ "user_id": user1, "device_id": "fco", "display_name": "display 1", "last_seen_ip": "ip1", "last_seen_ts": 1000000, - }, res["fco"]) + }, device_map["fco"]) self.assertDictContainsSubset({ "user_id": user1, "device_id": "abc", "display_name": "display 2", "last_seen_ip": "ip3", "last_seen_ts": 3000000, - }, res["abc"]) + }, device_map["abc"]) @defer.inlineCallbacks def test_get_device(self): From aede7248ab04118b83d7787547b9cf3fd615e7ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2016 17:37:44 +0100 Subject: [PATCH 275/414] Split out a FederationReader process --- synapse/app/federation_reader.py | 200 ++++++++++++++++++++ synapse/replication/slave/storage/events.py | 5 + synapse/replication/slave/storage/keys.py | 29 +++ synapse/storage/keys.py | 4 + 4 files changed, 238 insertions(+) create mode 100644 synapse/app/federation_reader.py create mode 100644 synapse/replication/slave/storage/keys.py diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py new file mode 100644 index 0000000000..98a18f9b3d --- /dev/null +++ b/synapse/app/federation_reader.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import synapse + +from synapse.config._base import ConfigError +from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging +from synapse.http.site import SynapseSite +from synapse.metrics.resource import MetricsResource, METRICS_PREFIX +from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.keys import SlavedKeyStore +from synapse.server import HomeServer +from synapse.storage.engines import create_engine +from synapse.util.async import sleep +from synapse.util.httpresourcetree import create_resource_tree +from synapse.util.logcontext import LoggingContext +from synapse.util.manhole import manhole +from synapse.util.rlimit import change_resource_limit +from synapse.util.versionstring import get_version_string +from synapse.api.urls import FEDERATION_PREFIX +from synapse.federation.transport.server import TransportLayerServer +from synapse.crypto import context_factory + + +from twisted.internet import reactor, defer +from twisted.web.resource import Resource + +from daemonize import Daemonize + +import sys +import logging +import gc + +logger = logging.getLogger("synapse.app.federation_reader") + + +class FederationReaderSlavedStore( + SlavedEventStore, + SlavedKeyStore, + BaseSlavedStore, +): + pass + + +class FederationReaderServer(HomeServer): + def get_db_conn(self, run_new_connection=True): + # Any param beginning with cp_ is a parameter for adbapi, and should + # not be passed to the database engine. 
+ db_params = { + k: v for k, v in self.db_config.get("args", {}).items() + if not k.startswith("cp_") + } + db_conn = self.database_engine.module.connect(**db_params) + + if run_new_connection: + self.database_engine.on_new_connection(db_conn) + return db_conn + + def setup(self): + logger.info("Setting up.") + self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self) + logger.info("Finished setting up.") + + def _listen_http(self, listener_config): + port = listener_config["port"] + bind_address = listener_config.get("bind_address", "") + site_tag = listener_config.get("tag", port) + resources = {} + for res in listener_config["resources"]: + for name in res["names"]: + if name == "metrics": + resources[METRICS_PREFIX] = MetricsResource(self) + elif name == "federation": + resources.update({ + FEDERATION_PREFIX: TransportLayerServer(self), + }) + + root_resource = create_resource_tree(resources, Resource()) + reactor.listenTCP( + port, + SynapseSite( + "synapse.access.http.%s" % (site_tag,), + site_tag, + listener_config, + root_resource, + ), + interface=bind_address + ) + logger.info("Synapse federation reader now listening on port %d", port) + + def start_listening(self, listeners): + for listener in listeners: + if listener["type"] == "http": + self._listen_http(listener) + elif listener["type"] == "manhole": + reactor.listenTCP( + listener["port"], + manhole( + username="matrix", + password="rabbithole", + globals={"hs": self}, + ), + interface=listener.get("bind_address", '127.0.0.1') + ) + else: + logger.warn("Unrecognized listener type: %s", listener["type"]) + + @defer.inlineCallbacks + def replicate(self): + http_client = self.get_simple_http_client() + store = self.get_datastore() + replication_url = self.config.worker_replication_url + + while True: + try: + args = store.stream_positions() + args["timeout"] = 30000 + result = yield http_client.get_json(replication_url, args=args) + yield store.process_replication(result) + except: + logger.exception("Error replicating from %r", replication_url) + yield sleep(5) + + +def start(config_options): + try: + config = HomeServerConfig.load_config( + "Synapse federation reader", config_options + ) + except ConfigError as e: + sys.stderr.write("\n" + e.message + "\n") + sys.exit(1) + + assert config.worker_app == "synapse.app.federation_reader" + + setup_logging(config.worker_log_config, config.worker_log_file) + + database_engine = create_engine(config.database_config) + + tls_server_context_factory = context_factory.ServerContextFactory(config) + + ss = FederationReaderServer( + config.server_name, + db_config=config.database_config, + tls_server_context_factory=tls_server_context_factory, + config=config, + version_string=get_version_string("Synapse", synapse), + database_engine=database_engine, + ) + + ss.setup() + ss.get_handlers() + ss.start_listening(config.worker_listeners) + + def run(): + with LoggingContext("run"): + logger.info("Running") + change_resource_limit(config.soft_file_limit) + if config.gc_thresholds: + gc.set_threshold(*config.gc_thresholds) + reactor.run() + + def start(): + ss.get_datastore().start_profiling() + ss.replicate() + + reactor.callWhenRunning(start) + + if config.worker_daemonize: + daemon = Daemonize( + app="synapse-federation-reader", + pid=config.worker_pid_file, + action=run, + auto_close_fds=False, + verbose=True, + logger=logger, + ) + daemon.start() + else: + run() + + +if __name__ == '__main__': + with LoggingContext("main"): + start(sys.argv[1:]) diff --git 
a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 369d839464..2ba1e6b803 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -142,6 +142,11 @@ class SlavedEventStore(BaseSlavedStore): _get_events_around_txn = DataStore._get_events_around_txn.__func__ _get_some_state_from_cache = DataStore._get_some_state_from_cache.__func__ + get_backfill_events = DataStore.get_backfill_events.__func__ + _get_backfill_events = DataStore._get_backfill_events.__func__ + get_missing_events = DataStore.get_missing_events.__func__ + _get_missing_events = DataStore._get_missing_events.__func__ + def stream_positions(self): result = super(SlavedEventStore, self).stream_positions() result["events"] = self._stream_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py new file mode 100644 index 0000000000..c1c895439d --- /dev/null +++ b/synapse/replication/slave/storage/keys.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from ._base import BaseSlavedStore +from synapse.storage import DataStore +from synapse.storage.keys import KeyStore + + +class SlavedKeyStore(BaseSlavedStore): + # TODO: use the cached version and invalidate deleted tokens + get_all_server_verify_keys = defer.inlineCallbacks(KeyStore.__dict__[ + "get_all_server_verify_keys" + ].orig) + + get_server_verify_keys = DataStore.get_server_verify_keys.__func__ diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index a495a8a7d9..1195efec08 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -22,6 +22,10 @@ import OpenSSL from signedjson.key import decode_verify_key_bytes import hashlib +import logging + +logger = logging.getLogger(__name__) + class KeyStore(SQLBaseStore): """Persistence for signature verification keys and tls X.509 certificates From d26b660aa6580b1947f04f7efd598d34a259b970 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 21 Jul 2016 17:38:51 +0100 Subject: [PATCH 276/414] Cache getPeer --- synapse/crypto/keyclient.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 1d85990369..c2bd64d6c2 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -77,9 +77,12 @@ class SynapseKeyClientProtocol(HTTPClient): def __init__(self): self.remote_key = defer.Deferred() self.host = None + self._peer = None def connectionMade(self): - logger.debug("Connected to %s", self.transport.getPeer()) + self._peer = self.transport.getPeer() + logger.debug("Connected to %s", self._peer) + self.sendCommand(b"GET", self.path) if self.host: self.sendHeader(b"Host", self.host) @@ -125,7 +128,7 @@ class SynapseKeyClientProtocol(HTTPClient): def on_timeout(self): logger.debug( "Timeout waiting for response from %s: %s", - self.host, 
self.transport.getPeer(),
+            self.host, self._peer,
         )
         self.errback(IOError("Timeout waiting for response"))
         self.transport.abortConnection()

From f16f0e169d30e6920b892ee772693199b16713fd Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 22 Jul 2016 13:14:03 +0100
Subject: [PATCH 277/414] Create index on user_ips in the background

user_ips is kinda big, so really we want to add the index in the
background once we're running. Replace the schema delta with one which
will do that.

I've done this in a way that's reasonably easy to reuse, as there are a
few other indexes I need, and I don't suppose they will be the last.
---
 synapse/storage/background_updates.py | 73 +++++++++++++++++--
 synapse/storage/client_ips.py | 16 +++-
 .../schema/delta/33/user_ips_index.sql | 3 +-
 3 files changed, 80 insertions(+), 12 deletions(-)

diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 66a995157d..75951d0173 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from ._base import SQLBaseStore
+from . import engines
 
 from twisted.internet import defer
 
@@ -106,13 +107,13 @@ class BackgroundUpdateStore(SQLBaseStore):
                 )
             except:
                 logger.exception("Error doing update")
-
-            if result is None:
-                logger.info(
-                    "No more background updates to do."
-                    " Unscheduling background update task."
-                )
-                return
+            else:
+                if result is None:
+                    logger.info(
+                        "No more background updates to do."
+                        " Unscheduling background update task."
+                    )
+                    return
 
     @defer.inlineCallbacks
     def do_background_update(self, desired_duration_ms):
@@ -202,6 +203,64 @@ class BackgroundUpdateStore(SQLBaseStore):
         """
         self._background_update_handlers[update_name] = update_handler
 
+    def register_background_index_update(self, update_name, index_name,
+                                         table, columns):
+        """Helper for store classes to do a background index addition
+
+        To use:
+
+        1. use a schema delta file to add a background update. Example:
+            INSERT INTO background_updates (update_name, progress_json) VALUES
+                ('my_new_index', '{}');
+
+        2. In the Store constructor, call this method
+
+        Args:
+            update_name (str): update_name to register for
+            index_name (str): name of index to add
+            table (str): table to add index to
+            columns (list[str]): columns/expressions to include in index
+        """
+
+        # if this is postgres, we add the indexes concurrently. Otherwise
+        # we fall back to doing it inline
+        if isinstance(self.database_engine, engines.PostgresEngine):
+            conc = True
+        else:
+            conc = False
+
+        sql = "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)" \
+            % {
+                "conc": "CONCURRENTLY" if conc else "",
+                "name": index_name,
+                "table": table,
+                "columns": ", ".join(columns),
+            }
+
+        def create_index_concurrently(conn):
+            conn.rollback()
+            # postgres insists on autocommit for the index
+            conn.set_session(autocommit=True)
+            c = conn.cursor()
+            c.execute(sql)
+            conn.set_session(autocommit=False)
+
+        def create_index(conn):
+            c = conn.cursor()
+            c.execute(sql)
+
+        @defer.inlineCallbacks
+        def updater(progress, batch_size):
+            logger.info("Adding index %s to %s", index_name, table)
+            if conc:
+                yield self.runWithConnection(create_index_concurrently)
+            else:
+                yield self.runWithConnection(create_index)
+            yield self._end_background_update(update_name)
+            defer.returnValue(1)
+
+        self.register_background_update_handler(update_name, updater)
+
     def start_background_update(self, update_name, progress):
         """Starts a background update running.
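As an illustration of the helper above: the user_ips registration in the
next hunk makes it build roughly the following statement on postgres (with
the CONCURRENTLY keyword omitted on sqlite):

    CREATE INDEX CONCURRENTLY user_ips_device_id
        ON user_ips (user_id, device_id, last_seen)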
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index e31fa53c3f..20eb9ac15f 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -15,10 +15,11 @@ import logging -from ._base import SQLBaseStore, Cache - from twisted.internet import defer +from ._base import Cache +from . import background_updates + logger = logging.getLogger(__name__) # Number of msec of granularity to store the user IP 'last seen' time. Smaller @@ -27,8 +28,7 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120 * 1000 -class ClientIpStore(SQLBaseStore): - +class ClientIpStore(background_updates.BackgroundUpdateStore): def __init__(self, hs): self.client_ip_last_seen = Cache( name="client_ip_last_seen", @@ -37,6 +37,14 @@ class ClientIpStore(SQLBaseStore): super(ClientIpStore, self).__init__(hs) + self.register_background_index_update( + "user_ips_device_index", + index_name="user_ips_device_id", + table="user_ips", + columns=["user_id", "device_id", "last_seen"], + ) + + @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) diff --git a/synapse/storage/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/delta/33/user_ips_index.sql index 8a05677d42..473f75a78e 100644 --- a/synapse/storage/schema/delta/33/user_ips_index.sql +++ b/synapse/storage/schema/delta/33/user_ips_index.sql @@ -13,4 +13,5 @@ * limitations under the License. */ -CREATE INDEX user_ips_device_id ON user_ips(user_id, device_id, last_seen); +INSERT INTO background_updates (update_name, progress_json) VALUES + ('user_ips_device_index', '{}'); From 363786845b728bcd7146b3d949a86021a96eb2d2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 22 Jul 2016 13:21:07 +0100 Subject: [PATCH 278/414] PEP8 --- synapse/storage/client_ips.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 20eb9ac15f..71e5ea112f 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -44,7 +44,6 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): columns=["user_id", "device_id", "last_seen"], ) - @defer.inlineCallbacks def insert_client_ip(self, user, access_token, ip, user_agent, device_id): now = int(self._clock.time_msec()) From dad2da7e54a4f0e92185e4f8553fb51b037c0bd3 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 22 Jul 2016 17:00:56 +0100 Subject: [PATCH 279/414] Log the hostname the reCAPTCHA was completed on This could be useful information to have in the logs. Also comment about how & why we don't verify the hostname. --- synapse/handlers/auth.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 8f83923ddb..6fff7e7d03 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -279,8 +279,17 @@ class AuthHandler(BaseHandler): data = pde.response resp_body = simplejson.loads(data) - if 'success' in resp_body and resp_body['success']: - defer.returnValue(True) + if 'success' in resp_body: + # Note that we do NOT check the hostname here: we explicitly + # intend the CAPTCHA to be presented by whatever client the + # user is using, we just care that they have completed a CAPTCHA. 
+ logger.info( + "%s reCAPTCHA from hostname %s", + "Successful" if resp_body['success'] else "Failed", + resp_body['hostname'] + ) + if resp_body['success']: + defer.returnValue(True) raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) @defer.inlineCallbacks From 7ed58bb3476c4a18a9af97b8ee3358dac00098eb Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 22 Jul 2016 17:18:50 +0100 Subject: [PATCH 280/414] Use get to avoid KeyErrors --- synapse/handlers/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 6fff7e7d03..d5d2072436 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -286,7 +286,7 @@ class AuthHandler(BaseHandler): logger.info( "%s reCAPTCHA from hostname %s", "Successful" if resp_body['success'] else "Failed", - resp_body['hostname'] + resp_body.get('hostname') ) if resp_body['success']: defer.returnValue(True) From 465117d7ca40ba9906697aa023897798f7833830 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 12:10:42 +0100 Subject: [PATCH 281/414] Fix background_update tests A bit of a cleanup for background_updates, and make sure that the real background updates have run before we start the unit tests, so that they don't interfere with the tests. --- synapse/storage/background_updates.py | 27 ++++++++++++++++++------- tests/storage/test_background_update.py | 22 ++++++++++++++------ 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 75951d0173..2771f7c3c1 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -88,10 +88,12 @@ class BackgroundUpdateStore(SQLBaseStore): @defer.inlineCallbacks def start_doing_background_updates(self): - while True: - if self._background_update_timer is not None: - return + assert(self._background_update_timer is not None, + "background updates already running") + logger.info("Starting background schema updates") + + while True: sleep = defer.Deferred() self._background_update_timer = self._clock.call_later( self.BACKGROUND_UPDATE_INTERVAL_MS / 1000., sleep.callback, None @@ -102,7 +104,7 @@ class BackgroundUpdateStore(SQLBaseStore): self._background_update_timer = None try: - result = yield self.do_background_update( + result = yield self.do_next_background_update( self.BACKGROUND_UPDATE_DURATION_MS ) except: @@ -113,11 +115,12 @@ class BackgroundUpdateStore(SQLBaseStore): "No more background updates to do." " Unscheduling background update task." ) - return + defer.returnValue() @defer.inlineCallbacks - def do_background_update(self, desired_duration_ms): - """Does some amount of work on a background update + def do_next_background_update(self, desired_duration_ms): + """Does some amount of work on the next queued background update + Args: desired_duration_ms(float): How long we want to spend updating. 
@@ -136,11 +139,21 @@ class BackgroundUpdateStore(SQLBaseStore): self._background_update_queue.append(update['update_name']) if not self._background_update_queue: + # no work left to do defer.returnValue(None) + # pop from the front, and add back to the back update_name = self._background_update_queue.pop(0) self._background_update_queue.append(update_name) + res = yield self._do_background_update(update_name, desired_duration_ms) + defer.returnValue(res) + + @defer.inlineCallbacks + def _do_background_update(self, update_name, desired_duration_ms): + logger.info("Starting update batch on background update '%s'", + update_name) + update_handler = self._background_update_handlers[update_name] performance = self._background_update_performance.get(update_name) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 6e4d9b1373..4944cb0d2e 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -10,7 +10,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - hs = yield setup_test_homeserver() + hs = yield setup_test_homeserver() # type: synapse.server.HomeServer self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -20,11 +20,20 @@ class BackgroundUpdateTestCase(unittest.TestCase): "test_update", self.update_handler ) + # run the real background updates, to get them out the way + # (perhaps we should run them as part of the test HS setup, since we + # run all of the other schema setup stuff there?) + while True: + res = yield self.store.do_next_background_update(1000) + if res is None: + break + @defer.inlineCallbacks def test_do_background_update(self): desired_count = 1000 duration_ms = 42 + # first step: make a bit of progress @defer.inlineCallbacks def update(progress, count): self.clock.advance_time_msec(count * duration_ms) @@ -42,7 +51,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): yield self.store.start_background_update("test_update", {"my_key": 1}) self.update_handler.reset_mock() - result = yield self.store.do_background_update( + result = yield self.store.do_next_background_update( duration_ms * desired_count ) self.assertIsNotNone(result) @@ -50,24 +59,25 @@ class BackgroundUpdateTestCase(unittest.TestCase): {"my_key": 1}, self.store.DEFAULT_BACKGROUND_BATCH_SIZE ) + # second step: complete the update @defer.inlineCallbacks def update(progress, count): yield self.store._end_background_update("test_update") defer.returnValue(count) self.update_handler.side_effect = update - self.update_handler.reset_mock() - result = yield self.store.do_background_update( - duration_ms * desired_count + result = yield self.store.do_next_background_update( + duration_ms * desired_count ) self.assertIsNotNone(result) self.update_handler.assert_called_once_with( {"my_key": 2}, desired_count ) + # third step: we don't expect to be called any more self.update_handler.reset_mock() - result = yield self.store.do_background_update( + result = yield self.store.do_next_background_update( duration_ms * desired_count ) self.assertIsNone(result) From f16f0e169d30e6920b892ee772693199b16713fd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 12:12:47 +0100 Subject: [PATCH 282/414] Slightly saner logging for unittests 1. Give the handler used for logging in unit tests a formatter, so that the output is slightly more meaningful 2. Log some synapse.storage stuff, because it's useful. 
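For reference, the formatter added below renders a captured record along
these lines (a hypothetical record, purely for illustration):

    ERROR:synapse.storage:query failed [synapse/storage/_base.py:123]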
--- tests/unittest.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/unittest.py b/tests/unittest.py index 5b22abfe74..38715972dd 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -17,13 +17,18 @@ from twisted.trial import unittest import logging - # logging doesn't have a "don't log anything at all EVARRRR setting, # but since the highest value is 50, 1000000 should do ;) NEVER = 1000000 -logging.getLogger().addHandler(logging.StreamHandler()) +handler = logging.StreamHandler() +handler.setFormatter(logging.Formatter( + "%(levelname)s:%(name)s:%(message)s [%(pathname)s:%(lineno)d]" +)) +logging.getLogger().addHandler(handler) logging.getLogger().setLevel(NEVER) +logging.getLogger("synapse.storage.SQL").setLevel(NEVER) +logging.getLogger("synapse.storage.txn").setLevel(NEVER) def around(target): @@ -70,8 +75,6 @@ class TestCase(unittest.TestCase): return ret logging.getLogger().setLevel(level) - # Don't set SQL logging - logging.getLogger("synapse.storage").setLevel(old_level) return orig() def assertObjectHasAttributes(self, attrs, obj): From 42f4feb2b709671bb2dbbabfe1aad7e951479652 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 12:25:06 +0100 Subject: [PATCH 283/414] PEP8 --- tests/storage/test_background_update.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 4944cb0d2e..1286b4ce2d 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -68,7 +68,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): self.update_handler.side_effect = update self.update_handler.reset_mock() result = yield self.store.do_next_background_update( - duration_ms * desired_count + duration_ms * desired_count ) self.assertIsNotNone(result) self.update_handler.assert_called_once_with( From 9dbd903f4108c81499205ff80d9d420911fd0f54 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 14:05:23 +0100 Subject: [PATCH 284/414] background updates: Fix assertion to do something --- synapse/storage/background_updates.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 2771f7c3c1..321c889b2f 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -88,8 +88,8 @@ class BackgroundUpdateStore(SQLBaseStore): @defer.inlineCallbacks def start_doing_background_updates(self): - assert(self._background_update_timer is not None, - "background updates already running") + assert self._background_update_timer is not None, \ + "background updates already running" logger.info("Starting background schema updates") From 2ee4c9ee023a50ae7c0800c34c609886fb27298f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 16:01:46 +0100 Subject: [PATCH 285/414] background updates: fix assert again --- synapse/storage/background_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 321c889b2f..af9bfbbe47 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -88,7 +88,7 @@ class BackgroundUpdateStore(SQLBaseStore): @defer.inlineCallbacks def start_doing_background_updates(self): - assert self._background_update_timer is not None, \ + assert self._background_update_timer is None, \ "background 
updates already running" logger.info("Starting background schema updates") From 955ef1f06caee7385cb5ef21477b4d0490889c3c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 25 Jul 2016 16:04:45 +0100 Subject: [PATCH 286/414] fix: defer.returnValue takes one argument --- synapse/storage/background_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index af9bfbbe47..30d0e4c5dc 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -115,7 +115,7 @@ class BackgroundUpdateStore(SQLBaseStore): "No more background updates to do." " Unscheduling background update task." ) - defer.returnValue() + defer.returnValue(None) @defer.inlineCallbacks def do_next_background_update(self, desired_duration_ms): From 2623cec8746392067e781164e8ed4f2236b15bec Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 25 Jul 2016 16:12:16 +0100 Subject: [PATCH 287/414] Don't add rejections to the state_group, persist all rejections --- synapse/storage/events.py | 9 +++++---- synapse/storage/state.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6610549281..41c9b17d14 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -591,10 +591,11 @@ class EventsStore(SQLBaseStore): ], ) - if context.rejected: - self._store_rejections_txn( - txn, event.event_id, context.rejected - ) + for event, context in events_and_contexts: + if context.rejected: + self._store_rejections_txn( + txn, event.event_id, context.rejected + ) self._simple_insert_many_txn( txn, diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 5b743db67a..cc1c7ec6a7 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -79,7 +79,7 @@ class StateStore(SQLBaseStore): state_events = dict(context.current_state) - if event.is_state(): + if event.is_state() and not context.rejected: state_events[(event.type, event.state_key)] = event state_group = context.new_state_group_id From 8f7f4cb92baa1eb8c772644e2567fe56d563b4b9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 25 Jul 2016 17:13:37 +0100 Subject: [PATCH 288/414] Don't add the events to forward extremities if the event is rejected --- synapse/storage/events.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 41c9b17d14..201a4455fa 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -498,8 +498,8 @@ class EventsStore(SQLBaseStore): sql, (False, event.event_id,) ) - - self._update_extremeties(txn, [event]) + if not context.rejected: + self._update_extremeties(txn, [event]) events_and_contexts = [ ec for ec in events_and_contexts if ec[0] not in to_remove @@ -512,7 +512,10 @@ class EventsStore(SQLBaseStore): self._handle_mult_prev_events( txn, - events=[event for event, _ in events_and_contexts], + events=[ + event for event, context in events_and_contexts + if not context.rejected + ], ) for event, _ in events_and_contexts: From 33d08e843368a9caf01835ec4d56160fdc0f9469 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 21 Jul 2016 15:56:57 +0100 Subject: [PATCH 289/414] Log when adding listeners --- synapse/http/server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/http/server.py b/synapse/http/server.py index f705abab94..2b3c05a740 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ 
-205,6 +205,7 @@ class JsonResource(HttpServer, resource.Resource): def register_paths(self, method, path_patterns, callback): for path_pattern in path_patterns: + logger.debug("Registering for %s %s", method, path_pattern.pattern) self.path_regexs.setdefault(method, []).append( self._PathEntry(path_pattern, callback) ) From 1b3c3e6d68bf503bf09e046ecf57bb652669e637 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Mon, 25 Jul 2016 18:44:30 +0100 Subject: [PATCH 290/414] Only update the events and event_json tables for rejected events --- synapse/storage/events.py | 113 +++++++++++++++++++++----------------- synapse/storage/state.py | 2 +- 2 files changed, 63 insertions(+), 52 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 201a4455fa..c38a631081 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -407,21 +407,11 @@ class EventsStore(SQLBaseStore): event.room_id, event.internal_metadata.stream_ordering, ) - if not event.internal_metadata.is_outlier(): + if not event.internal_metadata.is_outlier() and not context.rejected: depth_updates[event.room_id] = max( event.depth, depth_updates.get(event.room_id, event.depth) ) - if context.push_actions: - self._set_push_actions_for_event_and_users_txn( - txn, event, context.push_actions - ) - - if event.type == EventTypes.Redaction and event.redacts is not None: - self._remove_push_actions_for_event_id_txn( - txn, event.room_id, event.redacts - ) - for room_id, depth in depth_updates.items(): self._update_min_depth_for_room_txn(txn, room_id, depth) @@ -431,6 +421,7 @@ class EventsStore(SQLBaseStore): ), [event.event_id for event, _ in events_and_contexts] ) + have_persisted = { event_id: outlier for event_id, outlier in txn.fetchall() @@ -442,6 +433,9 @@ class EventsStore(SQLBaseStore): # Handle the case of the list including the same event multiple # times. The tricky thing here is when they differ by whether # they are an outlier. + if context.rejected: + continue + if event.event_id in event_map: other = event_map[event.event_id] @@ -498,8 +492,8 @@ class EventsStore(SQLBaseStore): sql, (False, event.event_id,) ) - if not context.rejected: - self._update_extremeties(txn, [event]) + + self._update_extremeties(txn, [event]) events_and_contexts = [ ec for ec in events_and_contexts if ec[0] not in to_remove @@ -508,39 +502,8 @@ class EventsStore(SQLBaseStore): if not events_and_contexts: return - self._store_mult_state_groups_txn(txn, events_and_contexts) - - self._handle_mult_prev_events( - txn, - events=[ - event for event, context in events_and_contexts - if not context.rejected - ], - ) - - for event, _ in events_and_contexts: - if event.type == EventTypes.Name: - self._store_room_name_txn(txn, event) - elif event.type == EventTypes.Topic: - self._store_room_topic_txn(txn, event) - elif event.type == EventTypes.Message: - self._store_room_message_txn(txn, event) - elif event.type == EventTypes.Redaction: - self._store_redaction(txn, event) - elif event.type == EventTypes.RoomHistoryVisibility: - self._store_history_visibility_txn(txn, event) - elif event.type == EventTypes.GuestAccess: - self._store_guest_access_txn(txn, event) - - self._store_room_members_txn( - txn, - [ - event - for event, _ in events_and_contexts - if event.type == EventTypes.Member - ], - backfilled=backfilled, - ) + # From this point onwards the events are only events that we haven't + # seen before. 
def event_dict(event): return { @@ -594,11 +557,28 @@ class EventsStore(SQLBaseStore): ], ) + to_remove = set() for event, context in events_and_contexts: if context.rejected: self._store_rejections_txn( txn, event.event_id, context.rejected ) + to_remove.add(event.event_id) + + events_and_contexts = [ + ec for ec in events_and_contexts if ec[0].event_id not in to_remove + ] + + if not events_and_contexts: + return + + # From this point onwards the events are only ones that weren't rejected. + + for event, context in events_and_contexts: + if context.push_actions: + self._set_push_actions_for_event_and_users_txn( + txn, event, context.push_actions + ) self._simple_insert_many_txn( txn, @@ -614,6 +594,42 @@ class EventsStore(SQLBaseStore): ], ) + if event.type == EventTypes.Redaction and event.redacts is not None: + self._remove_push_actions_for_event_id_txn( + txn, event.room_id, event.redacts + ) + + self._store_mult_state_groups_txn(txn, events_and_contexts) + + self._handle_mult_prev_events( + txn, + events=[event for event, _ in events_and_contexts], + ) + + for event, _ in events_and_contexts: + if event.type == EventTypes.Name: + self._store_room_name_txn(txn, event) + elif event.type == EventTypes.Topic: + self._store_room_topic_txn(txn, event) + elif event.type == EventTypes.Message: + self._store_room_message_txn(txn, event) + elif event.type == EventTypes.Redaction: + self._store_redaction(txn, event) + elif event.type == EventTypes.RoomHistoryVisibility: + self._store_history_visibility_txn(txn, event) + elif event.type == EventTypes.GuestAccess: + self._store_guest_access_txn(txn, event) + + self._store_room_members_txn( + txn, + [ + event + for event, _ in events_and_contexts + if event.type == EventTypes.Member + ], + backfilled=backfilled, + ) + self._store_event_reference_hashes_txn( txn, [event for event, _ in events_and_contexts] ) @@ -670,11 +686,6 @@ class EventsStore(SQLBaseStore): # Outlier events shouldn't clobber the current state. continue - if context.rejected: - # If the event failed it's auth checks then it shouldn't - # clobbler the current state. 
- continue
-
+ txn.call_after(
self._get_current_state_for_key.invalidate,
(event.room_id, event.type, event.state_key,)
)
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index cc1c7ec6a7..5b743db67a 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -79,7 +79,7 @@ class StateStore(SQLBaseStore):
state_events = dict(context.current_state)
- if event.is_state() and not context.rejected:
+ if event.is_state():
state_events[(event.type, event.state_key)] = event
state_group = context.new_state_group_id
From 436bffd15fb8382a0d2dddd3c6f7a077ba751da2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 22 Jul 2016 14:52:53 +0100
Subject: [PATCH 291/414] Implement deleting devices

---
 synapse/handlers/auth.py | 22 +++++++++++++--
 synapse/handlers/device.py | 27 ++++++++++++++++++-
 synapse/rest/client/v1/login.py | 13 ++++++---
 synapse/rest/client/v2_alpha/devices.py | 14 ++++++++++
 synapse/rest/client/v2_alpha/register.py | 10 +++----
 synapse/storage/devices.py | 15 +++++++++++
 synapse/storage/registration.py | 26 +++++++++++++++---
 .../delta/33/access_tokens_device_index.sql | 17 ++++++++++++
 .../delta/33/refreshtoken_device_index.sql | 17 ++++++++++++
 tests/handlers/test_device.py | 22 +++++++++++++--
 tests/rest/client/v2_alpha/test_register.py | 14 +++++++---
 11 files changed, 176 insertions(+), 21 deletions(-)
 create mode 100644 synapse/storage/schema/delta/33/access_tokens_device_index.sql
 create mode 100644 synapse/storage/schema/delta/33/refreshtoken_device_index.sql

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 6fff7e7d03..2e138f328f 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -77,6 +77,7 @@ class AuthHandler(BaseHandler):
self.ldap_bind_password = hs.config.ldap_bind_password
self.hs = hs # FIXME better possibility to access registrationHandler later?
+ self.device_handler = hs.get_device_handler()
@defer.inlineCallbacks
def check_auth(self, flows, clientdict, clientip):
@@ -374,7 +375,8 @@ class AuthHandler(BaseHandler):
return self._check_password(user_id, password)
@defer.inlineCallbacks
- def get_login_tuple_for_user_id(self, user_id, device_id=None):
+ def get_login_tuple_for_user_id(self, user_id, device_id=None,
+ initial_display_name=None):
"""
Gets login tuple for the user with the given user ID.
@@ -383,9 +385,15 @@
The user is assumed to have been authenticated by some other
mechanism (e.g. CAS), and the user_id converted to the canonical case.
+ The device will be recorded in the table if it is not there already.
+
Args:
user_id (str): canonical User ID
- device_id (str): the device ID to associate with the access token
+ device_id (str|None): the device ID to associate with the tokens.
+ None to leave the tokens unassociated with a device (deprecated:
+ we should always have a device ID)
+ initial_display_name (str): display name to associate with the
+ device if it needs re-registering
Returns:
A tuple of:
The access token for the user's session.
@@ -397,6 +405,16 @@
logger.info("Logging in user %s on device %s", user_id, device_id)
access_token = yield self.issue_access_token(user_id, device_id)
refresh_token = yield self.issue_refresh_token(user_id, device_id)
+
+ # the device *should* have been registered before we got here; however,
+ # it's possible we raced against a DELETE operation. The thing we
+ # really don't want is active access_tokens without a record of the
+ # device, so we double-check it here.
+ if device_id is not None: + yield self.device_handler.check_device_registered( + user_id, device_id, initial_display_name + ) + defer.returnValue((access_token, refresh_token)) @defer.inlineCallbacks diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 1f9e15c33c..a7a192e1c9 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -100,7 +100,7 @@ class DeviceHandler(BaseHandler): Args: user_id (str): - device_id (str) + device_id (str): Returns: defer.Deferred: dict[str, X]: info on the device @@ -117,6 +117,31 @@ class DeviceHandler(BaseHandler): _update_device_from_client_ips(device, ips) defer.returnValue(device) + @defer.inlineCallbacks + def delete_device(self, user_id, device_id): + """ Delete the given device + + Args: + user_id (str): + device_id (str): + + Returns: + defer.Deferred: + """ + + try: + yield self.store.delete_device(user_id, device_id) + except errors.StoreError, e: + if e.code == 404: + # no match + pass + else: + raise + + yield self.store.user_delete_access_tokens(user_id, + device_id=device_id) + + def _update_device_from_client_ips(device, client_ips): ip = client_ips.get((device["user_id"], device["device_id"]), {}) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index e8b791519c..92fcae674a 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -152,7 +152,10 @@ class LoginRestServlet(ClientV1RestServlet): ) device_id = yield self._register_device(user_id, login_submission) access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id, device_id) + yield auth_handler.get_login_tuple_for_user_id( + user_id, device_id, + login_submission.get("initial_device_display_name") + ) ) result = { "user_id": user_id, # may have changed @@ -173,7 +176,10 @@ class LoginRestServlet(ClientV1RestServlet): ) device_id = yield self._register_device(user_id, login_submission) access_token, refresh_token = ( - yield auth_handler.get_login_tuple_for_user_id(user_id, device_id) + yield auth_handler.get_login_tuple_for_user_id( + user_id, device_id, + login_submission.get("initial_device_display_name") + ) ) result = { "user_id": user_id, # may have changed @@ -262,7 +268,8 @@ class LoginRestServlet(ClientV1RestServlet): ) access_token, refresh_token = ( yield auth_handler.get_login_tuple_for_user_id( - registered_user_id, device_id + registered_user_id, device_id, + login_submission.get("initial_device_display_name") ) ) result = { diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 8b9ab4f674..30ef8b3da9 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -70,6 +70,20 @@ class DeviceRestServlet(RestServlet): ) defer.returnValue((200, device)) + @defer.inlineCallbacks + def on_DELETE(self, request, device_id): + # XXX: it's not completely obvious we want to expose this endpoint. + # It allows the client to delete access tokens, which feels like a + # thing which merits extra auth. But if we want to do the interactive- + # auth dance, we should really make it possible to delete more than one + # device at a time. 
+ requester = yield self.auth.get_user_by_req(request) + yield self.device_handler.delete_device( + requester.user.to_string(), + device_id, + ) + defer.returnValue((200, {})) + def register_servlets(hs, http_server): DevicesRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c8c9395fc6..9f599ea8bb 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -374,13 +374,13 @@ class RegisterRestServlet(RestServlet): """ device_id = yield self._register_device(user_id, params) - access_token = yield self.auth_handler.issue_access_token( - user_id, device_id=device_id + access_token, refresh_token = ( + yield self.auth_handler.get_login_tuple_for_user_id( + user_id, device_id=device_id, + initial_display_name=params.get("initial_device_display_name") + ) ) - refresh_token = yield self.auth_handler.issue_refresh_token( - user_id, device_id=device_id - ) defer.returnValue({ "user_id": user_id, "access_token": access_token, diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 1cc6e07f2b..4689980f80 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -76,6 +76,21 @@ class DeviceStore(SQLBaseStore): desc="get_device", ) + def delete_device(self, user_id, device_id): + """Delete a device. + + Args: + user_id (str): The ID of the user which owns the device + device_id (str): The ID of the device to retrieve + Returns: + defer.Deferred + """ + return self._simple_delete_one( + table="devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + desc="delete_device", + ) + @defer.inlineCallbacks def get_devices_by_user(self, user_id): """Retrieve all of a user's registered devices. diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9a92b35361..935e82bf7a 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -18,18 +18,31 @@ import re from twisted.internet import defer from synapse.api.errors import StoreError, Codes - -from ._base import SQLBaseStore +from synapse.storage import background_updates from synapse.util.caches.descriptors import cached, cachedInlineCallbacks -class RegistrationStore(SQLBaseStore): +class RegistrationStore(background_updates.BackgroundUpdateStore): def __init__(self, hs): super(RegistrationStore, self).__init__(hs) self.clock = hs.get_clock() + self.register_background_index_update( + "access_tokens_device_index", + index_name="access_tokens_device_id", + table="access_tokens", + columns=["user_id", "device_id"], + ) + + self.register_background_index_update( + "refresh_tokens_device_index", + index_name="refresh_tokens_device_id", + table="refresh_tokens", + columns=["user_id", "device_id"], + ) + @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. @@ -238,11 +251,16 @@ class RegistrationStore(SQLBaseStore): self.get_user_by_id.invalidate((user_id,)) @defer.inlineCallbacks - def user_delete_access_tokens(self, user_id, except_token_ids=[]): + def user_delete_access_tokens(self, user_id, except_token_ids=[], + device_id=None): def f(txn): sql = "SELECT token FROM access_tokens WHERE user_id = ?" clauses = [user_id] + if device_id is not None: + sql += " AND device_id = ?" + clauses.append(device_id) + if except_token_ids: sql += " AND id NOT IN (%s)" % ( ",".join(["?" 
for _ in except_token_ids]), diff --git a/synapse/storage/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/schema/delta/33/access_tokens_device_index.sql new file mode 100644 index 0000000000..61ad3fe3e8 --- /dev/null +++ b/synapse/storage/schema/delta/33/access_tokens_device_index.sql @@ -0,0 +1,17 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('access_tokens_device_index', '{}'); diff --git a/synapse/storage/schema/delta/33/refreshtoken_device_index.sql b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql new file mode 100644 index 0000000000..bb225dafbf --- /dev/null +++ b/synapse/storage/schema/delta/33/refreshtoken_device_index.sql @@ -0,0 +1,17 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('refresh_tokens_device_index', '{}'); diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 331aa13fed..214e722eb3 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -12,11 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from synapse import types + from twisted.internet import defer +import synapse.api.errors import synapse.handlers.device + import synapse.storage +from synapse import types from tests import unittest, utils user1 = "@boris:aaa" @@ -27,7 +30,7 @@ class DeviceTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(DeviceTestCase, self).__init__(*args, **kwargs) self.store = None # type: synapse.storage.DataStore - self.handler = None # type: device.DeviceHandler + self.handler = None # type: synapse.handlers.device.DeviceHandler self.clock = None # type: utils.MockClock @defer.inlineCallbacks @@ -123,6 +126,21 @@ class DeviceTestCase(unittest.TestCase): "last_seen_ts": 3000000, }, res) + @defer.inlineCallbacks + def test_delete_device(self): + yield self._record_users() + + # delete the device + yield self.handler.delete_device(user1, "abc") + + # check the device was deleted + with self.assertRaises(synapse.api.errors.NotFoundError): + yield self.handler.get_device(user1, "abc") + + # we'd like to check the access token was invalidated, but that's a + # bit of a PITA. 
+ + @defer.inlineCallbacks def _record_users(self): # check this works for both devices which have a recorded client_ip, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 3bd7065e32..8ac56a1fb2 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -65,13 +65,16 @@ class RegisterRestServletTestCase(unittest.TestCase): self.registration_handler.appservice_register = Mock( return_value=user_id ) - self.auth_handler.issue_access_token = Mock(return_value=token) + self.auth_handler.get_login_tuple_for_user_id = Mock( + return_value=(token, "kermits_refresh_token") + ) (code, result) = yield self.servlet.on_POST(self.request) self.assertEquals(code, 200) det_data = { "user_id": user_id, "access_token": token, + "refresh_token": "kermits_refresh_token", "home_server": self.hs.hostname } self.assertDictContainsSubset(det_data, result) @@ -121,7 +124,9 @@ class RegisterRestServletTestCase(unittest.TestCase): "password": "monkey" }, None) self.registration_handler.register = Mock(return_value=(user_id, None)) - self.auth_handler.issue_access_token = Mock(return_value=token) + self.auth_handler.get_login_tuple_for_user_id = Mock( + return_value=(token, "kermits_refresh_token") + ) self.device_handler.check_device_registered = \ Mock(return_value=device_id) @@ -130,13 +135,14 @@ class RegisterRestServletTestCase(unittest.TestCase): det_data = { "user_id": user_id, "access_token": token, + "refresh_token": "kermits_refresh_token", "home_server": self.hs.hostname, "device_id": device_id, } self.assertDictContainsSubset(det_data, result) self.assertIn("refresh_token", result) - self.auth_handler.issue_access_token.assert_called_once_with( - user_id, device_id=device_id) + self.auth_handler.get_login_tuple_for_user_id( + user_id, device_id=device_id, initial_device_display_name=None) def test_POST_disabled_registration(self): self.hs.config.enable_registration = False From 012b4c19132d57fdbc1b6b0e304eb60eaf19200f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 25 Jul 2016 17:51:24 +0100 Subject: [PATCH 292/414] Implement updating devices You can update the displayname of devices now. 
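As an illustrative sketch of how a client might drive the new endpoint (the homeserver URL, device ID and access token below are placeholders, and the unstable path prefix is an assumption inferred from the servlet registration in the diff below):

    import requests

    # Rename a device. The server replies 200 with an empty JSON object
    # on success, or 404 if the device ID is unknown.
    resp = requests.put(
        "http://localhost:8008/_matrix/client/unstable/devices/ABCDEFGH",
        params={"access_token": "MDAxSECRET"},  # placeholder token
        json={"display_name": "my new phone"},
    )
    assert resp.status_code == 200

Only the display name is updatable for now; other keys in the body are ignored.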
--- synapse/handlers/device.py | 24 +++++++++++++++++ synapse/rest/client/v2_alpha/devices.py | 26 ++++++++++++------ synapse/storage/devices.py | 27 ++++++++++++++++++- tests/handlers/test_device.py | 16 +++++++++++ tests/storage/test_devices.py | 36 +++++++++++++++++++++++++ 5 files changed, 120 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index a7a192e1c9..9e65d85e6d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -141,6 +141,30 @@ class DeviceHandler(BaseHandler): yield self.store.user_delete_access_tokens(user_id, device_id=device_id) + @defer.inlineCallbacks + def update_device(self, user_id, device_id, content): + """ Update the given device + + Args: + user_id (str): + device_id (str): + content (dict): body of update request + + Returns: + defer.Deferred: + """ + + try: + yield self.store.update_device( + user_id, + device_id, + new_display_name=content.get("display_name") + ) + except errors.StoreError, e: + if e.code == 404: + raise errors.NotFoundError() + else: + raise def _update_device_from_client_ips(device, client_ips): diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 30ef8b3da9..8fbd3d3dfc 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -13,19 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - -from synapse.http.servlet import RestServlet - -from ._base import client_v2_patterns - import logging +from twisted.internet import defer + +from synapse.http import servlet +from ._base import client_v2_patterns logger = logging.getLogger(__name__) -class DevicesRestServlet(RestServlet): +class DevicesRestServlet(servlet.RestServlet): PATTERNS = client_v2_patterns("/devices$", releases=[], v2_alpha=False) def __init__(self, hs): @@ -47,7 +45,7 @@ class DevicesRestServlet(RestServlet): defer.returnValue((200, {"devices": devices})) -class DeviceRestServlet(RestServlet): +class DeviceRestServlet(servlet.RestServlet): PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$", releases=[], v2_alpha=False) @@ -84,6 +82,18 @@ class DeviceRestServlet(RestServlet): ) defer.returnValue((200, {})) + @defer.inlineCallbacks + def on_PUT(self, request, device_id): + requester = yield self.auth.get_user_by_req(request) + + body = servlet.parse_json_object_from_request(request) + yield self.device_handler.update_device( + requester.user.to_string(), + device_id, + body + ) + defer.returnValue((200, {})) + def register_servlets(hs, http_server): DevicesRestServlet(hs).register(http_server) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 4689980f80..afd6530cab 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -81,7 +81,7 @@ class DeviceStore(SQLBaseStore): Args: user_id (str): The ID of the user which owns the device - device_id (str): The ID of the device to retrieve + device_id (str): The ID of the device to delete Returns: defer.Deferred """ @@ -91,6 +91,31 @@ class DeviceStore(SQLBaseStore): desc="delete_device", ) + def update_device(self, user_id, device_id, new_display_name=None): + """Update a device. 
+ + Args: + user_id (str): The ID of the user which owns the device + device_id (str): The ID of the device to update + new_display_name (str|None): new displayname for device; None + to leave unchanged + Raises: + StoreError: if the device is not found + Returns: + defer.Deferred + """ + updates = {} + if new_display_name is not None: + updates["display_name"] = new_display_name + if not updates: + return defer.succeed(None) + return self._simple_update_one( + table="devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + updatevalues=updates, + desc="update_device", + ) + @defer.inlineCallbacks def get_devices_by_user(self, user_id): """Retrieve all of a user's registered devices. diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 214e722eb3..85a970a6c9 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -140,6 +140,22 @@ class DeviceTestCase(unittest.TestCase): # we'd like to check the access token was invalidated, but that's a # bit of a PITA. + @defer.inlineCallbacks + def test_update_device(self): + yield self._record_users() + + update = {"display_name": "new display"} + yield self.handler.update_device(user1, "abc", update) + + res = yield self.handler.get_device(user1, "abc") + self.assertEqual(res["display_name"], "new display") + + @defer.inlineCallbacks + def test_update_unknown_device(self): + update = {"display_name": "new_display"} + with self.assertRaises(synapse.api.errors.NotFoundError): + yield self.handler.update_device("user_id", "unknown_device_id", + update) @defer.inlineCallbacks def _record_users(self): diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index a6ce993375..f8725acea0 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -15,6 +15,7 @@ from twisted.internet import defer +import synapse.api.errors import tests.unittest import tests.utils @@ -67,3 +68,38 @@ class DeviceStoreTestCase(tests.unittest.TestCase): "device_id": "device2", "display_name": "display_name 2", }, res["device2"]) + + @defer.inlineCallbacks + def test_update_device(self): + yield self.store.store_device( + "user_id", "device_id", "display_name 1" + ) + + res = yield self.store.get_device("user_id", "device_id") + self.assertEqual("display_name 1", res["display_name"]) + + # do a no-op first + yield self.store.update_device( + "user_id", "device_id", + ) + res = yield self.store.get_device("user_id", "device_id") + self.assertEqual("display_name 1", res["display_name"]) + + # do the update + yield self.store.update_device( + "user_id", "device_id", + new_display_name="display_name 2", + ) + + # check it worked + res = yield self.store.get_device("user_id", "device_id") + self.assertEqual("display_name 2", res["display_name"]) + + @defer.inlineCallbacks + def test_update_unknown_device(self): + with self.assertRaises(synapse.api.errors.StoreError) as cm: + yield self.store.update_device( + "user_id", "unknown_device_id", + new_display_name="display_name 2", + ) + self.assertEqual(404, cm.exception.code) From 242c52d607da68f48b3a4bce980663e0e5f103c6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Tue, 26 Jul 2016 10:09:25 +0200 Subject: [PATCH 293/414] typo --- synapse/util/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index e1f374807e..0b944d3e63 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -84,7 +84,7 @@ class Measure(object): if context != 
self.start_context:
logger.warn(
- "Context have unexpectedly changed from '%s' to '%s'. (%r)",
+ "Context has unexpectedly changed from '%s' to '%s'. (%r)",
context, self.start_context, self.name
)
return
From 1a54513cf124f5796654c990c469a1a1b893909d Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Tue, 26 Jul 2016 10:09:37 +0200
Subject: [PATCH 294/414] federation doesn't work over ipv6 yet thanks to
 twisted

---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index ebcb15a977..89458badc9 100644
--- a/README.rst
+++ b/README.rst
@@ -445,7 +445,7 @@ You have two choices here, which will influence the form of your Matrix user
IDs:
1) Use the machine's own hostname as available on public DNS in the form of
- its A or AAAA records. This is easier to set up initially, perhaps for
+ its A records. This is easier to set up initially, perhaps for
testing, but lacks the flexibility of SRV.
2) Set up a SRV record for your domain name. This requires you create a SRV
From efeb6176c169835465eeb6184ead940a89b93b4e Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 26 Jul 2016 10:49:52 +0100
Subject: [PATCH 295/414] Don't add rejected events if we've seen them before.
 Add some comments to explain what the code is doing mechanically

---
 synapse/storage/events.py | 53 +++++++++++++++++++++++++++++++++++----
 1 file changed, 48 insertions(+), 5 deletions(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index c38a631081..25a2be2795 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -397,6 +397,12 @@ class EventsStore(SQLBaseStore):
@log_function
def _persist_events_txn(self, txn, events_and_contexts, backfilled):
+ """Insert some number of room events into the necessary database tables.
+
+ Rejected events are only inserted into the events table, the events_json table,
+ and the rejections table. Things reading from those tables will need to check
+ whether the event was rejected.
+ """
depth_updates = {}
for event, context in events_and_contexts:
# Remove any existing cache entries for the event_ids
@@ -427,15 +433,21 @@ class EventsStore(SQLBaseStore):
for event_id, outlier in txn.fetchall()
}
+ # Remove the events that we've seen before.
event_map = {}
to_remove = set()
for event, context in events_and_contexts:
+ if context.rejected:
+ # If the event is rejected then we don't care if the event
+ # was an outlier or not.
+ if event.event_id in have_persisted:
+ # If we have already seen the event then ignore it.
+ to_remove.add(event)
+ continue
+
# Handle the case of the list including the same event multiple
# times. The tricky thing here is when they differ by whether
# they are an outlier.
- if context.rejected:
- continue
-
if event.event_id in event_map:
other = event_map[event.event_id]
@@ -457,6 +469,12 @@ class EventsStore(SQLBaseStore):
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
+ # We received a copy of an event that we had already stored as
+ # an outlier in the database. We now have some state at that
+ # event, so we need to update the state_groups table with that state.
+
+ # insert into the state_group, state_groups_state and
+ # event_to_state_groups tables.
self._store_mult_state_groups_txn(txn, ((event, context),)) metadata_json = encode_json( @@ -472,6 +490,8 @@ class EventsStore(SQLBaseStore): (metadata_json, event.event_id,) ) + # Add an entry to the ex_outlier_stream table to replicate the + # change in outlier status to our workers. stream_order = event.internal_metadata.stream_ordering state_group_id = context.state_group or context.new_state_group_id self._simple_insert_txn( @@ -493,6 +513,8 @@ class EventsStore(SQLBaseStore): (False, event.event_id,) ) + # Update the event_backward_extremities table now that this + # event isn't an outlier any more. self._update_extremeties(txn, [event]) events_and_contexts = [ @@ -557,24 +579,30 @@ class EventsStore(SQLBaseStore): ], ) + # Remove the rejected events from the list now that we've added them + # to the events table and the events_json table. to_remove = set() for event, context in events_and_contexts: if context.rejected: + # Insert the event_id into the rejections table self._store_rejections_txn( txn, event.event_id, context.rejected ) - to_remove.add(event.event_id) + to_remove.add(event) events_and_contexts = [ - ec for ec in events_and_contexts if ec[0].event_id not in to_remove + ec for ec in events_and_contexts if ec[0] not in to_remove ] if not events_and_contexts: + # Make sure we don't pass an empty list to functions that expect to + # be storing at least one element. return # From this point onwards the events are only ones that weren't rejected. for event, context in events_and_contexts: + # Insert all the push actions into the event_push_actions table. if context.push_actions: self._set_push_actions_for_event_and_users_txn( txn, event, context.push_actions @@ -595,12 +623,18 @@ class EventsStore(SQLBaseStore): ) if event.type == EventTypes.Redaction and event.redacts is not None: + # Remove the entries in the event_push_actions table for the + # redacted event. self._remove_push_actions_for_event_id_txn( txn, event.room_id, event.redacts ) + # Insert into the state_groups, state_groups_state, and + # event_to_state_groups tables. self._store_mult_state_groups_txn(txn, events_and_contexts) + # Update the event_forward_extremities, event_backward_extremities and + # event_edges tables. self._handle_mult_prev_events( txn, events=[event for event, _ in events_and_contexts], @@ -608,18 +642,25 @@ class EventsStore(SQLBaseStore): for event, _ in events_and_contexts: if event.type == EventTypes.Name: + # Insert into the room_names and event_search tables. self._store_room_name_txn(txn, event) elif event.type == EventTypes.Topic: + # Insert into the topics table and event_search table. self._store_room_topic_txn(txn, event) elif event.type == EventTypes.Message: + # Insert into the event_search table. self._store_room_message_txn(txn, event) elif event.type == EventTypes.Redaction: + # Insert into the redactions table. self._store_redaction(txn, event) elif event.type == EventTypes.RoomHistoryVisibility: + # Insert into the event_search table. self._store_history_visibility_txn(txn, event) elif event.type == EventTypes.GuestAccess: + # Insert into the event_search table. self._store_guest_access_txn(txn, event) + # Insert into the room_memberships table. self._store_room_members_txn( txn, [ @@ -630,6 +671,7 @@ class EventsStore(SQLBaseStore): backfilled=backfilled, ) + # Insert event_reference_hashes table. 
self._store_event_reference_hashes_txn( txn, [event for event, _ in events_and_contexts] ) @@ -674,6 +716,7 @@ class EventsStore(SQLBaseStore): ], ) + # Prefil the event cache self._add_to_cache(txn, events_and_contexts) if backfilled: From a6f06ce3e280cfa18f51748a7d4327001658db40 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 26 Jul 2016 11:05:39 +0100 Subject: [PATCH 296/414] Fix how push_actions are redacted. --- synapse/storage/events.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 25a2be2795..c63ca36df6 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -522,6 +522,8 @@ class EventsStore(SQLBaseStore): ] if not events_and_contexts: + # Make sure we don't pass an empty list to functions that expect to + # be storing at least one element. return # From this point onwards the events are only events that we haven't @@ -608,6 +610,13 @@ class EventsStore(SQLBaseStore): txn, event, context.push_actions ) + if event.type == EventTypes.Redaction and event.redacts is not None: + # Remove the entries in the event_push_actions table for the + # redacted event. + self._remove_push_actions_for_event_id_txn( + txn, event.room_id, event.redacts + ) + self._simple_insert_many_txn( txn, table="event_auth", @@ -622,13 +631,6 @@ class EventsStore(SQLBaseStore): ], ) - if event.type == EventTypes.Redaction and event.redacts is not None: - # Remove the entries in the event_push_actions table for the - # redacted event. - self._remove_push_actions_for_event_id_txn( - txn, event.room_id, event.redacts - ) - # Insert into the state_groups, state_groups_state, and # event_to_state_groups tables. self._store_mult_state_groups_txn(txn, events_and_contexts) @@ -716,7 +718,7 @@ class EventsStore(SQLBaseStore): ], ) - # Prefil the event cache + # Prefill the event cache self._add_to_cache(txn, events_and_contexts) if backfilled: From 8e0249416643f20f0c4cd8f2e19cf45ea63289d3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 11:09:47 +0100 Subject: [PATCH 297/414] Delete refresh tokens when deleting devices --- synapse/handlers/device.py | 6 ++-- synapse/storage/registration.py | 58 +++++++++++++++++++++++------- tests/storage/test_registration.py | 34 ++++++++++++++++++ 3 files changed, 83 insertions(+), 15 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 9e65d85e6d..eaead50800 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -138,8 +138,10 @@ class DeviceHandler(BaseHandler): else: raise - yield self.store.user_delete_access_tokens(user_id, - device_id=device_id) + yield self.store.user_delete_access_tokens( + user_id, device_id=device_id, + delete_refresh_tokens=True, + ) @defer.inlineCallbacks def update_device(self, user_id, device_id, content): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 935e82bf7a..d9555e073a 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -252,20 +252,36 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): @defer.inlineCallbacks def user_delete_access_tokens(self, user_id, except_token_ids=[], - device_id=None): - def f(txn): - sql = "SELECT token FROM access_tokens WHERE user_id = ?" 
+ device_id=None, + delete_refresh_tokens=False): + """ + Invalidate access/refresh tokens belonging to a user + + Args: + user_id (str): ID of user the tokens belong to + except_token_ids (list[str]): list of access_tokens which should + *not* be deleted + device_id (str|None): ID of device the tokens are associated with. + If None, tokens associated with any device (or no device) will + be deleted + delete_refresh_tokens (bool): True to delete refresh tokens as + well as access tokens. + Returns: + defer.Deferred: + """ + def f(txn, table, except_tokens, call_after_delete): + sql = "SELECT token FROM %s WHERE user_id = ?" % table clauses = [user_id] if device_id is not None: sql += " AND device_id = ?" clauses.append(device_id) - if except_token_ids: + if except_tokens: sql += " AND id NOT IN (%s)" % ( - ",".join(["?" for _ in except_token_ids]), + ",".join(["?" for _ in except_tokens]), ) - clauses += except_token_ids + clauses += except_tokens txn.execute(sql, clauses) @@ -274,16 +290,33 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): n = 100 chunks = [rows[i:i + n] for i in xrange(0, len(rows), n)] for chunk in chunks: - for row in chunk: - txn.call_after(self.get_user_by_access_token.invalidate, (row[0],)) + if call_after_delete: + for row in chunk: + txn.call_after(call_after_delete, (row[0],)) txn.execute( - "DELETE FROM access_tokens WHERE token in (%s)" % ( + "DELETE FROM %s WHERE token in (%s)" % ( + table, ",".join(["?" for _ in chunk]), ), [r[0] for r in chunk] ) - yield self.runInteraction("user_delete_access_tokens", f) + # delete refresh tokens first, to stop new access tokens being + # allocated while our backs are turned + if delete_refresh_tokens: + yield self.runInteraction( + "user_delete_access_tokens", f, + table="refresh_tokens", + except_tokens=[], + call_after_delete=None, + ) + + yield self.runInteraction( + "user_delete_access_tokens", f, + table="access_tokens", + except_tokens=except_token_ids, + call_after_delete=self.get_user_by_access_token.invalidate, + ) def delete_access_token(self, access_token): def f(txn): @@ -306,9 +339,8 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): Args: token (str): The access token of a user. Returns: - dict: Including the name (user_id) and the ID of their access token. - Raises: - StoreError if no user was found. + defer.Deferred: None, if the token did not match, ootherwise dict + including the keys `name`, `is_guest`, `device_id`, `token_id`. 
""" return self.runInteraction( "get_user_by_access_token", diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index b03ca303a2..f7d74dea8e 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -128,6 +128,40 @@ class RegistrationStoreTestCase(unittest.TestCase): with self.assertRaises(StoreError): yield self.store.exchange_refresh_token(last_token, generator.generate) + @defer.inlineCallbacks + def test_user_delete_access_tokens(self): + # add some tokens + generator = TokenGenerator() + refresh_token = generator.generate(self.user_id) + yield self.store.register(self.user_id, self.tokens[0], self.pwhash) + yield self.store.add_access_token_to_user(self.user_id, self.tokens[1], + self.device_id) + yield self.store.add_refresh_token_to_user(self.user_id, refresh_token, + self.device_id) + + # now delete some + yield self.store.user_delete_access_tokens( + self.user_id, device_id=self.device_id, delete_refresh_tokens=True) + + # check they were deleted + user = yield self.store.get_user_by_access_token(self.tokens[1]) + self.assertIsNone(user, "access token was not deleted by device_id") + with self.assertRaises(StoreError): + yield self.store.exchange_refresh_token(refresh_token, + generator.generate) + + # check the one not associated with the device was not deleted + user = yield self.store.get_user_by_access_token(self.tokens[0]) + self.assertEqual(self.user_id, user["name"]) + + # now delete the rest + yield self.store.user_delete_access_tokens( + self.user_id, delete_refresh_tokens=True) + + user = yield self.store.get_user_by_access_token(self.tokens[0]) + self.assertIsNone(user, + "access token was not deleted without device_id") + class TokenGenerator: def __init__(self): From db4f823d34c6ebe30ce8f4f957c20f6e0a627ecc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 11:49:40 +0100 Subject: [PATCH 298/414] Fix flake8 configuration Apparently flake8 v3 doesn't like trailing comments on config settings. Also remove the pep8 config, which didn't work (because it was missing W503) and duplicated the flake8 config. We don't use pep8 on its own, so the config was duplicative. --- setup.cfg | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index 5ebce1c56b..da8eafbb39 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,5 @@ ignore = [flake8] max-line-length = 90 -ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it. - -[pep8] -max-line-length = 90 +# W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it. +ignore = W503 From 05e7e5e972446b639997f0ea461c2eea39617342 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 11:59:08 +0100 Subject: [PATCH 299/414] Fix flake8 violation Apparently flake8 v3 puts the error on a different line to v2. 
Easiest way to make sure that happens is by putting the whole statement on one line :) --- synapse/app/__init__.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index 1bc4279807..9c2b627590 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -16,13 +16,11 @@ import sys sys.dont_write_bytecode = True -from synapse.python_dependencies import ( - check_requirements, MissingRequirementError -) # NOQA +from synapse import python_dependencies # noqa: E402 try: - check_requirements() -except MissingRequirementError as e: + python_dependencies.check_requirements() +except python_dependencies.MissingRequirementError as e: message = "\n".join([ "Missing Requirement: %s" % (e.message,), "To install run:", From 33d777647325501d2a1d18d95efc5f9f64eeb46e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 13:32:15 +0100 Subject: [PATCH 300/414] Fix typo --- synapse/storage/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index d9555e073a..7e7d32eb66 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -339,7 +339,7 @@ class RegistrationStore(background_updates.BackgroundUpdateStore): Args: token (str): The access token of a user. Returns: - defer.Deferred: None, if the token did not match, ootherwise dict + defer.Deferred: None, if the token did not match, otherwise dict including the keys `name`, `is_guest`, `device_id`, `token_id`. """ return self.runInteraction( From c824b29e77cd1745f8ac14f2a73c3b8590acaac9 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 26 Jul 2016 16:39:14 +0100 Subject: [PATCH 301/414] Check if the user is banned when handling 3pid invites --- synapse/api/auth.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index eca8513905..f399aa8c7c 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -376,6 +376,10 @@ class Auth(object): if Membership.INVITE == membership and "third_party_invite" in event.content: if not self._verify_third_party_invite(event, auth_events): raise AuthError(403, "You are not invited to this room.") + if target_banned: + raise AuthError( + 403, "%s is banned from the room" % (target_user_id,) + ) return True if Membership.JOIN != membership: From eb359eced44407b1ee9648f10fdf3df63c8d40ad Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 16:46:53 +0100 Subject: [PATCH 302/414] Add `create_requester` function Wrap the `Requester` constructor with a function which provides sensible defaults, and use it throughout --- synapse/api/auth.py | 30 ++++++++++++------------- synapse/handlers/_base.py | 13 ++++++----- synapse/handlers/profile.py | 12 +++++----- synapse/handlers/register.py | 16 ++++++++------ synapse/handlers/room_member.py | 20 ++++++++--------- synapse/rest/client/v2_alpha/keys.py | 10 ++++----- synapse/types.py | 33 +++++++++++++++++++++++++++- tests/handlers/test_profile.py | 10 +++++---- tests/replication/test_resource.py | 24 ++++++++++---------- tests/rest/client/v1/test_profile.py | 13 +++++------ tests/utils.py | 5 ----- 11 files changed, 106 insertions(+), 80 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index eca8513905..eecf3b0b2a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -13,22 +13,22 @@ # See the License for the specific language governing permissions and # limitations 
under the License. +import logging + +import pymacaroons from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from signedjson.sign import verify_signed_json, SignatureVerifyException - from twisted.internet import defer - -from synapse.api.constants import EventTypes, Membership, JoinRules -from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError -from synapse.types import Requester, UserID, get_domain_from_id -from synapse.util.logutils import log_function -from synapse.util.logcontext import preserve_context_over_fn -from synapse.util.metrics import Measure from unpaddedbase64 import decode_base64 -import logging -import pymacaroons +import synapse.types +from synapse.api.constants import EventTypes, Membership, JoinRules +from synapse.api.errors import AuthError, Codes, SynapseError, EventSizeError +from synapse.types import UserID, get_domain_from_id +from synapse.util.logcontext import preserve_context_over_fn +from synapse.util.logutils import log_function +from synapse.util.metrics import Measure logger = logging.getLogger(__name__) @@ -566,8 +566,7 @@ class Auth(object): Args: request - An HTTP request with an access_token query parameter. Returns: - defer.Deferred: resolves to a namedtuple including "user" (UserID) - "access_token_id" (int), "is_guest" (bool) + defer.Deferred: resolves to a ``synapse.types.Requester`` object Raises: AuthError if no user by that token exists or the token is invalid. """ @@ -576,9 +575,7 @@ class Auth(object): user_id = yield self._get_appservice_user_id(request.args) if user_id: request.authenticated_entity = user_id - defer.returnValue( - Requester(UserID.from_string(user_id), "", False) - ) + defer.returnValue(synapse.types.create_requester(user_id)) access_token = request.args["access_token"][0] user_info = yield self.get_user_by_access_token(access_token, rights) @@ -612,7 +609,8 @@ class Auth(object): request.authenticated_entity = user.to_string() - defer.returnValue(Requester(user, token_id, is_guest)) + defer.returnValue(synapse.types.create_requester( + user, token_id, is_guest, device_id)) except KeyError: raise AuthError( self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.", diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index 6264aa0d9a..11081a0cd5 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer -from synapse.api.errors import LimitExceededError +import synapse.types from synapse.api.constants import Membership, EventTypes -from synapse.types import UserID, Requester - - -import logging +from synapse.api.errors import LimitExceededError +from synapse.types import UserID logger = logging.getLogger(__name__) @@ -124,7 +124,8 @@ class BaseHandler(object): # and having homeservers have their own users leave keeps more # of that decision-making and control local to the guest-having # homeserver. 
- requester = Requester(target_user, "", True) + requester = synapse.types.create_requester( + target_user, is_guest=True) handler = self.hs.get_handlers().room_member_handler yield handler.update_membership( requester, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 711a6a567f..d9ac09078d 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -13,15 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer +import synapse.types from synapse.api.errors import SynapseError, AuthError, CodeMessageException -from synapse.types import UserID, Requester - +from synapse.types import UserID from ._base import BaseHandler -import logging - logger = logging.getLogger(__name__) @@ -165,7 +165,9 @@ class ProfileHandler(BaseHandler): try: # Assume the user isn't a guest because we don't let guests set # profile or avatar data. - requester = Requester(user, "", False) + # XXX why are we recreating `requester` here for each room? + # what was wrong with the `requester` we were passed? + requester = synapse.types.create_requester(user) yield handler.update_membership( requester, user, diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 94b19d0cb0..b9b5880d64 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -14,18 +14,19 @@ # limitations under the License. """Contains functions for registering clients.""" +import logging +import urllib + from twisted.internet import defer -from synapse.types import UserID, Requester +import synapse.types from synapse.api.errors import ( AuthError, Codes, SynapseError, RegistrationError, InvalidCaptchaError ) -from ._base import BaseHandler -from synapse.util.async import run_on_reactor from synapse.http.client import CaptchaServerHttpClient - -import logging -import urllib +from synapse.types import UserID +from synapse.util.async import run_on_reactor +from ._base import BaseHandler logger = logging.getLogger(__name__) @@ -410,8 +411,9 @@ class RegistrationHandler(BaseHandler): if displayname is not None: logger.info("setting user display name: %s -> %s", user_id, displayname) profile_handler = self.hs.get_handlers().profile_handler + requester = synapse.types.create_requester(user) yield profile_handler.set_displayname( - user, Requester(user, token, False), displayname + user, requester, displayname ) defer.returnValue((user_id, token)) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 7e616f44fd..8cec8fc4ed 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -14,24 +14,22 @@ # limitations under the License. 
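# [Editor's note] Several hunks in this patch, like the one below, simply
# regroup imports into the conventional order sketched here: stdlib, then
# third-party, then first-party. The module-style "import synapse.types"
# also defers attribute lookup to call time, which tolerates import cycles
# better than "from synapse.types import Requester" would; that appears to
# be why the patch prefers it.
import logging                      # stdlib

from twisted.internet import defer  # third-party

import synapse.types                # first-party, module-style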
+import logging + +from signedjson.key import decode_verify_key_bytes +from signedjson.sign import verify_signed_json from twisted.internet import defer +from unpaddedbase64 import decode_base64 -from ._base import BaseHandler - -from synapse.types import UserID, RoomID, Requester +import synapse.types from synapse.api.constants import ( EventTypes, Membership, ) from synapse.api.errors import AuthError, SynapseError, Codes +from synapse.types import UserID, RoomID from synapse.util.async import Linearizer from synapse.util.distributor import user_left_room, user_joined_room - -from signedjson.sign import verify_signed_json -from signedjson.key import decode_verify_key_bytes - -from unpaddedbase64 import decode_base64 - -import logging +from ._base import BaseHandler logger = logging.getLogger(__name__) @@ -315,7 +313,7 @@ class RoomMemberHandler(BaseHandler): ) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: - requester = Requester(target_user, None, False) + requester = synapse.types.create_requester(target_user) message_handler = self.hs.get_handlers().message_handler prev_event = message_handler.deduplicate_state_event(event, context) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 89ab39491c..56364af337 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -13,18 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging + +import simplejson as json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID - -from canonicaljson import encode_canonical_json - from ._base import client_v2_patterns -import logging -import simplejson as json - logger = logging.getLogger(__name__) diff --git a/synapse/types.py b/synapse/types.py index f639651a73..5349b0c450 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -18,7 +18,38 @@ from synapse.api.errors import SynapseError from collections import namedtuple -Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"]) +Requester = namedtuple("Requester", + ["user", "access_token_id", "is_guest", "device_id"]) +""" +Represents the user making a request + +Attributes: + user (UserID): id of the user making the request + access_token_id (int|None): *ID* of the access token used for this + request, or None if it came via the appservice API or similar + is_guest (bool): True if the user making this request is a guest user + device_id (str|None): device_id which was set at authentication time +""" + + +def create_requester(user_id, access_token_id=None, is_guest=False, + device_id=None): + """ + Create a new ``Requester`` object + + Args: + user_id (str|UserID): id of the user making the request + access_token_id (int|None): *ID* of the access token used for this + request, or None if it came via the appservice API or similar + is_guest (bool): True if the user making this request is a guest user + device_id (str|None): device_id which was set at authentication time + + Returns: + Requester + """ + if not isinstance(user_id, UserID): + user_id = UserID.from_string(user_id) + return Requester(user_id, access_token_id, is_guest, device_id) def get_domain_from_id(string): diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 4f2c14e4ff..f1f664275f 100644 --- 
a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -19,11 +19,12 @@ from twisted.internet import defer from mock import Mock, NonCallableMock +import synapse.types from synapse.api.errors import AuthError from synapse.handlers.profile import ProfileHandler from synapse.types import UserID -from tests.utils import setup_test_homeserver, requester_for_user +from tests.utils import setup_test_homeserver class ProfileHandlers(object): @@ -86,7 +87,7 @@ class ProfileTestCase(unittest.TestCase): def test_set_my_name(self): yield self.handler.set_displayname( self.frank, - requester_for_user(self.frank), + synapse.types.create_requester(self.frank), "Frank Jr." ) @@ -99,7 +100,7 @@ class ProfileTestCase(unittest.TestCase): def test_set_my_name_noauth(self): d = self.handler.set_displayname( self.frank, - requester_for_user(self.bob), + synapse.types.create_requester(self.bob), "Frank Jr." ) @@ -144,7 +145,8 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_set_my_avatar(self): yield self.handler.set_avatar_url( - self.frank, requester_for_user(self.frank), "http://my.server/pic.gif" + self.frank, synapse.types.create_requester(self.frank), + "http://my.server/pic.gif" ) self.assertEquals( diff --git a/tests/replication/test_resource.py b/tests/replication/test_resource.py index 842e3d29d7..e70ac6f14d 100644 --- a/tests/replication/test_resource.py +++ b/tests/replication/test_resource.py @@ -13,15 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.replication.resource import ReplicationResource -from synapse.types import Requester, UserID - -from twisted.internet import defer -from tests import unittest -from tests.utils import setup_test_homeserver, requester_for_user -from mock import Mock, NonCallableMock -import json import contextlib +import json + +from mock import Mock, NonCallableMock +from twisted.internet import defer + +import synapse.types +from synapse.replication.resource import ReplicationResource +from synapse.types import UserID +from tests import unittest +from tests.utils import setup_test_homeserver class ReplicationResourceCase(unittest.TestCase): @@ -61,7 +63,7 @@ class ReplicationResourceCase(unittest.TestCase): def test_events_and_state(self): get = self.get(events="-1", state="-1", timeout="0") yield self.hs.get_handlers().room_creation_handler.create_room( - Requester(self.user, "", False), {} + synapse.types.create_requester(self.user), {} ) code, body = yield get self.assertEquals(code, 200) @@ -144,7 +146,7 @@ class ReplicationResourceCase(unittest.TestCase): def send_text_message(self, room_id, message): handler = self.hs.get_handlers().message_handler event = yield handler.create_and_send_nonmember_event( - requester_for_user(self.user), + synapse.types.create_requester(self.user), { "type": "m.room.message", "content": {"body": "message", "msgtype": "m.text"}, @@ -157,7 +159,7 @@ class ReplicationResourceCase(unittest.TestCase): @defer.inlineCallbacks def create_room(self): result = yield self.hs.get_handlers().room_creation_handler.create_room( - Requester(self.user, "", False), {} + synapse.types.create_requester(self.user), {} ) defer.returnValue(result["room_id"]) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index af02fce8fb..1e95e97538 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -14,17 +14,14 @@ # limitations under the License. 
"""Tests REST events for /profile paths.""" -from tests import unittest +from mock import Mock from twisted.internet import defer -from mock import Mock - -from ....utils import MockHttpResource, setup_test_homeserver - +import synapse.types from synapse.api.errors import SynapseError, AuthError -from synapse.types import Requester, UserID - from synapse.rest.client.v1 import profile +from tests import unittest +from ....utils import MockHttpResource, setup_test_homeserver myid = "@1234ABCD:test" PATH_PREFIX = "/_matrix/client/api/v1" @@ -52,7 +49,7 @@ class ProfileTestCase(unittest.TestCase): ) def _get_user_by_req(request=None, allow_guest=False): - return Requester(UserID.from_string(myid), "", False) + return synapse.types.create_requester(myid) hs.get_v1auth().get_user_by_req = _get_user_by_req diff --git a/tests/utils.py b/tests/utils.py index ed547bc39b..915b934e94 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -20,7 +20,6 @@ from synapse.storage.prepare_database import prepare_database from synapse.storage.engines import create_engine from synapse.server import HomeServer from synapse.federation.transport import server -from synapse.types import Requester from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.logcontext import LoggingContext @@ -512,7 +511,3 @@ class DeferredMockCallable(object): "call(%s)" % _format_call(c[0], c[1]) for c in calls ]) ) - - -def requester_for_user(user): - return Requester(user, None, False) From 87ffd21b291a503fd47ba938b32658c9f475aed5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 26 Jul 2016 19:19:08 +0100 Subject: [PATCH 303/414] Fix a couple of bugs in the transaction and keyring code --- synapse/crypto/keyring.py | 17 +++++++++-------- synapse/storage/transactions.py | 3 ++- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d08ee0aa91..826845f695 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -275,14 +275,15 @@ class Keyring(object): for server_name, groups in missing_groups.items() } - for group in missing_groups.values(): - group_id_to_deferred[group.group_id].errback(SynapseError( - 401, - "No key for %s with id %s" % ( - group.server_name, group.key_ids, - ), - Codes.UNAUTHORIZED, - )) + for groups in missing_groups.values(): + for group in groups: + group_id_to_deferred[group.group_id].errback(SynapseError( + 401, + "No key for %s with id %s" % ( + group.server_name, group.key_ids, + ), + Codes.UNAUTHORIZED, + )) def on_err(err): for deferred in group_id_to_deferred.values(): diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 6c7481a728..6258ff1725 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -24,6 +24,7 @@ from collections import namedtuple import itertools import logging +import ujson as json logger = logging.getLogger(__name__) @@ -101,7 +102,7 @@ class TransactionStore(SQLBaseStore): ) if result and result["response_code"]: - return result["response_code"], result["response_json"] + return result["response_code"], json.loads(str(result["response_json"])) else: return None From a4b06b619c81f4a212323cc02565c7c893d5c2e5 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Tue, 26 Jul 2016 19:50:11 +0100 Subject: [PATCH 304/414] Add a couple more checks to the keyring --- synapse/crypto/keyring.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 
d08ee0aa91..627bd0d222 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -447,7 +447,7 @@ class Keyring(object): ) processed_response = yield self.process_v2_response( - perspective_name, response + perspective_name, response, only_from_server=False ) for server_name, response_keys in processed_response.items(): @@ -527,7 +527,7 @@ class Keyring(object): @defer.inlineCallbacks def process_v2_response(self, from_server, response_json, - requested_ids=[]): + requested_ids=[], only_from_server=True): time_now_ms = self.clock.time_msec() response_keys = {} verify_keys = {} @@ -551,6 +551,13 @@ class Keyring(object): results = {} server_name = response_json["server_name"] + if only_from_server: + if server_name != from_server: + raise ValueError( + "Expected a response for server %r not %r" % ( + from_server, server_name + ) + ) for key_id in response_json["signatures"].get(server_name, {}): if key_id not in response_json["verify_keys"]: raise ValueError( From 2e3d90d67c8255300b226d6d2fdc2acef80e58ba Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Jul 2016 23:38:12 +0100 Subject: [PATCH 305/414] Make the device id on e2e key upload optional We should now be able to get our device_id from the access_token, so the device_id on the upload request is optional. Where it is supplied, we should check that it matches. For active access_tokens without an associated device_id, we ought to register the device in the devices table. Also update the table on upgrade so that all of the existing e2e keys are associated with real devices. --- synapse/rest/client/v2_alpha/keys.py | 47 ++++++++++++++----- .../schema/delta/33/devices_for_e2e_keys.sql | 19 ++++++++ 2 files changed, 54 insertions(+), 12 deletions(-) create mode 100644 synapse/storage/schema/delta/33/devices_for_e2e_keys.sql diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 56364af337..0bf32a089b 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -19,6 +19,9 @@ import simplejson as json from canonicaljson import encode_canonical_json from twisted.internet import defer +import synapse.api.errors +import synapse.server +import synapse.types from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID from ._base import client_v2_patterns @@ -28,7 +31,7 @@ logger = logging.getLogger(__name__) class KeyUploadServlet(RestServlet): """ - POST /keys/upload/ HTTP/1.1 + POST /keys/upload HTTP/1.1 Content-Type: application/json { @@ -51,23 +54,51 @@ class KeyUploadServlet(RestServlet): }, } """ - PATTERNS = client_v2_patterns("/keys/upload/(?P[^/]*)", releases=()) + PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$", + releases=(), v2_alpha=False) def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ super(KeyUploadServlet, self).__init__() self.store = hs.get_datastore() self.clock = hs.get_clock() self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() @defer.inlineCallbacks def on_POST(self, request, device_id): requester = yield self.auth.get_user_by_req(request) + user_id = requester.user.to_string() - # TODO: Check that the device_id matches that in the authentication - # or derive the device_id from the authentication instead. body = parse_json_object_from_request(request) + if device_id is not None: + # passing the device_id here is deprecated; however, we allow it + # for now for compatibility with older clients. 
But if a device_id + # was given here and in the auth, they must match. + + if (requester.device_id is not None and + device_id != requester.device_id): + raise synapse.api.errors.SynapseError( + 400, "Can only upload keys for current device" + ) + + self.device_handler.check_device_registered( + user_id, device_id, "unknown device" + ) + else: + device_id = requester.device_id + + if device_id is None: + raise synapse.api.errors.SynapseError( + 400, + "To upload keys, you must pass device_id when authenticating" + ) + time_now = self.clock.time_msec() # TODO: Validate the JSON to make sure it has the right keys. @@ -103,14 +134,6 @@ class KeyUploadServlet(RestServlet): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) defer.returnValue((200, {"one_time_key_counts": result})) - @defer.inlineCallbacks - def on_GET(self, request, device_id): - requester = yield self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue((200, {"one_time_key_counts": result})) - class KeyQueryServlet(RestServlet): """ diff --git a/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql new file mode 100644 index 0000000000..2908c4d232 --- /dev/null +++ b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql @@ -0,0 +1,19 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- make sure that we have a device record for each set of E2E keys, so that the +-- user can delete them if they like. 
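Editor's aside: the INSERT that follows quotes the display name with double
quotes, which the follow-up "SQL syntax fix" patch below (PATCH 307) changes
to single quotes. In standard SQL, single quotes delimit string literals and
double quotes delimit identifiers; SQLite quietly falls back to treating an
unresolvable double-quoted identifier as a string, masking the bug, while
PostgreSQL rejects it. A quick, hedged illustration with Python's stdlib
sqlite3 module::

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE devices (user_id TEXT, device_id TEXT, display_name TEXT)"
    )
    # Portable form: single-quoted string literal.
    conn.execute("INSERT INTO devices VALUES ('u1', 'd1', 'unknown device')")
    # SQLite accepts this too (double quotes fall back to a string literal);
    # PostgreSQL would instead look for a column named "unknown device".
    conn.execute('INSERT INTO devices VALUES (\'u1\', \'d2\', "unknown device")')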
+INSERT INTO devices + SELECT user_id, device_id, "unknown device" FROM e2e_device_keys_json; From d47115ff8bf3ab5952f053db578a519e8e3f930c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 27 Jul 2016 12:18:03 +0100 Subject: [PATCH 306/414] Delete e2e keys on device delete --- synapse/handlers/device.py | 4 ++++ synapse/rest/client/v2_alpha/keys.py | 13 +++++++++---- synapse/storage/end_to_end_keys.py | 15 +++++++++++++++ 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index eaead50800..f4bf159bb5 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -143,6 +143,10 @@ class DeviceHandler(BaseHandler): delete_refresh_tokens=True, ) + yield self.store.delete_e2e_keys_by_device( + user_id=user_id, device_id=device_id + ) + @defer.inlineCallbacks def update_device(self, user_id, device_id, content): """ Update the given device diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 0bf32a089b..4629f4bfde 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -86,10 +86,6 @@ class KeyUploadServlet(RestServlet): raise synapse.api.errors.SynapseError( 400, "Can only upload keys for current device" ) - - self.device_handler.check_device_registered( - user_id, device_id, "unknown device" - ) else: device_id = requester.device_id @@ -131,6 +127,15 @@ class KeyUploadServlet(RestServlet): user_id, device_id, time_now, key_list ) + # the device should have been registered already, but it may have been + # deleted due to a race with a DELETE request. Or we may be using an + # old access_token without an associated device_id. Either way, we + # need to double-check the device is registered to avoid ending up with + # keys without a corresponding device. + self.device_handler.check_device_registered( + user_id, device_id, "unknown device" + ) + result = yield self.store.count_e2e_one_time_keys(user_id, device_id) defer.returnValue((200, {"one_time_key_counts": result})) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 2e89066515..62b7790e91 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
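# [Editor's note] A tiny, self-contained illustration of the inlineCallbacks
# idiom used by delete_e2e_keys_by_device below: each yield suspends the
# generator until its Deferred fires, so the two deletes run one after the
# other. The delete callable here is a stand-in, not a synapse API:
from twisted.internet import defer

@defer.inlineCallbacks
def delete_both(delete):
    yield delete("e2e_device_keys_json")
    yield delete("e2e_one_time_keys_json")

d = delete_both(lambda table: defer.succeed(None))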
+import twisted.internet.defer + from ._base import SQLBaseStore @@ -123,3 +125,16 @@ class EndToEndKeyStore(SQLBaseStore): return self.runInteraction( "claim_e2e_one_time_keys", _claim_e2e_one_time_keys ) + + @twisted.internet.defer.inlineCallbacks + def delete_e2e_keys_by_device(self, user_id, device_id): + yield self._simple_delete( + table="e2e_device_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + desc="delete_e2e_device_keys_by_device" + ) + yield self._simple_delete( + table="e2e_one_time_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + desc="delete_e2e_one_time_keys_by_device" + ) From 26cb0efa88c2fa84089c74e3de02fa2ce832f47a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 27 Jul 2016 12:30:22 +0100 Subject: [PATCH 307/414] SQL syntax fix --- synapse/storage/schema/delta/33/devices_for_e2e_keys.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql index 2908c4d232..140f2b63e0 100644 --- a/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql +++ b/synapse/storage/schema/delta/33/devices_for_e2e_keys.sql @@ -16,4 +16,4 @@ -- make sure that we have a device record for each set of E2E keys, so that the -- user can delete them if they like. INSERT INTO devices - SELECT user_id, device_id, "unknown device" FROM e2e_device_keys_json; + SELECT user_id, device_id, 'unknown device' FROM e2e_device_keys_json; From fe1b36994643ed57b511d9caf834e3e131cd404c Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Wed, 27 Jul 2016 14:10:43 +0100 Subject: [PATCH 308/414] Clean up verify_json_objects_for_server --- synapse/crypto/keyring.py | 141 ++++++++++++++++++++------------------ 1 file changed, 74 insertions(+), 67 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d08ee0aa91..f3924e23d8 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -44,7 +44,21 @@ import logging logger = logging.getLogger(__name__) -KeyGroup = namedtuple("KeyGroup", ("server_name", "group_id", "key_ids")) +VerifyKeyRequest = namedtuple("VerifyRequest", ( + "server_name", "key_ids", "json_object", "deferred" +)) +""" +A request for a verify key to verify a JSON object. + +Attributes: + server_name(str): The name of the server to verify against. + key_ids(set(str)): The set of key_ids to that could be used to verify the + JSON object + json_object(dict): The JSON object to verify. + deferred(twisted.internet.defer.Deferred): + A deferred (server_name, key_id, verify_key) tuple that resolves when + a verify key has been fetched +""" class Keyring(object): @@ -74,39 +88,32 @@ class Keyring(object): list of deferreds indicating success or failure to verify each json object's signature for the given server_name. 
""" - group_id_to_json = {} - group_id_to_group = {} - group_ids = [] - - next_group_id = 0 - deferreds = {} + verify_requests = [] for server_name, json_object in server_and_json: logger.debug("Verifying for %s", server_name) - group_id = next_group_id - next_group_id += 1 - group_ids.append(group_id) key_ids = signature_ids(json_object, server_name) if not key_ids: - deferreds[group_id] = defer.fail(SynapseError( + deferred = defer.fail(SynapseError( 400, "Not signed with a supported algorithm", Codes.UNAUTHORIZED, )) else: - deferreds[group_id] = defer.Deferred() + deferred = defer.Deferred() - group = KeyGroup(server_name, group_id, key_ids) + verify_request = VerifyKeyRequest( + server_name, key_ids, json_object, deferred + ) - group_id_to_group[group_id] = group - group_id_to_json[group_id] = json_object + verify_requests.append(verify_request) @defer.inlineCallbacks - def handle_key_deferred(group, deferred): - server_name = group.server_name + def handle_key_deferred(verify_request): + server_name = verify_request.server_name try: - _, _, key_id, verify_key = yield deferred + _, key_id, verify_key = yield verify_request.deferred except IOError as e: logger.warn( "Got IOError when downloading keys for %s: %s %s", @@ -128,7 +135,7 @@ class Keyring(object): Codes.UNAUTHORIZED, ) - json_object = group_id_to_json[group.group_id] + json_object = verify_request.json_object try: verify_signed_json(json_object, server_name, verify_key) @@ -157,36 +164,34 @@ class Keyring(object): # Actually start fetching keys. wait_on_deferred.addBoth( - lambda _: self.get_server_verify_keys(group_id_to_group, deferreds) + lambda _: self.get_server_verify_keys(verify_requests) ) # When we've finished fetching all the keys for a given server_name, # resolve the deferred passed to `wait_for_previous_lookups` so that # any lookups waiting will proceed. - server_to_gids = {} + server_to_request_ids = {} - def remove_deferreds(res, server_name, group_id): - server_to_gids[server_name].discard(group_id) - if not server_to_gids[server_name]: + def remove_deferreds(res, server_name, verify_request): + request_id = id(verify_request) + server_to_request_ids[server_name].discard(request_id) + if not server_to_request_ids[server_name]: d = server_to_deferred.pop(server_name, None) if d: d.callback(None) return res - for g_id, deferred in deferreds.items(): - server_name = group_id_to_group[g_id].server_name - server_to_gids.setdefault(server_name, set()).add(g_id) - deferred.addBoth(remove_deferreds, server_name, g_id) + for verify_request in verify_requests: + server_name = verify_request.server_name + request_id = id(verify_request) + server_to_request_ids.setdefault(server_name, set()).add(request_id) + deferred.addBoth(remove_deferreds, server_name, verify_request) # Pass those keys to handle_key_deferred so that the json object # signatures can be verified return [ - preserve_context_over_fn( - handle_key_deferred, - group_id_to_group[g_id], - deferreds[g_id], - ) - for g_id in group_ids + preserve_context_over_fn(handle_key_deferred, verify_request) + for verify_request in verify_requests ] @defer.inlineCallbacks @@ -220,7 +225,7 @@ class Keyring(object): d.addBoth(rm, server_name) - def get_server_verify_keys(self, group_id_to_group, group_id_to_deferred): + def get_server_verify_keys(self, verify_requests): """Takes a dict of KeyGroups and tries to find at least one key for each group. 
""" @@ -237,62 +242,64 @@ class Keyring(object): merged_results = {} missing_keys = {} - for group in group_id_to_group.values(): - missing_keys.setdefault(group.server_name, set()).update( - group.key_ids + for verify_request in verify_requests: + missing_keys.setdefault(verify_request.server_name, set()).update( + verify_request.key_ids ) for fn in key_fetch_fns: results = yield fn(missing_keys.items()) merged_results.update(results) - # We now need to figure out which groups we have keys for - # and which we don't - missing_groups = {} - for group in group_id_to_group.values(): - for key_id in group.key_ids: - if key_id in merged_results[group.server_name]: + # We now need to figure out which verify requests we have keys + # for and which we don't + missing_keys = {} + requests_missing_keys = [] + for verify_request in verify_requests: + server_name = verify_request.server_name + result_keys = merged_results[server_name] + + if verify_request.deferred.called: + # We've already called this deferred, which probably + # means that we've already found a key for it. + continue + + for key_id in verify_request.key_ids: + if key_id in result_keys: with PreserveLoggingContext(): - group_id_to_deferred[group.group_id].callback(( - group.group_id, - group.server_name, + verify_request.deferred.callback(( + server_name, key_id, - merged_results[group.server_name][key_id], + result_keys[key_id], )) break else: - missing_groups.setdefault( - group.server_name, [] - ).append(group) + # The else block is only reached if the loop above + # doesn't break. + missing_keys.setdefault(server_name, set()).update( + verify_request.key_ids + ) + requests_missing_keys.append(verify_request) - if not missing_groups: + if not missing_keys: break - missing_keys = { - server_name: set( - key_id for group in groups for key_id in group.key_ids - ) - for server_name, groups in missing_groups.items() - } - - for group in missing_groups.values(): - group_id_to_deferred[group.group_id].errback(SynapseError( + for verify_request in requests_missing_keys.values(): + verify_request.deferred.errback(SynapseError( 401, "No key for %s with id %s" % ( - group.server_name, group.key_ids, + verify_request.server_name, verify_request.key_ids, ), Codes.UNAUTHORIZED, )) def on_err(err): - for deferred in group_id_to_deferred.values(): - if not deferred.called: - deferred.errback(err) + for verify_request in verify_requests: + if not verify_request.deferred.called: + verify_request.deferred.errback(err) do_iterations().addErrback(on_err) - return group_id_to_deferred - @defer.inlineCallbacks def get_keys_from_store(self, server_name_and_key_ids): res = yield defer.gatherResults( From ccec25e2c6270c1cae916b8ca8a775a166ea7e7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 27 Jul 2016 16:41:06 +0100 Subject: [PATCH 309/414] key upload tweaks 1. Add v2_alpha URL back in, since things seem to be using it. 2. Don't reject the request if the device_id in the upload request fails to match that in the access_token. 
--- synapse/rest/client/v2_alpha/keys.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 4629f4bfde..dc1d4d8fc6 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -55,7 +55,7 @@ class KeyUploadServlet(RestServlet): } """ PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$", - releases=(), v2_alpha=False) + releases=()) def __init__(self, hs): """ @@ -78,14 +78,12 @@ class KeyUploadServlet(RestServlet): if device_id is not None: # passing the device_id here is deprecated; however, we allow it - # for now for compatibility with older clients. But if a device_id - # was given here and in the auth, they must match. - + # for now for compatibility with older clients. if (requester.device_id is not None and device_id != requester.device_id): - raise synapse.api.errors.SynapseError( - 400, "Can only upload keys for current device" - ) + logger.warning("Client uploading keys for a different device " + "(logged in as %s, uploading for %s)", + requester.device_id, device_id) else: device_id = requester.device_id From 5238960850b4aa4b318f7c794fdadaf12dfe3841 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jul 2016 17:33:09 +0100 Subject: [PATCH 310/414] Bump CHANGES and version --- CHANGES.rst | 56 +++++++++++++++++++++++++++++++++++++++++++++ synapse/__init__.py | 2 +- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index e1d5e876dc..799c14575c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,59 @@ +Changes in synapse v0.17.0-r1 (2016-07-27) +========================================== + +This release changes the LDAP configuration format in a backwards incompatible +way, see PR #843 for details. + + +Features: + +* Add purge_media_cache admin API (PR #902) +* Add deactivate account admin API (PR #903) +* Add optional pepper to password hashing (PR #907, #910) +* Add an admin option to shared secret registration (breaks backwards compat) + (PR #909) +* Add purge local room history API (PR #911, #923, #924) +* Add requestToken endpoints (PR #915) +* Add an /account/deactivate endpoint (PR #921) +* Add filter param to /messages. Add 'contains_url' to filter. (PR #922) +* Add device_id support to /login (PR #929) +* Add device_id support to /v2/register flow. (PR #937, #942) +* Add GET /devices endpoint (PR #939, #944) +* Add GET /device/{deviceId} (PR #943) +* Add update and delete APIs for devices (PR #949) + + +Changes: + +* Rewrite LDAP Authentication against ldap3 (PR #843) +* Linearize some federation endpoints based on (origin, room_id) (PR #879) +* Remove the legacy v0 content upload API. (PR #888) +* Use similar naming we use in email notifs for push (PR #894) +* Optionally include password hash in createUser endpoint (PR #905) +* Use a query that postgresql optimises better for get_events_around (PR #906) +* Fall back to 'username' if 'user' is not given for appservice registration. 
+ (PR #927) +* Add metrics for psutil derived memory usage (PR #936) +* Record device_id in client_ips (PR #938) +* Log the hostname the reCAPTCHA was completed on (PR #946) + + +Bug fixes: + +* Fix substitution failure in mail template (PR #887) +* Put most recent 20 messages in email notif (PR #892) +* Ensure that the guest user is in the database when upgrading accounts + (PR #914) +* Fix various edge cases in auth handling (PR #919) +* Fix 500 ISE when sending alias event without a state_key (PR #925) +* Fix bug where we stored rejections in the state_group, persist all + rejections (PR #948) +* Fix lack of check of if the user is banned when handling 3pid invites + (PR #952) +* Fix a couple of bugs in the transaction and keyring code (PR #954, #955) + + + Changes in synapse v0.16.1-r1 (2016-07-08) ========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 2750ad3f7a..b0bd7254c5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.1-r1" +__version__ = "0.16.17" From 05f6447301ddc72cec7564f9d39f3e16aaa728c6 Mon Sep 17 00:00:00 2001 From: "Paul \"LeoNerd\" Evans" Date: Wed, 27 Jul 2016 17:54:26 +0100 Subject: [PATCH 311/414] Forbid non-ASes from registering users whose names begin with '_' (SYN-738) --- synapse/handlers/register.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index b9b5880d64..dd75c4fecf 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -53,6 +53,13 @@ class RegistrationHandler(BaseHandler): Codes.INVALID_USERNAME ) + if localpart[0] == '_': + raise SynapseError( + 400, + "User ID may not begin with _", + Codes.INVALID_USERNAME + ) + user = UserID(localpart, self.hs.hostname) user_id = user.to_string() From e8d212d92ea84419c1ef083f96680ec7edef80ed Mon Sep 17 00:00:00 2001 From: evelynmitchell Date: Wed, 27 Jul 2016 12:20:46 -0600 Subject: [PATCH 312/414] 3PID defined on first mention --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index ebcb15a977..8cbd28fb8e 100644 --- a/README.rst +++ b/README.rst @@ -11,8 +11,8 @@ VoIP. The basics you need to know to get up and running are: like ``#matrix:matrix.org`` or ``#test:localhost:8448``. 
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future - you will normally refer to yourself and others using a 3PID: email - address, phone number, etc rather than manipulating Matrix user IDs) + you will normally refer to yourself and others using a third party identifier + (3PID): email address, phone number, etc rather than manipulating Matrix user IDs) The overall architecture is:: From fda078f995265adb0ecee5734c516eb55adc9355 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 28 Jul 2016 09:14:21 +0100 Subject: [PATCH 313/414] Add r0.2.0 to the "supported versions" list --- synapse/rest/client/versions.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index ca5468c402..1fe31abb42 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -26,7 +26,10 @@ class VersionsRestServlet(RestServlet): def on_GET(self, request): return (200, { - "versions": ["r0.0.1"] + "versions": [ + "r0.0.1", + "r0.2.0", + ] }) From ecd5e6bfa4b84b6beb47b27d476f0bdba66f7a23 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 28 Jul 2016 10:04:37 +0100 Subject: [PATCH 314/414] Typo --- synapse/push/push_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 6f2d1ad57d..d555a33e9a 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -54,7 +54,7 @@ def get_context_for_event(state_handler, ev, user_id): room_state = yield state_handler.get_current_state(ev.room_id) # we no longer bother setting room_alias, and make room_name the - # human-readable name instead, be that m.room.namer, an alias or + # human-readable name instead, be that m.room.name, an alias or # a list of people in the room name = calculate_room_name( room_state, user_id, fallback_to_single_member=False From f6f8f81a4800cae83684cd1d75eb9a132c5bde6e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 28 Jul 2016 10:14:07 +0100 Subject: [PATCH 315/414] Add r0.1.0 to the "supported versions" list --- synapse/rest/client/versions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 1fe31abb42..e984ea47db 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -28,6 +28,7 @@ class VersionsRestServlet(RestServlet): return (200, { "versions": [ "r0.0.1", + "r0.1.0", "r0.2.0", ] }) From 389c890f14c456a157d973fd29b49d64e5fa9226 Mon Sep 17 00:00:00 2001 From: David Baker Date: Thu, 28 Jul 2016 10:20:47 +0100 Subject: [PATCH 316/414] Don't include name of room for invites in push Avoids insane pushes like, "Bob invited you to invite from Bob" --- synapse/util/presentable_names.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/util/presentable_names.py b/synapse/util/presentable_names.py index 4c54812e6f..f68676e9e7 100644 --- a/synapse/util/presentable_names.py +++ b/synapse/util/presentable_names.py @@ -83,7 +83,10 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True, ): if ("m.room.member", my_member_event.sender) in room_state: inviter_member_event = room_state[("m.room.member", my_member_event.sender)] - return "Invite from %s" % (name_from_member_event(inviter_member_event),) + if fallback_to_single_member: + return "Invite from %s" % (name_from_member_event(inviter_member_event),) + else: + return None else: return "Room Invite" From 
7871790db1b38d10783d88ebfc9bd4e0356195c7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 10:38:56 +0100 Subject: [PATCH 317/414] Bump version and changelog --- CHANGES.rst | 7 +++++-- synapse/__init__.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 799c14575c..65566adda1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,5 @@ -Changes in synapse v0.17.0-r1 (2016-07-27) -========================================== +Changes in synapse v0.17.0-rc1 (2016-07-28) +=========================================== This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details. @@ -36,6 +36,9 @@ Changes: * Add metrics for psutil derived memory usage (PR #936) * Record device_id in client_ips (PR #938) * Log the hostname the reCAPTCHA was completed on (PR #946) +* Make the device id on e2e key upload optional (PR #956) +* Add r0.2.0 to the "supported versions" list (PR #960) +* Don't include name of room for invites in push (PR #961) Bug fixes: diff --git a/synapse/__init__.py b/synapse/__init__.py index b0bd7254c5..8f0176e182 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. """ -__version__ = "0.16.17" +__version__ = "0.17.0-rc1" From bf81e38d365b79130b5e04053de0eaff94b0d472 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Thu, 28 Jul 2016 10:29:12 +0100 Subject: [PATCH 318/414] Fix retry utils to check if the exception is a subclass of CME --- synapse/util/retryutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 43cf11f3f6..49527f4d21 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -128,7 +128,7 @@ class RetryDestinationLimiter(object): ) valid_err_code = False - if exc_type is CodeMessageException: + if exc_type is not None and issubclass(exc_type, CodeMessageException): valid_err_code = 0 <= exc_val.code < 500 if exc_type is None or valid_err_code: From 019cf013d6ea4a8182189d068dc44ec403cc58ce Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 10:47:45 +0100 Subject: [PATCH 319/414] Update changelog --- CHANGES.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 65566adda1..c2fb982478 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,9 @@ Changes in synapse v0.17.0-rc1 (2016-07-28) This release changes the LDAP configuration format in a backwards incompatible way, see PR #843 for details. +This release contains significant security bug fixes regarding authenticating +events received over federation. Please upgrade. 
+ Features: @@ -35,6 +38,7 @@ Changes: (PR #927) * Add metrics for psutil derived memory usage (PR #936) * Record device_id in client_ips (PR #938) +* Send the correct host header when fetching keys (PR #941) * Log the hostname the reCAPTCHA was completed on (PR #946) * Make the device id on e2e key upload optional (PR #956) * Add r0.2.0 to the "supported versions" list (PR #960) From 7861cfec0aaed29b4bea0aab8fe7e89c7f23adcb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 14:35:05 +0100 Subject: [PATCH 320/414] Add authors to changelog --- CHANGES.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c2fb982478..03668370a9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -12,7 +12,7 @@ Features: * Add purge_media_cache admin API (PR #902) * Add deactivate account admin API (PR #903) -* Add optional pepper to password hashing (PR #907, #910) +* Add optional pepper to password hashing (PR #907, #910 by KentShikama) * Add an admin option to shared secret registration (breaks backwards compat) (PR #909) * Add purge local room history API (PR #911, #923, #924) @@ -28,14 +28,15 @@ Features: Changes: -* Rewrite LDAP Authentication against ldap3 (PR #843) +* Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt) * Linearize some federation endpoints based on (origin, room_id) (PR #879) * Remove the legacy v0 content upload API. (PR #888) * Use similar naming we use in email notifs for push (PR #894) -* Optionally include password hash in createUser endpoint (PR #905) +* Optionally include password hash in createUser endpoint (PR #905 by + KentShikama) * Use a query that postgresql optimises better for get_events_around (PR #906) * Fall back to 'username' if 'user' is not given for appservice registration. - (PR #927) + (PR #927 by Half-Shot) * Add metrics for psutil derived memory usage (PR #936) * Record device_id in client_ips (PR #938) * Send the correct host header when fetching keys (PR #941) From 367b594183c553436bb0338e9f26e42fa46424dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 14:56:09 +0100 Subject: [PATCH 321/414] Add some basic admin API docs --- docs/admin_api/README.rst | 12 ++++++++++++ docs/admin_api/purge_history_api.rst | 15 +++++++++++++++ docs/admin_api/purge_remote_media.rst | 19 +++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 docs/admin_api/README.rst create mode 100644 docs/admin_api/purge_history_api.rst create mode 100644 docs/admin_api/purge_remote_media.rst diff --git a/docs/admin_api/README.rst b/docs/admin_api/README.rst new file mode 100644 index 0000000000..d4f564cfae --- /dev/null +++ b/docs/admin_api/README.rst @@ -0,0 +1,12 @@ +Admin APIs +========== + +This directory includes documentation for the various synapse specific admin +APIs available. + +Only users that are server admins can use these APIs. A user can be marked as a +server admin by updating the database directly, e.g.: + +``UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'`` + +Restarting may be required for the changes to register. diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst new file mode 100644 index 0000000000..986efe40f9 --- /dev/null +++ b/docs/admin_api/purge_history_api.rst @@ -0,0 +1,15 @@ +Purge History API +================= + +The purge history API allows server admins to purge historic events from their +database, reclaiming disk space. 
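As a worked example, the endpoint documented in the remainder of this file
could be driven as follows (a hedged sketch: the requests library, the token
and the ids are placeholders, not part of synapse itself)::

    import requests

    resp = requests.post(
        "https://my.home.server/_matrix/client/r0/admin/purge_history"
        "/!someroom:my.home.server/$someevent:my.home.server",
        params={"access_token": "ADMIN_ACCESS_TOKEN"},
    )
    resp.raise_for_status()  # ids shown unescaped for readability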
+Depending on the amount of history being purged a call to the API may take
+several minutes or longer. During this period users will not be able to
+paginate further back in the room from the point being purged from.
+
+The API is simply:
+
+``POST /_matrix/client/r0/admin/purge_history/<room_id>/<event_id>``
+
+including an ``access_token`` of a server admin.
diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst
new file mode 100644
index 0000000000..749ed1b2b9
--- /dev/null
+++ b/docs/admin_api/purge_remote_media.rst
@@ -0,0 +1,19 @@
+Purge Remote Media API
+======================
+
+The purge remote media API allows server admins to purge old cached remote
+media.
+
+The API is::
+
+    POST /_matrix/client/r0/admin/purge_history/
+
+    {
+        "before_ts": <unix_timestamp_in_ms>
+    }
+
+Which will remove all cached media that was last accessed before
+``<unix_timestamp_in_ms>``.
+
+If the user re-requests purged remote media, synapse will re-request the media
+from the originating server.
From 3c3246c078134124610afa40ec55626568c5627c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Jul 2016 15:08:37 +0100
Subject: [PATCH 322/414] Use correct path

---
 docs/admin_api/purge_remote_media.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst
index 749ed1b2b9..b26c6a9e7b 100644
--- a/docs/admin_api/purge_remote_media.rst
+++ b/docs/admin_api/purge_remote_media.rst
@@ -6,7 +6,7 @@ media.
 
 The API is::
 
-    POST /_matrix/client/r0/admin/purge_history/
+    POST /_matrix/client/r0/admin/purge_media_cache
 
     {
         "before_ts": <unix_timestamp_in_ms>
From 6ede23ff1b956e72b3a2864e85accb8c05fff6f0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 27 Jul 2016 15:51:43 +0100
Subject: [PATCH 323/414] Add more key storage funcs into slave store

---
 synapse/replication/slave/storage/keys.py | 16 +++++++----
 synapse/storage/keys.py                   | 34 +++++++++++------------
 2 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py
index c1c895439d..dd2ae49e48 100644
--- a/synapse/replication/slave/storage/keys.py
+++ b/synapse/replication/slave/storage/keys.py
@@ -13,17 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
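# [Editor's note] A minimal sketch of the method-borrowing idiom the slave
# stores below rely on. Under Python 2 (which synapse targeted at the time),
# DataStore.some_method is an unbound method whose plain function lives in
# .__func__; pulling it out of __dict__ achieves the same and lets it be
# re-bound to an unrelated class:
class MasterStore(object):
    def get_keys(self):
        return ["a_key"]

class SlavedStore(object):
    get_keys = MasterStore.__dict__["get_keys"]

assert SlavedStore().get_keys() == ["a_key"]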
-from twisted.internet import defer - from ._base import BaseSlavedStore from synapse.storage import DataStore from synapse.storage.keys import KeyStore class SlavedKeyStore(BaseSlavedStore): - # TODO: use the cached version and invalidate deleted tokens - get_all_server_verify_keys = defer.inlineCallbacks(KeyStore.__dict__[ - "get_all_server_verify_keys" - ].orig) + _get_server_verify_key = KeyStore.__dict__[ + "_get_server_verify_key" + ] get_server_verify_keys = DataStore.get_server_verify_keys.__func__ + store_server_verify_key = DataStore.store_server_verify_key.__func__ + + get_server_certificate = DataStore.get_server_certificate.__func__ + store_server_certificate = DataStore.store_server_certificate.__func__ + + get_server_keys_json = DataStore.get_server_keys_json.__func__ + store_server_keys_json = DataStore.store_server_keys_json.__func__ diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 1195efec08..86b37b9ddd 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -78,22 +78,22 @@ class KeyStore(SQLBaseStore): ) @cachedInlineCallbacks() - def get_all_server_verify_keys(self, server_name): - rows = yield self._simple_select_list( + def _get_server_verify_key(self, server_name, key_id): + verify_key_bytes = yield self._simple_select_one_onecol( table="server_signature_keys", keyvalues={ "server_name": server_name, + "key_id": key_id, }, - retcols=["key_id", "verify_key"], - desc="get_all_server_verify_keys", + retcol="verify_key", + desc="_get_server_verify_key", + allow_none=True, ) - defer.returnValue({ - row["key_id"]: decode_verify_key_bytes( - row["key_id"], str(row["verify_key"]) - ) - for row in rows - }) + if verify_key_bytes: + defer.returnValue(decode_verify_key_bytes( + key_id, str(verify_key_bytes) + )) @defer.inlineCallbacks def get_server_verify_keys(self, server_name, key_ids): @@ -105,12 +105,12 @@ class KeyStore(SQLBaseStore): Returns: (list of VerifyKey): The verification keys. 
""" - keys = yield self.get_all_server_verify_keys(server_name) - defer.returnValue({ - k: keys[k] - for k in key_ids - if k in keys and keys[k] - }) + keys = {} + for key_id in key_ids: + key = yield self._get_server_verify_key(server_name, key_id) + if key: + keys[key_id] = key + defer.returnValue(keys) @defer.inlineCallbacks def store_server_verify_key(self, server_name, from_server, time_now_ms, @@ -137,8 +137,6 @@ class KeyStore(SQLBaseStore): desc="store_server_verify_key", ) - self.get_all_server_verify_keys.invalidate((server_name,)) - def store_server_keys_json(self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes): """Stores the JSON bytes for a set of keys from a server From 1e2740caabe348e4131fe6bd2d777fc7483909a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 16:08:33 +0100 Subject: [PATCH 324/414] Handle the case of missing auth events when joining a room --- synapse/handlers/federation.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3f138daf17..cab7efb5db 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -124,7 +124,7 @@ class FederationHandler(BaseHandler): try: event_stream_id, max_stream_id = yield self._persist_auth_tree( - auth_chain, state, event + origin, auth_chain, state, event ) except AuthError as e: raise FederationError( @@ -637,7 +637,7 @@ class FederationHandler(BaseHandler): pass event_stream_id, max_stream_id = yield self._persist_auth_tree( - auth_chain, state, event + origin, auth_chain, state, event ) with PreserveLoggingContext(): @@ -1155,7 +1155,7 @@ class FederationHandler(BaseHandler): ) @defer.inlineCallbacks - def _persist_auth_tree(self, auth_events, state, event): + def _persist_auth_tree(self, origin, auth_events, state, event): """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event seperately. 
@@ -1172,7 +1172,7 @@ class FederationHandler(BaseHandler): event_map = { e.event_id: e - for e in auth_events + for e in itertools.chain(auth_events, state, [event]) } create_event = None @@ -1181,10 +1181,29 @@ class FederationHandler(BaseHandler): create_event = e break + missing_auth_events = set() + for e in itertools.chain(auth_events, state, [event]): + for e_id, _ in e.auth_events: + if e_id not in event_map: + missing_auth_events.add(e_id) + + for e_id in missing_auth_events: + m_ev = yield self.replication_layer.get_pdu( + [origin], + e_id, + outlier=True, + timeout=10000, + ) + if m_ev and m_ev.event_id == e_id: + event_map[e_id] = m_ev + else: + logger.info("Failed to find auth event %r", e_id) + for e in itertools.chain(auth_events, state, [event]): auth_for_e = { (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id] for e_id, _ in e.auth_events + if e_id in event_map } if create_event: auth_for_e[(EventTypes.Create, "")] = create_event From 0fcbca531f448e3cef50074404cbf7af457105f3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 16:36:28 +0100 Subject: [PATCH 325/414] Add get_auth_chain to slave store --- synapse/replication/slave/storage/events.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 2ba1e6b803..fcd0f14a6c 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -147,6 +147,10 @@ class SlavedEventStore(BaseSlavedStore): get_missing_events = DataStore.get_missing_events.__func__ _get_missing_events = DataStore._get_missing_events.__func__ + get_auth_chain = DataStore.get_auth_chain.__func__ + get_auth_chain_ids = DataStore.get_auth_chain_ids.__func__ + _get_auth_chain_ids_txn = DataStore._get_auth_chain_ids_txn.__func__ + def stream_positions(self): result = super(SlavedEventStore, self).stream_positions() result["events"] = self._stream_id_gen.get_current_token() From 370135ad0b7cf7ded04e9f2ca0c99f5470f5efc1 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Thu, 28 Jul 2016 16:47:37 +0100 Subject: [PATCH 326/414] Comment get_unread_push_actions_for_user_in_range function --- synapse/storage/event_push_actions.py | 28 +++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 3d93285f84..958dbcc22b 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -119,9 +119,28 @@ class EventPushActionsStore(SQLBaseStore): @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range(self, user_id, min_stream_ordering, - max_stream_ordering=None, + max_stream_ordering, limit=20): + """Get a list of the most recent unread push actions for a given user, + within the given stream ordering range. + + Args: + user_id (str) + min_stream_ordering + max_stream_ordering + limit (int) + Returns: + A promise which resolves to a list of dicts with the keys "event_id", + "room_id", "stream_ordering", "actions", "received_ts". + The list will have between 0~limit entries. + """ + # find rooms that have a read receipt in them and return the most recent + # push actions def get_after_receipt(txn): + # XXX: Do we really need to GROUP BY user_id on the inner SELECT? + # XXX: NATURAL JOIN obfuscates which columns are being joined on the + # inner SELECT (the room_id and event_id), can we + # INNER JOIN ... USING instead? 
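# [Editor's note] The two queries in this method are run independently and
# merged in Python rather than in SQL. A standalone sketch of that final
# merge step (plain dicts stand in for rows):
def merge_notifs(after_receipt, no_receipt, limit=20):
    notifs = after_receipt + no_receipt
    # Newest first; rows lacking received_ts sort to the end.
    notifs.sort(key=lambda r: -(r["received_ts"] or 0))
    return notifs[:limit]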
            sql = (
                "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, "
                "e.received_ts "
@@ -160,7 +179,12 @@ class EventPushActionsStore(SQLBaseStore):
             "get_unread_push_actions_for_user_in_range", get_after_receipt
         )
 
+        # There are rooms with push actions in them but you don't have a read receipt in
+        # them e.g. rooms you've been invited to, so get push actions for rooms which do
+        # not have read receipts in them too.
         def get_no_receipt(txn):
+            # XXX: Does the inner SELECT really need to select from the events table?
+            # We're just extracting the room_id, so isn't receipts_linearized enough?
             sql = (
                 "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
                 " e.received_ts"
@@ -198,7 +222,7 @@ class EventPushActionsStore(SQLBaseStore):
         # Now sort it so it's ordered correctly, since currently it will
         # contain results from the first query, correctly ordered, followed
         # by results from the second query, but we want them all ordered
-        # by received_ts
+        # by received_ts (most recent first)
         notifs.sort(key=lambda r: -(r['received_ts'] or 0))
 
         # Now return the first `limit`
From 76b89d0edb9df7c5d8b595b85ff895367631fdf2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Jul 2016 17:03:40 +0100
Subject: [PATCH 327/414] Add slave storage functions for public room list

---
 synapse/app/federation_reader.py            |  4 ++++
 .../replication/slave/storage/directory.py  | 23 +++++++++++++++++++
 synapse/replication/slave/storage/room.py   | 21 +++++++++++++++++
 3 files changed, 48 insertions(+)
 create mode 100644 synapse/replication/slave/storage/directory.py
 create mode 100644 synapse/replication/slave/storage/room.py

diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 98a18f9b3d..2e5ba09014 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -24,6 +24,8 @@ from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.async import sleep
@@ -52,6 +54,8 @@ logger = logging.getLogger("synapse.app.federation_reader")
 class FederationReaderSlavedStore(
     SlavedEventStore,
     SlavedKeyStore,
+    RoomStore,
+    DirectoryStore,
     BaseSlavedStore,
 ):
     pass

diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py
new file mode 100644
index 0000000000..5fbe3a303a
--- /dev/null
+++ b/synapse/replication/slave/storage/directory.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
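# [Editor's note] The .orig attribute reached for below comes from synapse's
# @cached decorator, which keeps a reference to the undecorated function. A
# simplified sketch of why such a decorator exposes .orig (assumed shape,
# not the real implementation):
def cached(f):
    memo = {}
    def wrapper(self, key):
        if key not in memo:
            memo[key] = f(self, key)
        return memo[key]
    wrapper.orig = f  # the raw function, re-usable on slave classes
    return wrapper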
+ +from ._base import BaseSlavedStore +from synapse.storage.directory import DirectoryStore + + +class DirectoryStore(BaseSlavedStore): + get_aliases_for_room = DirectoryStore.__dict__[ + "get_aliases_for_room" + ].orig diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py new file mode 100644 index 0000000000..d5bb0f98ea --- /dev/null +++ b/synapse/replication/slave/storage/room.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import BaseSlavedStore +from synapse.storage import DataStore + + +class RoomStore(BaseSlavedStore): + get_public_room_ids = DataStore.get_public_room_ids.__func__ From ec8b217722be15fe110be77c7c7909a7758202cb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jul 2016 17:35:53 +0100 Subject: [PATCH 328/414] Add destination retry to slave store --- synapse/app/federation_reader.py | 2 ++ .../replication/slave/storage/transactions.py | 30 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 synapse/replication/slave/storage/transactions.py diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 2e5ba09014..58d425f9ac 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -25,6 +25,7 @@ from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.room import RoomStore +from synapse.replication.slave.storage.transactions import TransactionStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.server import HomeServer from synapse.storage.engines import create_engine @@ -56,6 +57,7 @@ class FederationReaderSlavedStore( SlavedKeyStore, RoomStore, DirectoryStore, + TransactionStore, BaseSlavedStore, ): pass diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py new file mode 100644 index 0000000000..6f2ba98af5 --- /dev/null +++ b/synapse/replication/slave/storage/transactions.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from twisted.internet import defer
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.transactions import TransactionStore
+
+
+class TransactionStore(BaseSlavedStore):
+    get_destination_retry_timings = TransactionStore.__dict__[
+        "get_destination_retry_timings"
+    ].orig
+    _get_destination_retry_timings = DataStore._get_destination_retry_timings.__func__
+
+    # For now, don't record the destination retry timings
+    def set_destination_retry_timings(*args, **kwargs):
+        return defer.succeed(None)

From 0a7d3cd00f8b7e3ad0ba458c3ab9b40a2496545b Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 28 Jul 2016 20:24:24 +0100
Subject: [PATCH 329/414] Create separate methods for getting messages to push
 for the email and http pushers rather than trying to make a single method
 that will work with their conflicting requirements.

The http pusher needs to get the messages in ascending stream order, and
doesn't want to miss a message.

The email pusher needs to get the messages in descending timestamp order,
and doesn't mind if it misses messages.
---
 synapse/push/emailpusher.py | 5 +-
 synapse/push/httppusher.py | 3 +-
 synapse/replication/slave/storage/events.py | 7 +-
 synapse/storage/event_push_actions.py | 199 +++++++++++++++-----
 tests/storage/test_event_push_actions.py | 41 ++++
 5 files changed, 204 insertions(+), 51 deletions(-)
 create mode 100644 tests/storage/test_event_push_actions.py

diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index 12a3ec7fd8..e224b68291 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -140,9 +140,8 @@ class EmailPusher(object):
             being run.
         """
         start = 0 if INCLUDE_ALL_UNREAD_NOTIFS else self.last_stream_ordering
-        unprocessed = yield self.store.get_unread_push_actions_for_user_in_range(
-            self.user_id, start, self.max_stream_ordering
-        )
+        fn = self.store.get_unread_push_actions_for_user_in_range_for_email
+        unprocessed = yield fn(self.user_id, start, self.max_stream_ordering)

         soonest_due_at = None

diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 2acc6cc214..9a7db61220 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -141,7 +141,8 @@ class HttpPusher(object):
         run once per pusher.
""" - unprocessed = yield self.store.get_unread_push_actions_for_user_in_range( + fn = self.store.get_unread_push_actions_for_user_in_range_for_http + unprocessed = yield fn( self.user_id, self.last_stream_ordering, self.max_stream_ordering ) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 369d839464..6a644f1386 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -93,8 +93,11 @@ class SlavedEventStore(BaseSlavedStore): StreamStore.__dict__["get_recent_event_ids_for_room"] ) - get_unread_push_actions_for_user_in_range = ( - DataStore.get_unread_push_actions_for_user_in_range.__func__ + get_unread_push_actions_for_user_in_range_for_http = ( + DataStore.get_unread_push_actions_for_user_in_range_for_http.__func__ + ) + get_unread_push_actions_for_user_in_range_for_email = ( + DataStore.get_unread_push_actions_for_user_in_range_for_email.__func__ ) get_push_action_users_in_range = ( DataStore.get_push_action_users_in_range.__func__ diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 958dbcc22b..5ab362bef2 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -117,40 +117,149 @@ class EventPushActionsStore(SQLBaseStore): defer.returnValue(ret) @defer.inlineCallbacks - def get_unread_push_actions_for_user_in_range(self, user_id, - min_stream_ordering, - max_stream_ordering, - limit=20): + def get_unread_push_actions_for_user_in_range_for_http( + self, user_id, min_stream_ordering, max_stream_ordering, limit=20 + ): """Get a list of the most recent unread push actions for a given user, - within the given stream ordering range. + within the given stream ordering range. Called by the httppusher. Args: - user_id (str) - min_stream_ordering - max_stream_ordering - limit (int) + user_id (str): The user to fetch push actions for. + min_stream_ordering(int): The exclusive lower bound on the + stream ordering of event push actions to fetch. + max_stream_ordering(int): The inclusive upper bound on the + stream ordering of event push actions to fetch. + limit (int): The maximum number of rows to return. + Returns: + A promise which resolves to a list of dicts with the keys "event_id", + "room_id", "stream_ordering", "actions". + The list will be ordered by ascending stream_ordering. + The list will have between 0~limit entries. + """ + # find rooms that have a read receipt in them and return the next + # push actions + def get_after_receipt(txn): + # find rooms that have a read receipt in them and return the next + # push actions + sql = ( + "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions" + " FROM (" + " SELECT room_id," + " MAX(topological_ordering) as topological_ordering," + " MAX(stream_ordering) as stream_ordering" + " FROM events" + " INNER JOIN receipts_linearized USING (room_id, event_id)" + " WHERE receipt_type = 'm.read' AND user_id = ?" + " GROUP BY room_id" + ") AS rl," + " event_push_actions AS ep" + " WHERE" + " ep.room_id = rl.room_id" + " AND (" + " ep.topological_ordering > rl.topological_ordering" + " OR (" + " ep.topological_ordering = rl.topological_ordering" + " AND ep.stream_ordering > rl.stream_ordering" + " )" + " )" + " AND ep.user_id = ?" + " AND ep.stream_ordering > ?" + " AND ep.stream_ordering <= ?" + " ORDER BY ep.stream_ordering ASC LIMIT ?" 
+ ) + args = [ + user_id, user_id, + min_stream_ordering, max_stream_ordering, limit, + ] + txn.execute(sql, args) + return txn.fetchall() + after_read_receipt = yield self.runInteraction( + "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt + ) + + # There are rooms with push actions in them but you don't have a read receipt in + # them e.g. rooms you've been invited to, so get push actions for rooms which do + # not have read receipts in them too. + def get_no_receipt(txn): + sql = ( + "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," + " e.received_ts" + " FROM event_push_actions AS ep" + " INNER JOIN events AS e USING (room_id, event_id)" + " WHERE" + " ep.room_id NOT IN (" + " SELECT room_id FROM receipts_linearized" + " WHERE receipt_type = 'm.read' AND user_id = ?" + " GROUP BY room_id" + " )" + " AND ep.user_id = ?" + " AND ep.stream_ordering > ?" + " AND ep.stream_ordering <= ?" + " ORDER BY ep.stream_ordering ASC LIMIT ?" + ) + args = [ + user_id, user_id, + min_stream_ordering, max_stream_ordering, limit, + ] + txn.execute(sql, args) + return txn.fetchall() + no_read_receipt = yield self.runInteraction( + "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt + ) + + notifs = [ + { + "event_id": row[0], + "room_id": row[1], + "stream_ordering": row[2], + "actions": json.loads(row[3]), + } for row in after_read_receipt + no_read_receipt + ] + + # Now sort it so it's ordered correctly, since currently it will + # contain results from the first query, correctly ordered, followed + # by results from the second query, but we want them all ordered + # by stream_ordering, oldest first. + notifs.sort(key=lambda r: r['stream_ordering']) + + # Take only up to the limit. We have to stop at the limit because + # one of the subqueries may have hit the limit. + defer.returnValue(notifs[:limit]) + + @defer.inlineCallbacks + def get_unread_push_actions_for_user_in_range_for_email( + self, user_id, min_stream_ordering, max_stream_ordering, limit=20 + ): + """Get a list of the most recent unread push actions for a given user, + within the given stream ordering range. Called by the emailpusher + + Args: + user_id (str): The user to fetch push actions for. + min_stream_ordering(int): The exclusive lower bound on the + stream ordering of event push actions to fetch. + max_stream_ordering(int): The inclusive upper bound on the + stream ordering of event push actions to fetch. + limit (int): The maximum number of rows to return. Returns: A promise which resolves to a list of dicts with the keys "event_id", "room_id", "stream_ordering", "actions", "received_ts". + The list will be ordered by descending received_ts. The list will have between 0~limit entries. """ # find rooms that have a read receipt in them and return the most recent # push actions def get_after_receipt(txn): - # XXX: Do we really need to GROUP BY user_id on the inner SELECT? - # XXX: NATURAL JOIN obfuscates which columns are being joined on the - # inner SELECT (the room_id and event_id), can we - # INNER JOIN ... USING instead? 
sql = ( - "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, " - "e.received_ts " - "FROM (" - " SELECT room_id, user_id, " - " max(topological_ordering) as topological_ordering, " - " max(stream_ordering) as stream_ordering " - " FROM events" - " NATURAL JOIN receipts_linearized WHERE receipt_type = 'm.read'" - " GROUP BY room_id, user_id" + "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," + " e.received_ts" + " FROM (" + " SELECT room_id," + " MAX(topological_ordering) as topological_ordering," + " MAX(stream_ordering) as stream_ordering" + " FROM events" + " INNER JOIN receipts_linearized USING (room_id, event_id)" + " WHERE receipt_type = 'm.read' AND user_id = ?" + " GROUP BY room_id" ") AS rl," " event_push_actions AS ep" " INNER JOIN events AS e USING (room_id, event_id)" @@ -165,47 +274,47 @@ class EventPushActionsStore(SQLBaseStore): " )" " AND ep.stream_ordering > ?" " AND ep.user_id = ?" - " AND ep.user_id = rl.user_id" + " AND ep.stream_ordering <= ?" + " ORDER BY ep.stream_ordering DESC LIMIT ?" ) - args = [min_stream_ordering, user_id] - if max_stream_ordering is not None: - sql += " AND ep.stream_ordering <= ?" - args.append(max_stream_ordering) - sql += " ORDER BY ep.stream_ordering DESC LIMIT ?" - args.append(limit) + args = [ + user_id, user_id, + min_stream_ordering, max_stream_ordering, limit, + ] txn.execute(sql, args) return txn.fetchall() after_read_receipt = yield self.runInteraction( - "get_unread_push_actions_for_user_in_range", get_after_receipt + "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt ) # There are rooms with push actions in them but you don't have a read receipt in # them e.g. rooms you've been invited to, so get push actions for rooms which do # not have read receipts in them too. def get_no_receipt(txn): - # XXX: Does the inner SELECT really need to select from the events table? - # We're just extracting the room_id, so isn't receipts_linearized enough? sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," " e.received_ts" " FROM event_push_actions AS ep" - " JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id" - " WHERE ep.room_id not in (" - " SELECT room_id FROM events NATURAL JOIN receipts_linearized" - " WHERE receipt_type = 'm.read' AND user_id = ?" - " GROUP BY room_id" - ") AND ep.user_id = ? AND ep.stream_ordering > ?" + " INNER JOIN events AS e USING (room_id, event_id)" + " WHERE" + " ep.room_id NOT IN (" + " SELECT room_id FROM receipts_linearized" + " WHERE receipt_type = 'm.read' AND user_id = ?" + " GROUP BY room_id" + " )" + " AND ep.user_id = ?" + " AND ep.stream_ordering > ?" + " AND ep.stream_ordering <= ?" + " ORDER BY ep.stream_ordering DESC LIMIT ?" ) - args = [user_id, user_id, min_stream_ordering] - if max_stream_ordering is not None: - sql += " AND ep.stream_ordering <= ?" - args.append(max_stream_ordering) - sql += " ORDER BY ep.stream_ordering DESC LIMIT ?" - args.append(limit) + args = [ + user_id, user_id, + min_stream_ordering, max_stream_ordering, limit, + ] txn.execute(sql, args) return txn.fetchall() no_read_receipt = yield self.runInteraction( - "get_unread_push_actions_for_user_in_range", get_no_receipt + "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt ) # Make a list of dicts from the two sets of results. 
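
Both of the new methods share the same post-processing step: each of the two
SQL queries is capped at ``limit`` on its own, so the combined rows have to be
re-sorted and truncated again in Python. A minimal sketch of that step for the
http variant (assuming each row is an ``(event_id, room_id, stream_ordering,
actions)`` tuple, as returned by the queries above)::

    import json

    def merge_and_truncate(after_read_receipt, no_read_receipt, limit):
        notifs = [
            {
                "event_id": row[0],
                "room_id": row[1],
                "stream_ordering": row[2],
                "actions": json.loads(row[3]),
            }
            for row in after_read_receipt + no_read_receipt
        ]
        # The http pusher wants oldest first; the email variant instead
        # sorts by received_ts, newest first.
        notifs.sort(key=lambda r: r["stream_ordering"])
        # Either subquery may have hit the limit on its own, so the merged
        # list must be truncated as well.
        return notifs[:limit]
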
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py new file mode 100644 index 0000000000..e9044afa2e --- /dev/null +++ b/tests/storage/test_event_push_actions.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +import tests.unittest +import tests.utils + +USER_ID = "@user:example.com" + + +class EventPushActionsStoreTestCase(tests.unittest.TestCase): + + @defer.inlineCallbacks + def setUp(self): + hs = yield tests.utils.setup_test_homeserver() + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def test_get_unread_push_actions_for_user_in_range_for_http(self): + yield self.store.get_unread_push_actions_for_user_in_range_for_http( + USER_ID, 0, 1000, 20 + ) + + @defer.inlineCallbacks + def test_get_unread_push_actions_for_user_in_range_for_email(self): + yield self.store.get_unread_push_actions_for_user_in_range_for_email( + USER_ID, 0, 1000, 20 + ) From 8dad08a9509103f38d9eec5dc28d46e4a757fad8 Mon Sep 17 00:00:00 2001 From: Mark Haines Date: Fri, 29 Jul 2016 09:57:13 +0100 Subject: [PATCH 330/414] Fix SQL to supply arguments in the same order --- synapse/storage/event_push_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index 5ab362bef2..df4000d0da 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -272,8 +272,8 @@ class EventPushActionsStore(SQLBaseStore): " AND ep.stream_ordering > rl.stream_ordering" " )" " )" - " AND ep.stream_ordering > ?" " AND ep.user_id = ?" + " AND ep.stream_ordering > ?" " AND ep.stream_ordering <= ?" " ORDER BY ep.stream_ordering DESC LIMIT ?" ) From 3d13c3a2952263c38111fcf95d625e316416b52b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jul 2016 10:45:05 +0100 Subject: [PATCH 331/414] Update docstring --- synapse/handlers/federation.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cab7efb5db..9583629388 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1160,6 +1160,12 @@ class FederationHandler(BaseHandler): state and event. Then persists the auth chain and state atomically. Persists the event seperately. 
+ Args: + origin (str): Where the events came from + auth_events (list) + state (list) + event (Event) + Returns: 2-tuple of (event_stream_id, max_stream_id) from the persist_event call for `event` From c51a52f3002abf4597952e07759c6ab3016e3497 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jul 2016 11:17:04 +0100 Subject: [PATCH 332/414] Mention that func will fetch auth events --- synapse/handlers/federation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9583629388..1323235b62 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1160,6 +1160,8 @@ class FederationHandler(BaseHandler): state and event. Then persists the auth chain and state atomically. Persists the event seperately. + Will attempt to fetch missing auth events. + Args: origin (str): Where the events came from auth_events (list) From 74106ba17177db837bea06c35b39dbf1adc75648 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jul 2016 11:45:03 +0100 Subject: [PATCH 333/414] Make jenkins dendron test federation read apis --- jenkins-dendron-postgres.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 50268e0982..9e3b2df9cf 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -82,6 +82,7 @@ echo >&2 "Running sytest with PostgreSQL"; --dendron $WORKSPACE/dendron/bin/dendron \ --pusher \ --synchrotron \ + --federation-reader \ --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1)) cd .. From 271d3e78652ef7a477af2b058bdd7c13e4816076 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 29 Jul 2016 15:25:24 +0100 Subject: [PATCH 334/414] Fix adding emails on registration Synapse was not adding email addresses to accounts registered with an email address, due to too many different variables called 'result'. Rename both of them. Also remove the defer.returnValue() with no params because that's not a thing. 
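
Reduced to its essentials, the bug looked like this (a toy reconstruction of
the control flow, not the actual handler code)::

    def check_auth():
        # Stands in for auth_handler.check_auth(); the auth result is what
        # carries the validated email threepid.
        return {"m.login.email.identity": {"address": "user@example.com"}}

    def create_registration_details():
        # Stands in for _create_registration_details(); tokens only.
        return {"user_id": "@user:example.com", "access_token": "secret"}

    result = check_auth()
    result = create_registration_details()  # clobbers the auth result

    # The email-binding step then looked for the threepid in `result` and,
    # finding nothing, silently skipped adding the address:
    if "m.login.email.identity" not in result:
        print("email never bound")
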
--- synapse/rest/client/v2_alpha/register.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 9f599ea8bb..943f5676a3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -196,12 +196,12 @@ class RegisterRestServlet(RestServlet): [LoginType.EMAIL_IDENTITY] ] - authed, result, params, session_id = yield self.auth_handler.check_auth( + authed, auth_result, params, session_id = yield self.auth_handler.check_auth( flows, body, self.hs.get_ip_from_request(request) ) if not authed: - defer.returnValue((401, result)) + defer.returnValue((401, auth_result)) return if registered_user_id is not None: @@ -236,18 +236,18 @@ class RegisterRestServlet(RestServlet): add_email = True - result = yield self._create_registration_details( + return_dict = yield self._create_registration_details( registered_user_id, params ) - if add_email and result and LoginType.EMAIL_IDENTITY in result: - threepid = result[LoginType.EMAIL_IDENTITY] + if add_email and auth_result and LoginType.EMAIL_IDENTITY in auth_result: + threepid = auth_result[LoginType.EMAIL_IDENTITY] yield self._register_email_threepid( - registered_user_id, threepid, result["access_token"], + registered_user_id, threepid, return_dict["access_token"], params.get("bind_email") ) - defer.returnValue((200, result)) + defer.returnValue((200, return_dict)) def on_OPTIONS(self, _): return 200, {} @@ -356,8 +356,6 @@ class RegisterRestServlet(RestServlet): else: logger.info("bind_email not specified: not binding email") - defer.returnValue() - @defer.inlineCallbacks def _create_registration_details(self, user_id, params): """Complete registration of newly-registered user From b260f92936e7e80ee9885755d608d58ffb9101ba Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Sun, 31 Jul 2016 15:30:13 +0100 Subject: [PATCH 335/414] Ignore AlreadyCalled errors on timer cancel --- synapse/push/emailpusher.py | 12 ++++++++++-- synapse/push/httppusher.py | 7 ++++++- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index e224b68291..6600c9cd55 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -14,6 +14,7 @@ # limitations under the License. 
from twisted.internet import defer, reactor +from twisted.internet.error import AlreadyCalled, AlreadyCancelled import logging @@ -92,7 +93,11 @@ class EmailPusher(object): def on_stop(self): if self.timed_call: - self.timed_call.cancel() + try: + self.timed_call.cancel() + except (AlreadyCalled, AlreadyCancelled): + pass + self.timed_call = None @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): @@ -189,7 +194,10 @@ class EmailPusher(object): soonest_due_at = should_notify_at if self.timed_call is not None: - self.timed_call.cancel() + try: + self.timed_call.cancel() + except (AlreadyCalled, AlreadyCancelled): + pass self.timed_call = None if soonest_due_at is not None: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 9a7db61220..feedb075e2 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -16,6 +16,7 @@ from synapse.push import PusherConfigException from twisted.internet import defer, reactor +from twisted.internet.error import AlreadyCalled, AlreadyCancelled import logging import push_rule_evaluator @@ -109,7 +110,11 @@ class HttpPusher(object): def on_stop(self): if self.timed_call: - self.timed_call.cancel() + try: + self.timed_call.cancel() + except (AlreadyCalled, AlreadyCancelled): + pass + self.timed_call = None @defer.inlineCallbacks def _process(self): From bfeaab6dfc84adc38e5990a7f26c5b7148606a28 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 1 Aug 2016 17:12:02 +0100 Subject: [PATCH 336/414] missing --upgrade --- UPGRADE.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index 699f04c2c2..9f044719a0 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -27,7 +27,7 @@ running: # Pull the latest version of the master branch. git pull # Update the versions of synapse's python dependencies. - python synapse/python_dependencies.py | xargs -n1 pip install + python synapse/python_dependencies.py | xargs -n1 pip install --upgrade Upgrading to v0.15.0 From 986615b0b21271959adb9d64291761244e4175bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Aug 2016 18:02:07 +0100 Subject: [PATCH 337/414] Move e2e query logic into a handler --- synapse/handlers/e2e_keys.py | 67 ++++++++++++++++++++++++++++ synapse/rest/client/v2_alpha/keys.py | 46 +++---------------- synapse/server.py | 65 ++++++++++++++------------- synapse/server.pyi | 4 ++ 4 files changed, 112 insertions(+), 70 deletions(-) create mode 100644 synapse/handlers/e2e_keys.py diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py new file mode 100644 index 0000000000..73a14cf952 --- /dev/null +++ b/synapse/handlers/e2e_keys.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import logging + +from twisted.internet import defer + +import synapse.types +from ._base import BaseHandler + +logger = logging.getLogger(__name__) + + +class E2eKeysHandler(BaseHandler): + def __init__(self, hs): + super(E2eKeysHandler, self).__init__(hs) + self.store = hs.get_datastore() + self.federation = hs.get_replication_layer() + self.is_mine = hs.is_mine + + @defer.inlineCallbacks + def query_devices(self, query_body): + local_query = [] + remote_queries = {} + for user_id, device_ids in query_body.get("device_keys", {}).items(): + user = synapse.types.UserID.from_string(user_id) + if self.is_mine(user): + if not device_ids: + local_query.append((user_id, None)) + else: + for device_id in device_ids: + local_query.append((user_id, device_id)) + else: + remote_queries.setdefault(user.domain, {})[user_id] = list( + device_ids + ) + results = yield self.store.get_e2e_device_keys(local_query) + + json_result = {} + for user_id, device_keys in results.items(): + for device_id, json_bytes in device_keys.items(): + json_result.setdefault(user_id, {})[ + device_id] = json.loads( + json_bytes + ) + + for destination, device_keys in remote_queries.items(): + remote_result = yield self.federation.query_client_keys( + destination, {"device_keys": device_keys} + ) + for user_id, keys in remote_result["device_keys"].items(): + if user_id in device_keys: + json_result[user_id] = keys + defer.returnValue((200, {"device_keys": json_result})) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index dc1d4d8fc6..705a0b6c17 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -186,17 +186,19 @@ class KeyQueryServlet(RestServlet): ) def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): + """ super(KeyQueryServlet, self).__init__() - self.store = hs.get_datastore() self.auth = hs.get_auth() - self.federation = hs.get_replication_layer() - self.is_mine = hs.is_mine + self.e2e_keys_handler = hs.get_e2e_keys_handler() @defer.inlineCallbacks def on_POST(self, request, user_id, device_id): yield self.auth.get_user_by_req(request) body = parse_json_object_from_request(request) - result = yield self.handle_request(body) + result = yield self.e2e_keys_handler.query_devices(body) defer.returnValue(result) @defer.inlineCallbacks @@ -205,45 +207,11 @@ class KeyQueryServlet(RestServlet): auth_user_id = requester.user.to_string() user_id = user_id if user_id else auth_user_id device_ids = [device_id] if device_id else [] - result = yield self.handle_request( + result = yield self.e2e_keys_handler.query_devices( {"device_keys": {user_id: device_ids}} ) defer.returnValue(result) - @defer.inlineCallbacks - def handle_request(self, body): - local_query = [] - remote_queries = {} - for user_id, device_ids in body.get("device_keys", {}).items(): - user = UserID.from_string(user_id) - if self.is_mine(user): - if not device_ids: - local_query.append((user_id, None)) - else: - for device_id in device_ids: - local_query.append((user_id, device_id)) - else: - remote_queries.setdefault(user.domain, {})[user_id] = list( - device_ids - ) - results = yield self.store.get_e2e_device_keys(local_query) - - json_result = {} - for user_id, device_keys in results.items(): - for device_id, json_bytes in device_keys.items(): - json_result.setdefault(user_id, {})[device_id] = json.loads( - json_bytes - ) - - for destination, device_keys in remote_queries.items(): - remote_result = yield self.federation.query_client_keys( - 
destination, {"device_keys": device_keys} - ) - for user_id, keys in remote_result["device_keys"].items(): - if user_id in device_keys: - json_result[user_id] = keys - defer.returnValue((200, {"device_keys": json_result})) - class OneTimeKeyServlet(RestServlet): """ diff --git a/synapse/server.py b/synapse/server.py index e8b166990d..6bb4988309 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -19,39 +19,38 @@ # partial one for unit test mocking. # Imports required for the default HomeServer() implementation -from twisted.web.client import BrowserLikePolicyForHTTPS -from twisted.enterprise import adbapi - -from synapse.appservice.scheduler import ApplicationServiceScheduler -from synapse.appservice.api import ApplicationServiceApi -from synapse.federation import initialize_http_replication -from synapse.handlers.device import DeviceHandler -from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory -from synapse.notifier import Notifier -from synapse.api.auth import Auth -from synapse.handlers import Handlers -from synapse.handlers.presence import PresenceHandler -from synapse.handlers.sync import SyncHandler -from synapse.handlers.typing import TypingHandler -from synapse.handlers.room import RoomListHandler -from synapse.handlers.auth import AuthHandler -from synapse.handlers.appservice import ApplicationServicesHandler -from synapse.state import StateHandler -from synapse.storage import DataStore -from synapse.util import Clock -from synapse.util.distributor import Distributor -from synapse.streams.events import EventSources -from synapse.api.ratelimiting import Ratelimiter -from synapse.crypto.keyring import Keyring -from synapse.push.pusherpool import PusherPool -from synapse.events.builder import EventBuilderFactory -from synapse.api.filtering import Filtering -from synapse.rest.media.v1.media_repository import MediaRepository - -from synapse.http.matrixfederationclient import MatrixFederationHttpClient - import logging +from twisted.enterprise import adbapi +from twisted.web.client import BrowserLikePolicyForHTTPS + +from synapse.api.auth import Auth +from synapse.api.filtering import Filtering +from synapse.api.ratelimiting import Ratelimiter +from synapse.appservice.api import ApplicationServiceApi +from synapse.appservice.scheduler import ApplicationServiceScheduler +from synapse.crypto.keyring import Keyring +from synapse.events.builder import EventBuilderFactory +from synapse.federation import initialize_http_replication +from synapse.handlers import Handlers +from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.handlers.auth import AuthHandler +from synapse.handlers.device import DeviceHandler +from synapse.handlers.e2e_keys import E2eKeysHandler +from synapse.handlers.presence import PresenceHandler +from synapse.handlers.room import RoomListHandler +from synapse.handlers.sync import SyncHandler +from synapse.handlers.typing import TypingHandler +from synapse.http.client import SimpleHttpClient, InsecureInterceptableContextFactory +from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.notifier import Notifier +from synapse.push.pusherpool import PusherPool +from synapse.rest.media.v1.media_repository import MediaRepository +from synapse.state import StateHandler +from synapse.storage import DataStore +from synapse.streams.events import EventSources +from synapse.util import Clock +from synapse.util.distributor import Distributor logger = logging.getLogger(__name__) @@ -94,6 +93,7 
@@ class HomeServer(object): 'room_list_handler', 'auth_handler', 'device_handler', + 'e2e_keys_handler', 'application_service_api', 'application_service_scheduler', 'application_service_handler', @@ -202,6 +202,9 @@ class HomeServer(object): def build_device_handler(self): return DeviceHandler(self) + def build_e2e_keys_handler(self): + return E2eKeysHandler(self) + def build_application_service_api(self): return ApplicationServiceApi(self) diff --git a/synapse/server.pyi b/synapse/server.pyi index 902f725c06..c0aa868c4f 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -1,6 +1,7 @@ import synapse.handlers import synapse.handlers.auth import synapse.handlers.device +import synapse.handlers.e2e_keys import synapse.storage import synapse.state @@ -14,6 +15,9 @@ class HomeServer(object): def get_device_handler(self) -> synapse.handlers.device.DeviceHandler: pass + def get_e2e_keys_handler(self) -> synapse.handlers.e2e_keys.E2eKeysHandler: + pass + def get_handlers(self) -> synapse.handlers.Handlers: pass From 55e8a8788895b0c6b6b5a27d153f6d9e7e21d68b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 13:41:17 +0100 Subject: [PATCH 338/414] Change default jenkins port base and count --- jenkins-dendron-postgres.sh | 4 ++-- jenkins-postgres.sh | 4 ++-- jenkins-sqlite.sh | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh index 9e3b2df9cf..f715cd559a 100755 --- a/jenkins-dendron-postgres.sh +++ b/jenkins-dendron-postgres.sh @@ -69,8 +69,8 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PORT_BASE:=8000} -: ${PORT_COUNT=20} +: ${PORT_BASE:=20000} +: ${PORT_COUNT=100} ./jenkins/prep_sytest_for_postgres.sh diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh index 2f0768fcb7..7a43df0d58 100755 --- a/jenkins-postgres.sh +++ b/jenkins-postgres.sh @@ -43,8 +43,8 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PORT_BASE:=8000} -: ${PORT_COUNT=20} +: ${PORT_BASE:=20000} +: ${PORT_COUNT=100} ./jenkins/prep_sytest_for_postgres.sh diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh index da603c5af8..27e61af6ee 100755 --- a/jenkins-sqlite.sh +++ b/jenkins-sqlite.sh @@ -41,8 +41,9 @@ cd sytest git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop) -: ${PORT_COUNT=20} -: ${PORT_BASE:=8000} +: ${PORT_BASE:=20000} +: ${PORT_COUNT=100} + ./jenkins/install_and_run.sh --coverage \ --python $TOX_BIN/python \ --synapse-directory $WORKSPACE \ From fcde5b2a9782d1f49f56d0e8ce694e66eeb6c04f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 15:06:08 +0100 Subject: [PATCH 339/414] Print authorization header for federation_client.py --- scripts-dev/federation_client.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index ea62dceb36..caa3cee4e7 100644 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -116,11 +116,12 @@ def get_json(origin_name, origin_key, destination, path): authorization_headers = [] for key, sig in signed_json["signatures"][origin_name].items(): - authorization_headers.append(bytes( - "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( - origin_name, key, sig, - ) - )) + header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" 
% (
+            origin_name, key, sig,
+        )
+        authorization_headers.append(bytes(header))
+        sys.stderr.write(header)
+        sys.stderr.write("\n")

     result = requests.get(
         lookup(destination, path),

From 8f650bd3381d8dbf9a41d2c09a37a036ba944724 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 2 Aug 2016 15:43:52 +0100
Subject: [PATCH 340/414] Bump changelog and version

---
 CHANGES.rst | 17 +++++++++++++++++
 synapse/__init__.py | 2 +-
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 03668370a9..7254385f7e 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,20 @@
+Changes in synapse v0.17.0-rc2 (2016-08-02)
+===========================================
+
+Changes:
+
+* Forbid non-ASes from registering users whose names begin with '_' (PR #958)
+* Add some basic admin API docs (PR #963)
+
+
+Bug fixes:
+
+* Send the correct host header when fetching keys (PR #941)
+* Fix joining a room that has missing auth events (PR #964)
+* Fix various bush bugs (PR #966, #970)
+* Fix adding emails on registration (PR #968)
+
+
 Changes in synapse v0.17.0-rc1 (2016-07-28)
 ===========================================

diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8f0176e182..9899944977 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
-__version__ = "0.17.0-rc1"
+__version__ = "0.17.0-rc2"

From d199f2248ff2a0a460d526f08f06f33ef0df6f8e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 2 Aug 2016 15:48:43 +0100
Subject: [PATCH 341/414] Change wording

---
 CHANGES.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 7254385f7e..1030a213ad 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -21,8 +21,8 @@ Changes in synapse v0.17.0-rc1 (2016-07-28)
 This release changes the LDAP configuration format in a backwards incompatible
 way, see PR #843 for details.

-This release contains significant security bug fixes regarding authenticating
-events received over federation. Please upgrade.
+The 0.17 release will contain significant security bug fixes regarding +authenticating events received over federation Features: From 456544b621adc5ee67b9458106b5ab4adf118dc5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 16:12:53 +0100 Subject: [PATCH 342/414] Typo --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1030a213ad..cacf4b8947 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -11,7 +11,7 @@ Bug fixes: * Send the correct host header when fetching keys (PR #941) * Fix joining a room that has missing auth events (PR #964) -* Fix various bush bugs (PR #966, #970) +* Fix various push bugs (PR #966, #970) * Fix adding emails on registration (PR #968) From b3d5c4ad9d0c6d858cae1c46bebf0c9442f0187b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 16:42:21 +0100 Subject: [PATCH 343/414] Fix response cache --- synapse/federation/federation_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d15c7e1b40..8f6955ac18 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -205,7 +205,7 @@ class FederationServer(FederationBase): result = self._state_resp_cache.get((room_id, event_id)) if not result: with (yield self._server_linearizer.queue((origin, room_id))): - resp = yield self.response_cache.set( + resp = yield self._state_resp_cache.set( (room_id, event_id), self._on_context_state_request_compute(room_id, event_id) ) From c9154b970c0af5eb19c43a401f44de95afd3f7de Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 16:45:53 +0100 Subject: [PATCH 344/414] Don't double wrap 200 --- synapse/federation/federation_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8f6955ac18..612d274bdb 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -235,10 +235,10 @@ class FederationServer(FederationBase): ) ) - defer.returnValue((200, { + defer.returnValue({ "pdus": [pdu.get_pdu_json() for pdu in pdus], "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], - })) + }) @defer.inlineCallbacks @log_function From 49e047c55ed76977afb2ee227d6052ed28166983 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 17:10:26 +0100 Subject: [PATCH 345/414] Bump version and changelog --- CHANGES.rst | 8 +++++++- synapse/__init__.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index cacf4b8947..d1c0a1b76a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -Changes in synapse v0.17.0-rc2 (2016-08-02) +Changes in synapse v0.17.0-rc3 (2016-08-02) =========================================== Changes: @@ -15,6 +15,12 @@ Bug fixes: * Fix adding emails on registration (PR #968) +Changes in synapse v0.17.0-rc2 (2016-08-02) +=========================================== + +(This release did not include the changes advertised and was identical to RC1) + + Changes in synapse v0.17.0-rc1 (2016-07-28) =========================================== diff --git a/synapse/__init__.py b/synapse/__init__.py index 9899944977..67231e8d97 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -16,4 +16,4 @@ """ This is a reference implementation of a Matrix home server. 
""" -__version__ = "0.17.0-rc2" +__version__ = "0.17.0-rc3" From 1efee2f52b931ddcd90e87d06c7ea614da2c9cd0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 2 Aug 2016 18:06:31 +0100 Subject: [PATCH 346/414] E2E keys: Make federation query share code with client query Refactor the e2e query handler to separate out the local query, and then make the federation handler use it. --- synapse/federation/federation_server.py | 20 +---- synapse/federation/transport/server.py | 4 +- synapse/handlers/e2e_keys.py | 115 ++++++++++++++++++------ 3 files changed, 92 insertions(+), 47 deletions(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 85f5e752fe..e637f2a8bd 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -348,27 +348,9 @@ class FederationServer(FederationBase): (200, send_content) ) - @defer.inlineCallbacks @log_function def on_query_client_keys(self, origin, content): - query = [] - for user_id, device_ids in content.get("device_keys", {}).items(): - if not device_ids: - query.append((user_id, None)) - else: - for device_id in device_ids: - query.append((user_id, device_id)) - - results = yield self.store.get_e2e_device_keys(query) - - json_result = {} - for user_id, device_keys in results.items(): - for device_id, json_bytes in device_keys.items(): - json_result.setdefault(user_id, {})[device_id] = json.loads( - json_bytes - ) - - defer.returnValue({"device_keys": json_result}) + return self.on_query_request("client_keys", content) @defer.inlineCallbacks @log_function diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 26fa88ae84..1a88413d18 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -367,10 +367,8 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet): class FederationClientKeysQueryServlet(BaseFederationServlet): PATH = "/user/keys/query" - @defer.inlineCallbacks def on_POST(self, origin, content, query): - response = yield self.handler.on_query_client_keys(origin, content) - defer.returnValue((200, response)) + return self.handler.on_query_client_keys(origin, content) class FederationClientKeysClaimServlet(BaseFederationServlet): diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 73a14cf952..9c7e9494d6 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -13,12 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import json import logging from twisted.internet import defer +from synapse.api import errors import synapse.types + from ._base import BaseHandler logger = logging.getLogger(__name__) @@ -29,39 +32,101 @@ class E2eKeysHandler(BaseHandler): super(E2eKeysHandler, self).__init__(hs) self.store = hs.get_datastore() self.federation = hs.get_replication_layer() - self.is_mine = hs.is_mine + self.is_mine_id = hs.is_mine_id + + # doesn't really work as part of the generic query API, because the + # query request requires an object POST, but we abuse the + # "query handler" interface. 
+ self.federation.register_query_handler( + "client_keys", self.on_federation_query_client_keys + ) @defer.inlineCallbacks def query_devices(self, query_body): - local_query = [] - remote_queries = {} - for user_id, device_ids in query_body.get("device_keys", {}).items(): + """ Handle a device key query from a client + + { + "device_keys": { + "": [""] + } + } + -> + { + "device_keys": { + "": { + "": { + ... + } + } + } + } + """ + device_keys_query = query_body.get("device_keys", {}) + + # separate users by domain. + # make a map from domain to user_id to device_ids + queries_by_domain = collections.defaultdict(dict) + for user_id, device_ids in device_keys_query.items(): user = synapse.types.UserID.from_string(user_id) - if self.is_mine(user): - if not device_ids: - local_query.append((user_id, None)) - else: - for device_id in device_ids: - local_query.append((user_id, device_id)) + queries_by_domain[user.domain][user_id] = device_ids + + # do the queries + # TODO: do these in parallel + results = {} + for destination, destination_query in queries_by_domain.items(): + if destination == self.hs.hostname: + res = yield self.query_local_devices(destination_query) else: - remote_queries.setdefault(user.domain, {})[user_id] = list( - device_ids + res = yield self.federation.query_client_keys( + destination, {"device_keys": destination_query} ) + res = res["device_keys"] + for user_id, keys in res.items(): + if user_id in destination_query: + results[user_id] = keys + + defer.returnValue((200, {"device_keys": results})) + + @defer.inlineCallbacks + def query_local_devices(self, query): + """Get E2E device keys for local users + + Args: + query (dict[string, list[string]|None): map from user_id to a list + of devices to query (None for all devices) + + Returns: + defer.Deferred: (resolves to dict[string, dict[string, dict]]): + map from user_id -> device_id -> device details + """ + local_query = [] + + for user_id, device_ids in query.items(): + if not self.is_mine_id(user_id): + logger.warning("Request for keys for non-local user %s", + user_id) + raise errors.SynapseError(400, "Not a user here") + + if not device_ids: + local_query.append((user_id, None)) + else: + for device_id in device_ids: + local_query.append((user_id, device_id)) + results = yield self.store.get_e2e_device_keys(local_query) - json_result = {} + # un-jsonify the results + json_result = collections.defaultdict(dict) for user_id, device_keys in results.items(): for device_id, json_bytes in device_keys.items(): - json_result.setdefault(user_id, {})[ - device_id] = json.loads( - json_bytes - ) + json_result[user_id][device_id] = json.loads(json_bytes) - for destination, device_keys in remote_queries.items(): - remote_result = yield self.federation.query_client_keys( - destination, {"device_keys": device_keys} - ) - for user_id, keys in remote_result["device_keys"].items(): - if user_id in device_keys: - json_result[user_id] = keys - defer.returnValue((200, {"device_keys": json_result})) + defer.returnValue(json_result) + + @defer.inlineCallbacks + def on_federation_query_client_keys(self, query_body): + """ Handle a device key query from a federated server + """ + device_keys_query = query_body.get("device_keys", {}) + res = yield self.query_local_devices(device_keys_query) + defer.returnValue({"device_keys": res}) From aecaec3e104fc8aebb4f2e3e9ce29bb7dee4dc0c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Aug 2016 18:25:53 +0100 Subject: [PATCH 347/414] Change the way we summarize URLs Using XPath is slow on 
some machines (for unknown reasons), so use a different approach to get a
list of text nodes.

Try to generate a summary that respects paragraph and then word boundaries,
adding ellipses when appropriate.
---
 synapse/rest/media/v1/preview_url_resource.py | 78 ++++++++++++++++---
 1 file changed, 67 insertions(+), 11 deletions(-)

diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 74c64f1371..ea46b8aa1b 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -29,6 +29,8 @@ from synapse.http.server import (
 from synapse.util.async import ObservableDeferred
 from synapse.util.stringutils import is_ascii

+from copy import deepcopy
+
 import os
 import re
 import fnmatch
@@ -329,20 +331,74 @@ class PreviewUrlResource(Resource):
 # ...or if they are within a
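
The strategy the commit message describes can be sketched in isolation (an
illustrative reimplementation, not the code from this patch; it assumes the
page text has already been collected into a list of paragraph strings)::

    def summarize(paragraphs, max_len=500):
        # Prefer cutting at a paragraph boundary...
        summary = ""
        for para in paragraphs:
            if summary and len(summary) + len(para) > max_len:
                break
            summary = (summary + "\n\n" + para) if summary else para
        # ...but if the text is still too long, cut at a word boundary
        # and add an ellipsis.
        if len(summary) > max_len:
            summary = summary[:max_len].rsplit(None, 1)[0] + u"…"
        return summary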