From 855af069a494f826ef941d722c811287b3fc4a8c Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 15 Jan 2020 18:56:18 +0000
Subject: [PATCH 001/213] Fix instantiation of message retention purge jobs

When figuring out which topological token to start a purge job at, we need to
do the following:

1. Figure out a timestamp before which events will be purged
2. Select the first stream ordering after that timestamp
3. Select info about the first event after that stream ordering
4. Build a topological token from that info

In some situations (e.g. quiet rooms with a short max_lifetime), there might
not be an event after the stream ordering at step 3, so we abort the purge
with the error `No event found`.

To mitigate that, this patch fetches the first event _before_ the stream
ordering, instead of after it.
---
 synapse/handlers/pagination.py             |  2 +-
 synapse/storage/data_stores/main/stream.py | 59 +++++++++++++++++-----
 2 files changed, 48 insertions(+), 13 deletions(-)

diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 00a6afc963..3ee6a091c5 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -156,7 +156,7 @@ class PaginationHandler(object):
 
             stream_ordering = yield self.store.find_first_stream_ordering_after_ts(ts)
 
-            r = yield self.store.get_room_event_after_stream_ordering(
+            r = yield self.store.get_room_event_before_stream_ordering(
                 room_id, stream_ordering,
             )
             if not r:
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 140da8dad6..223ce7fedb 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -536,20 +536,55 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             Deferred[(int, int, str)]:
                 (stream ordering, topological ordering, event_id)
         """
+        return self.db.runInteraction(
+            "get_room_event_after_stream_ordering",
+            self.get_room_event_around_stream_ordering_txn,
+            room_id, stream_ordering, "f",
+        )
 
-        def _f(txn):
-            sql = (
-                "SELECT stream_ordering, topological_ordering, event_id"
-                " FROM events"
-                " WHERE room_id = ? AND stream_ordering >= ?"
-                " AND NOT outlier"
-                " ORDER BY stream_ordering"
-                " LIMIT 1"
-            )
-            txn.execute(sql, (room_id, stream_ordering))
-            return txn.fetchone()
+    def get_room_event_before_stream_ordering(self, room_id, stream_ordering):
+        """Gets details of the first event in a room at or before a stream ordering
 
-        return self.db.runInteraction("get_room_event_after_stream_ordering", _f)
+        Args:
+            room_id (str):
+            stream_ordering (int):
+
+        Returns:
+            Deferred[(int, int, str)]:
+                (stream ordering, topological ordering, event_id)
+        """
+        return self.db.runInteraction(
+            "get_room_event_before_stream_ordering",
+            self.get_room_event_around_stream_ordering_txn,
+            room_id, stream_ordering, "f",
+        )
+
+    def get_room_event_around_stream_ordering_txn(
+        self, txn, room_id, stream_ordering, dir="f"
+    ):
+        """Gets details of the first event in a room at or after a given stream
+        ordering, or at or before it, depending on the provided direction.
+
+        Args:
+            room_id (str):
+            stream_ordering (int):
+            dir (str): Direction in which to look in the room's history, either
+                "f" (forward) or "b" (backward).
+
+        Returns:
+            Deferred[(int, int, str)]:
+                (stream ordering, topological ordering, event_id)
+        """
+        sql = (
+            "SELECT stream_ordering, topological_ordering, event_id"
+            " FROM events"
+            " WHERE room_id = ? AND stream_ordering %s ?"
+ " AND NOT outlier" + " ORDER BY stream_ordering" + " LIMIT 1" + ) % ("<=" if dir == "b" else ">=",) + txn.execute(sql, (room_id, stream_ordering)) + return txn.fetchone() @defer.inlineCallbacks def get_room_events_max_id(self, room_id=None): From 83635882379ecddb1509ea3d071eefdedefb647e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 15 Jan 2020 19:13:22 +0000 Subject: [PATCH 002/213] Fix typo --- synapse/storage/data_stores/main/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 223ce7fedb..9fa5e1f203 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -556,7 +556,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return self.db.runInteraction( "get_room_event_before_stream_ordering", self.get_room_event_around_stream_ordering_txn, - room_id, stream_ordering, "f", + room_id, stream_ordering, "b", ) def get_room_event_around_stream_ordering_txn( From 066b9f52b80c172eec6074ca01fb24670200fd80 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 15 Jan 2020 19:32:47 +0000 Subject: [PATCH 003/213] Correctly order when selecting before stream ordering --- synapse/storage/data_stores/main/stream.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 9fa5e1f203..451f38296b 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -580,9 +580,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): " FROM events" " WHERE room_id = ? AND stream_ordering %s ?" " AND NOT outlier" - " ORDER BY stream_ordering" + " ORDER BY stream_ordering %s" " LIMIT 1" - ) % ("<=" if dir == "b" else ">=",) + ) % ( + "<=" if dir == "b" else ">=", + "DESC" if dir == "b" else "ASC", + ) txn.execute(sql, (room_id, stream_ordering)) return txn.fetchone() From 914e73cdd9053d6fd050e5ad04910db74a7b5cd9 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 15 Jan 2020 19:36:19 +0000 Subject: [PATCH 004/213] Changelog --- changelog.d/6713.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6713.bugfix diff --git a/changelog.d/6713.bugfix b/changelog.d/6713.bugfix new file mode 100644 index 0000000000..3924f1ad79 --- /dev/null +++ b/changelog.d/6713.bugfix @@ -0,0 +1 @@ +Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies. 
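A note on the three fixes above: they converge on a single direction-parameterised query, and patch 003 is what makes the backward case correct. The snippet below is a minimal, self-contained sketch of why the comparison operator and the sort direction must flip together; it uses an in-memory SQLite table with a toy schema, not Synapse's real events table or its database layer, and the function name is illustrative.

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (stream_ordering INTEGER, event_id TEXT)")
    conn.executemany(
        "INSERT INTO events VALUES (?, ?)",
        [(1, "$a"), (2, "$b"), (5, "$c"), (9, "$d")],
    )

    def get_event_around(stream_ordering, direction="f"):
        # "b" looks backwards: the closest event at or before the given ordering.
        op = "<=" if direction == "b" else ">="
        order = "DESC" if direction == "b" else "ASC"
        sql = (
            "SELECT stream_ordering, event_id FROM events"
            " WHERE stream_ordering %s ?"
            " ORDER BY stream_ordering %s LIMIT 1"
        ) % (op, order)
        return conn.execute(sql, (stream_ordering,)).fetchone()

    # Without the ORDER BY flip from patch 003, the backward lookup would
    # return (1, "$a") -- the oldest match -- rather than the closest one.
    assert get_event_around(4, "f") == (5, "$c")
    assert get_event_around(4, "b") == (2, "$b")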
From 48e57a6452be3fef4372832f9e8f8f630325a648 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 15 Jan 2020 19:40:46 +0000 Subject: [PATCH 005/213] Rename changelog --- changelog.d/{6713.bugfix => 6714.bugfix} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{6713.bugfix => 6714.bugfix} (100%) diff --git a/changelog.d/6713.bugfix b/changelog.d/6714.bugfix similarity index 100% rename from changelog.d/6713.bugfix rename to changelog.d/6714.bugfix From e601f35d3b562495b2f8b071bd4c812fd783d6a7 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 Jan 2020 09:55:11 +0000 Subject: [PATCH 006/213] Lint --- synapse/storage/data_stores/main/stream.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 451f38296b..652cecd59b 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -539,7 +539,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return self.db.runInteraction( "get_room_event_after_stream_ordering", self.get_room_event_around_stream_ordering_txn, - room_id, stream_ordering, "f", + room_id, + stream_ordering, + "f", ) def get_room_event_before_stream_ordering(self, room_id, stream_ordering): @@ -556,7 +558,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): return self.db.runInteraction( "get_room_event_before_stream_ordering", self.get_room_event_around_stream_ordering_txn, - room_id, stream_ordering, "b", + room_id, + stream_ordering, + "b", ) def get_room_event_around_stream_ordering_txn( @@ -575,6 +579,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Deferred[(int, int, str)]: (stream ordering, topological ordering, event_id) """ + # Figure out which comparison operation to perform and how to order the results, + # using the provided direction. 
+        op = "<=" if dir == "b" else ">="
+        order = "DESC" if dir == "b" else "ASC"
+
         sql = (
             "SELECT stream_ordering, topological_ordering, event_id"
             " FROM events"
@@ -582,10 +591,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             " AND NOT outlier"
             " ORDER BY stream_ordering %s"
             " LIMIT 1"
-        ) % (
-            "<=" if dir == "b" else ">=",
-            "DESC" if dir == "b" else "ASC",
-        )
+        ) % (op, order)
         txn.execute(sql, (room_id, stream_ordering))
         return txn.fetchone()

From 842c2cfbf1e9f3e0d9251fa0c572eba9d6af6dbe Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Thu, 16 Jan 2020 20:24:17 +0000
Subject: [PATCH 007/213] Remove get_room_event_after_stream_ordering entirely

---
 synapse/rest/admin/__init__.py             |  2 +-
 synapse/storage/data_stores/main/stream.py | 69 ++++------------------
 2 files changed, 13 insertions(+), 58 deletions(-)

diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index a10b4a9b72..2932fe2123 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -107,7 +107,7 @@ class PurgeHistoryRestServlet(RestServlet):
 
             stream_ordering = await self.store.find_first_stream_ordering_after_ts(ts)
 
-            r = await self.store.get_room_event_after_stream_ordering(
+            r = await self.store.get_room_event_before_stream_ordering(
                 room_id, stream_ordering
             )
             if not r:
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 652cecd59b..a20c3d1012 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -525,25 +525,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return rows, token
 
-    def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
-        """Gets details of the first event in a room at or after a stream ordering
-
-        Args:
-            room_id (str):
-            stream_ordering (int):
-
-        Returns:
-            Deferred[(int, int, str)]:
-                (stream ordering, topological ordering, event_id)
-        """
-        return self.db.runInteraction(
-            "get_room_event_after_stream_ordering",
-            self.get_room_event_around_stream_ordering_txn,
-            room_id,
-            stream_ordering,
-            "f",
-        )
-
     def get_room_event_before_stream_ordering(self, room_id, stream_ordering):
         """Gets details of the first event in a room at or before a stream ordering
 
@@ -555,45 +536,19 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             Deferred[(int, int, str)]:
                 (stream ordering, topological ordering, event_id)
         """
-        return self.db.runInteraction(
-            "get_room_event_before_stream_ordering",
-            self.get_room_event_around_stream_ordering_txn,
-            room_id,
-            stream_ordering,
-            "b",
-        )
+        def _f(txn):
+            sql = (
+                "SELECT stream_ordering, topological_ordering, event_id"
+                " FROM events"
+                " WHERE room_id = ? AND stream_ordering <= ?"
+                " AND NOT outlier"
+                " ORDER BY stream_ordering DESC"
+                " LIMIT 1"
+            )
+            txn.execute(sql, (room_id, stream_ordering))
+            return txn.fetchone()
 
-    def get_room_event_around_stream_ordering_txn(
-        self, txn, room_id, stream_ordering, dir="f"
-    ):
-        """Gets details of the first event in a room at or after a given stream
-        ordering, or at or before it, depending on the provided direction.
-
-        Args:
-            room_id (str):
-            stream_ordering (int):
-            dir (str): Direction in which to look in the room's history, either
-                "f" (forward) or "b" (backward).
-
-        Returns:
-            Deferred[(int, int, str)]:
-                (stream ordering, topological ordering, event_id)
-        """
-        # Figure out which comparison operation to perform and how to order the results,
-        # using the provided direction.
- op = "<=" if dir == "b" else ">=" - order = "DESC" if dir == "b" else "ASC" - - sql = ( - "SELECT stream_ordering, topological_ordering, event_id" - " FROM events" - " WHERE room_id = ? AND stream_ordering %s ?" - " AND NOT outlier" - " ORDER BY stream_ordering %s" - " LIMIT 1" - ) % (op, order) - txn.execute(sql, (room_id, stream_ordering)) - return txn.fetchone() + return self.db.runInteraction("get_room_event_before_stream_ordering", _f) @defer.inlineCallbacks def get_room_events_max_id(self, room_id=None): From dac148341ba2638cc9486cf0b00005932dab939d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 Jan 2020 20:25:09 +0000 Subject: [PATCH 008/213] Fixup diff --- synapse/storage/data_stores/main/stream.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index a20c3d1012..056b25b13a 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -536,14 +536,15 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): Deferred[(int, int, str)]: (stream ordering, topological ordering, event_id) """ + def _f(txn): sql = ( - "SELECT stream_ordering, topological_ordering, event_id" - " FROM events" - " WHERE room_id = ? AND stream_ordering <= ?" - " AND NOT outlier" - " ORDER BY stream_ordering DESC" - " LIMIT 1" + "SELECT stream_ordering, topological_ordering, event_id" + " FROM events" + " WHERE room_id = ? AND stream_ordering <= ?" + " AND NOT outlier" + " ORDER BY stream_ordering DESC" + " LIMIT 1" ) txn.execute(sql, (room_id, stream_ordering)) return txn.fetchone() From 4fb3cb208a17ba36a5da050b19e3997cf4808f9a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 Jan 2020 20:27:07 +0000 Subject: [PATCH 009/213] Precise changelog --- changelog.d/6714.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6714.bugfix b/changelog.d/6714.bugfix index 3924f1ad79..410516694f 100644 --- a/changelog.d/6714.bugfix +++ b/changelog.d/6714.bugfix @@ -1 +1 @@ -Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies. +Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. 
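With patches 001-009 applied, the purge-job setup follows the sequence from the first commit message end to end. Below is a rough sketch of that flow under stated assumptions: the two store methods mirror the ones above, the real callers live in PaginationHandler and PurgeHistoryRestServlet, and the "t<topological>-<stream>" token construction is a simplification of RoomStreamToken.

    async def topological_token_for_purge(store, room_id, ts):
        # 1-2. Find the first stream ordering after the cutoff timestamp.
        stream_ordering = await store.find_first_stream_ordering_after_ts(ts)

        # 3. Look *backwards* from that point. A quiet room may have no event
        #    after the cutoff, but any room with history has one at or before it.
        r = await store.get_room_event_before_stream_ordering(room_id, stream_ordering)
        if not r:
            # Nothing before the cutoff either; there is nothing to purge.
            return None

        # 4. Build a topological token from the returned row.
        (stream, topo, _event_id) = r
        return "t%d-%d" % (topo, stream)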
From 14d8f342d5cae86d93d9ba2b411d486690ff54f5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 Jan 2020 11:58:02 +0000 Subject: [PATCH 010/213] move batch_iter to a separate module --- synapse/storage/data_stores/main/cache.py | 2 +- synapse/storage/data_stores/main/devices.py | 2 +- synapse/storage/data_stores/main/events.py | 2 +- .../storage/data_stores/main/events_worker.py | 2 +- synapse/storage/data_stores/main/keys.py | 2 +- synapse/storage/data_stores/main/presence.py | 2 +- synapse/util/__init__.py | 17 --------- synapse/util/iterutils.py | 35 +++++++++++++++++++ 8 files changed, 41 insertions(+), 23 deletions(-) create mode 100644 synapse/util/iterutils.py diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py index 54ed8574c4..bf91512daf 100644 --- a/synapse/storage/data_stores/main/cache.py +++ b/synapse/storage/data_stores/main/cache.py @@ -21,7 +21,7 @@ from twisted.internet import defer from synapse.storage._base import SQLBaseStore from synapse.storage.engines import PostgresEngine -from synapse.util import batch_iter +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 9a828231c4..f0a7962dd0 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -33,13 +33,13 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import Database from synapse.types import get_verify_key_from_cross_signing_key -from synapse.util import batch_iter from synapse.util.caches.descriptors import ( Cache, cached, cachedInlineCallbacks, cachedList, ) +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index e9fe63037b..bb69c20448 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -43,9 +43,9 @@ from synapse.storage.data_stores.main.events_worker import EventsWorkerStore from synapse.storage.data_stores.main.state import StateGroupWorkerStore from synapse.storage.database import Database from synapse.types import RoomStreamToken, get_domain_from_id -from synapse.util import batch_iter from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 0cce5232f5..3b93e0597a 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -37,8 +37,8 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause from synapse.storage.database import Database from synapse.types import get_domain_from_id -from synapse.util import batch_iter from synapse.util.caches.descriptors import Cache +from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure logger = logging.getLogger(__name__) diff --git a/synapse/storage/data_stores/main/keys.py b/synapse/storage/data_stores/main/keys.py index 
6b12f5a75f..ba89c68c9f 100644 --- a/synapse/storage/data_stores/main/keys.py +++ b/synapse/storage/data_stores/main/keys.py @@ -23,8 +23,8 @@ from signedjson.key import decode_verify_key_bytes from synapse.storage._base import SQLBaseStore from synapse.storage.keys import FetchKeyResult -from synapse.util import batch_iter from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) diff --git a/synapse/storage/data_stores/main/presence.py b/synapse/storage/data_stores/main/presence.py index a2c83e0867..604c8b7ddd 100644 --- a/synapse/storage/data_stores/main/presence.py +++ b/synapse/storage/data_stores/main/presence.py @@ -17,8 +17,8 @@ from twisted.internet import defer from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause from synapse.storage.presence import UserPresenceState -from synapse.util import batch_iter from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.iterutils import batch_iter class PresenceStore(SQLBaseStore): diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 7856353002..60f0de70f7 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -15,7 +15,6 @@ import logging import re -from itertools import islice import attr @@ -107,22 +106,6 @@ class Clock(object): raise -def batch_iter(iterable, size): - """batch an iterable up into tuples with a maximum size - - Args: - iterable (iterable): the iterable to slice - size (int): the maximum batch size - - Returns: - an iterator over the chunks - """ - # make sure we can deal with iterables like lists too - sourceiter = iter(iterable) - # call islice until it returns an empty tuple - return iter(lambda: tuple(islice(sourceiter, size)), ()) - - def log_failure(failure, msg, consumeErrors=True): """Creates a function suitable for passing to `Deferred.addErrback` that logs any failures that occur. diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py new file mode 100644 index 0000000000..c10016fbc5 --- /dev/null +++ b/synapse/util/iterutils.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from itertools import islice +from typing import Iterable, Iterator, Sequence, Tuple, TypeVar + +T = TypeVar("T") + + +def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]: + """batch an iterable up into tuples with a maximum size + + Args: + iterable (iterable): the iterable to slice + size (int): the maximum batch size + + Returns: + an iterator over the chunks + """ + # make sure we can deal with iterables like lists too + sourceiter = iter(iterable) + # call islice until it returns an empty tuple + return iter(lambda: tuple(islice(sourceiter, size)), ()) From acc7820574426cf27673d941b1b0362272113351 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 16 Jan 2020 22:26:34 +0000 Subject: [PATCH 011/213] Log saml assertions rather than the whole response ... since the whole response is huge. We even need to break up the assertions, since kibana otherwise truncates them. --- synapse/handlers/saml_handler.py | 13 ++++++++- synapse/util/iterutils.py | 13 +++++++++ tests/util/test_itertools.py | 47 ++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 tests/util/test_itertools.py diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 107f97032b..32638671c9 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -32,6 +32,7 @@ from synapse.types import ( mxid_localpart_allowed_characters, ) from synapse.util.async_helpers import Linearizer +from synapse.util.iterutils import chunk_seq logger = logging.getLogger(__name__) @@ -132,7 +133,17 @@ class SamlHandler: logger.warning("SAML2 response was not signed") raise SynapseError(400, "SAML2 response was not signed") - logger.info("SAML2 response: %s", saml2_auth.origxml) + logger.debug("SAML2 response: %s", saml2_auth.origxml) + for assertion in saml2_auth.assertions: + # kibana limits the length of a log field, whereas this is all rather + # useful, so split it up. + count = 0 + for part in chunk_seq(str(assertion), 10000): + logger.info( + "SAML2 assertion: %s%s", "(%i)..." % (count,) if count else "", part + ) + count += 1 + logger.info("SAML2 mapped attributes: %s", saml2_auth.ava) try: diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index c10016fbc5..06faeebe7f 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -33,3 +33,16 @@ def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]: sourceiter = iter(iterable) # call islice until it returns an empty tuple return iter(lambda: tuple(islice(sourceiter, size)), ()) + + +ISeq = TypeVar("ISeq", bound=Sequence, covariant=True) + + +def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]: + """Split the given sequence into chunks of the given size + + The last chunk may be shorter than the given size. + + If the input is empty, no chunks are returned. + """ + return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen)) diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py new file mode 100644 index 0000000000..0ab0a91483 --- /dev/null +++ b/tests/util/test_itertools.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.util.iterutils import chunk_seq + +from tests.unittest import TestCase + + +class ChunkSeqTests(TestCase): + def test_short_seq(self): + parts = chunk_seq("123", 8) + + self.assertEqual( + list(parts), ["123"], + ) + + def test_long_seq(self): + parts = chunk_seq("abcdefghijklmnop", 8) + + self.assertEqual( + list(parts), ["abcdefgh", "ijklmnop"], + ) + + def test_uneven_parts(self): + parts = chunk_seq("abcdefghijklmnop", 5) + + self.assertEqual( + list(parts), ["abcde", "fghij", "klmno", "p"], + ) + + def test_empty_input(self): + parts = chunk_seq([], 5) + + self.assertEqual( + list(parts), [], + ) From 95c5b9bfb3506d06e6b0a7d42adfb1f76f2cb7ca Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 16 Jan 2020 22:29:06 +0000 Subject: [PATCH 012/213] changelog --- changelog.d/6724.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6724.misc diff --git a/changelog.d/6724.misc b/changelog.d/6724.misc new file mode 100644 index 0000000000..5256be75fa --- /dev/null +++ b/changelog.d/6724.misc @@ -0,0 +1 @@ +When processing a SAML response, log the assertions for easier configuration. From 5ce0b17e38404fceb8867fdb3b4b59c00db6b1e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 17 Jan 2020 10:04:15 +0000 Subject: [PATCH 013/213] Clarify the `account_validity` and `email` sections of the sample configuration. (#6685) Generally try to make this more comprehensible, and make it match the conventions. I've removed the documentation for all the settings which allow you to change the names of the template files, because I can't really see why they are useful. --- changelog.d/6685.doc | 1 + docs/sample_config.yaml | 284 ++++++++++++++++++--------------- synapse/config/emailconfig.py | 222 +++++++++++++------------- synapse/config/push.py | 2 +- synapse/config/registration.py | 83 ++++++---- 5 files changed, 320 insertions(+), 272 deletions(-) create mode 100644 changelog.d/6685.doc diff --git a/changelog.d/6685.doc b/changelog.d/6685.doc new file mode 100644 index 0000000000..7cf750fe3f --- /dev/null +++ b/changelog.d/6685.doc @@ -0,0 +1 @@ +Clarify the `account_validity` and `email` sections of the sample configuration. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0a2505e7bb..8e8cf513b0 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -874,23 +874,6 @@ media_store_path: "DATADIR/media_store" # Optional account validity configuration. This allows for accounts to be denied # any request after a given period. # -# ``enabled`` defines whether the account validity feature is enabled. Defaults -# to False. -# -# ``period`` allows setting the period after which an account is valid -# after its registration. When renewing the account, its validity period -# will be extended by this amount of time. This parameter is required when using -# the account validity feature. -# -# ``renew_at`` is the amount of time before an account's expiry date at which -# Synapse will send an email to the account's email address with a renewal link. 
-# This needs the ``email`` and ``public_baseurl`` configuration sections to be -# filled. -# -# ``renew_email_subject`` is the subject of the email sent out with the renewal -# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter -# from the ``email`` section. -# # Once this feature is enabled, Synapse will look for registered users without an # expiration date at startup and will add one to every account it found using the # current settings at that time. @@ -901,21 +884,55 @@ media_store_path: "DATADIR/media_store" # date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10% of the validity period. # -#account_validity: -# enabled: true -# period: 6w -# renew_at: 1w -# renew_email_subject: "Renew your %(app)s account" -# # Directory in which Synapse will try to find the HTML files to serve to the -# # user when trying to renew an account. Optional, defaults to -# # synapse/res/templates. -# template_dir: "res/templates" -# # HTML to be displayed to the user after they successfully renewed their -# # account. Optional. -# account_renewed_html_path: "account_renewed.html" -# # HTML to be displayed when the user tries to renew an account with an invalid -# # renewal token. Optional. -# invalid_token_html_path: "invalid_token.html" +account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. + # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. If not set, default + # templates from within the Synapse package will be used. + # + #template_dir: "res/templates" + + # File within 'template_dir' giving the HTML to be displayed to the user after + # they successfully renewed their account. If not set, default text is used. + # + #account_renewed_html_path: "account_renewed.html" + + # File within 'template_dir' giving the HTML to be displayed when the user + # tries to renew an account with an invalid renewal token. If not set, + # default text is used. + # + #invalid_token_html_path: "invalid_token.html" # Time that a user's session remains valid for, after they log in. # @@ -1353,107 +1370,110 @@ password_config: #pepper: "EVEN_MORE_SECRET" +# Configuration for sending emails from Synapse. +# +email: + # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. 
+ # + #smtp_host: mail.server -# Enable sending emails for password resets, notification events or -# account expiry notices -# -# If your SMTP server requires authentication, the optional smtp_user & -# smtp_pass variables should be used -# -#email: -# enable_notifs: false -# smtp_host: "localhost" -# smtp_port: 25 # SSL: 465, STARTTLS: 587 -# smtp_user: "exampleusername" -# smtp_pass: "examplepassword" -# require_transport_security: false -# -# # notif_from defines the "From" address to use when sending emails. -# # It must be set if email sending is enabled. -# # -# # The placeholder '%(app)s' will be replaced by the application name, -# # which is normally 'app_name' (below), but may be overridden by the -# # Matrix client application. -# # -# # Note that the placeholder must be written '%(app)s', including the -# # trailing 's'. -# # -# notif_from: "Your Friendly %(app)s homeserver " -# -# # app_name defines the default value for '%(app)s' in notif_from. It -# # defaults to 'Matrix'. -# # -# #app_name: my_branded_matrix_server -# -# # Enable email notifications by default -# # -# notif_for_new_users: true -# -# # Defining a custom URL for Riot is only needed if email notifications -# # should contain links to a self-hosted installation of Riot; when set -# # the "app_name" setting is ignored -# # -# riot_base_url: "http://localhost/riot" -# -# # Configure the time that a validation email or text message code -# # will expire after sending -# # -# # This is currently used for password resets -# # -# #validation_token_lifetime: 1h -# -# # Template directory. All template files should be stored within this -# # directory. If not set, default templates from within the Synapse -# # package will be used -# # -# # For the list of default templates, please see -# # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates -# # -# #template_dir: res/templates -# -# # Templates for email notifications -# # -# notif_template_html: notif_mail.html -# notif_template_text: notif_mail.txt -# -# # Templates for account expiry notices -# # -# expiry_template_html: notice_expiry.html -# expiry_template_text: notice_expiry.txt -# -# # Templates for password reset emails sent by the homeserver -# # -# #password_reset_template_html: password_reset.html -# #password_reset_template_text: password_reset.txt -# -# # Templates for registration emails sent by the homeserver -# # -# #registration_template_html: registration.html -# #registration_template_text: registration.txt -# -# # Templates for validation emails sent by the homeserver when adding an email to -# # your user account -# # -# #add_threepid_template_html: add_threepid.html -# #add_threepid_template_text: add_threepid.txt -# -# # Templates for password reset success and failure pages that a user -# # will see after attempting to reset their password -# # -# #password_reset_template_success_html: password_reset_success.html -# #password_reset_template_failure_html: password_reset_failure.html -# -# # Templates for registration success and failure pages that a user -# # will see after attempting to register using an email or phone -# # -# #registration_template_success_html: registration_success.html -# #registration_template_failure_html: registration_failure.html -# -# # Templates for success and failure pages that a user will see after attempting -# # to add an email or phone to their account -# # -# #add_threepid_success_html: add_threepid_success.html -# #add_threepid_failure_html: add_threepid_failure.html + # The port on the 
mail server for outgoing SMTP. Defaults to 25. + # + #smtp_port: 587 + + # Username/password for authentication to the SMTP server. By default, no + # authentication is attempted. + # + # smtp_user: "exampleusername" + # smtp_pass: "examplepassword" + + # Uncomment the following to require TLS transport security for SMTP. + # By default, Synapse will connect over plain text, and will then switch to + # TLS via STARTTLS *if the SMTP server supports it*. If this option is set, + # Synapse will refuse to connect unless the server supports STARTTLS. + # + #require_transport_security: true + + # Enable sending emails for messages that the user has missed + # + #enable_notifs: false + + # notif_from defines the "From" address to use when sending emails. + # It must be set if email sending is enabled. + # + # The placeholder '%(app)s' will be replaced by the application name, + # which is normally 'app_name' (below), but may be overridden by the + # Matrix client application. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + #notif_from: "Your Friendly %(app)s homeserver " + + # app_name defines the default value for '%(app)s' in notif_from. It + # defaults to 'Matrix'. + # + #app_name: my_branded_matrix_server + + # Uncomment the following to disable automatic subscription to email + # notifications for new users. Enabled by default. + # + #notif_for_new_users: false + + # Custom URL for client links within the email notifications. By default + # links will be based on "https://matrix.to". + # + # (This setting used to be called riot_base_url; the old name is still + # supported for backwards-compatibility but is now deprecated.) + # + #client_base_url: "http://localhost/riot" + + # Configure the time that a validation email will expire after sending. + # Defaults to 1h. + # + #validation_token_lifetime: 15m + + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. + # + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. + # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. + # + # Synapse will look for the following templates in this directory: + # + # * The contents of email notifications of missed events: 'notif_mail.html' and + # 'notif_mail.txt'. + # + # * The contents of account expiry notice emails: 'notice_expiry.html' and + # 'notice_expiry.txt'. 
+ # + # * The contents of password reset emails sent by the homeserver: + # 'password_reset.html' and 'password_reset.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in the password reset email: 'password_reset_success.html' and + # 'password_reset_failure.html' + # + # * The contents of address verification emails sent during registration: + # 'registration.html' and 'registration.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in an address verification email sent during registration: + # 'registration_success.html' and 'registration_failure.html' + # + # * The contents of address verification emails sent when an address is added + # to a Matrix account: 'add_threepid.html' and 'add_threepid.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in an address verification email sent when an address is added + # to a Matrix account: 'add_threepid_success.html' and + # 'add_threepid_failure.html' + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" #password_providers: diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 35756bed87..74853f9faa 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -37,10 +37,12 @@ class EmailConfig(Config): self.email_enable_notifs = False - email_config = config.get("email", {}) + email_config = config.get("email") + if email_config is None: + email_config = {} - self.email_smtp_host = email_config.get("smtp_host", None) - self.email_smtp_port = email_config.get("smtp_port", None) + self.email_smtp_host = email_config.get("smtp_host", "localhost") + self.email_smtp_port = email_config.get("smtp_port", 25) self.email_smtp_user = email_config.get("smtp_user", None) self.email_smtp_pass = email_config.get("smtp_pass", None) self.require_transport_security = email_config.get( @@ -74,9 +76,9 @@ class EmailConfig(Config): self.email_template_dir = os.path.abspath(template_dir) self.email_enable_notifs = email_config.get("enable_notifs", False) - account_validity_renewal_enabled = config.get("account_validity", {}).get( - "renew_at" - ) + + account_validity_config = config.get("account_validity") or {} + account_validity_renewal_enabled = account_validity_config.get("renew_at") self.threepid_behaviour_email = ( # Have Synapse handle the email sending if account_threepid_delegates.email @@ -278,7 +280,9 @@ class EmailConfig(Config): self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True ) - self.email_riot_base_url = email_config.get("riot_base_url", None) + self.email_riot_base_url = email_config.get( + "client_base_url", email_config.get("riot_base_url", None) + ) if account_validity_renewal_enabled: self.email_expiry_template_html = email_config.get( @@ -294,107 +298,111 @@ class EmailConfig(Config): raise ConfigError("Unable to find email template file %s" % (p,)) def generate_config_section(self, config_dir_path, server_name, **kwargs): - return """ - # Enable sending emails for password resets, notification events or - # account expiry notices + return """\ + # Configuration for sending emails from Synapse. 
# - # If your SMTP server requires authentication, the optional smtp_user & - # smtp_pass variables should be used - # - #email: - # enable_notifs: false - # smtp_host: "localhost" - # smtp_port: 25 # SSL: 465, STARTTLS: 587 - # smtp_user: "exampleusername" - # smtp_pass: "examplepassword" - # require_transport_security: false - # - # # notif_from defines the "From" address to use when sending emails. - # # It must be set if email sending is enabled. - # # - # # The placeholder '%(app)s' will be replaced by the application name, - # # which is normally 'app_name' (below), but may be overridden by the - # # Matrix client application. - # # - # # Note that the placeholder must be written '%(app)s', including the - # # trailing 's'. - # # - # notif_from: "Your Friendly %(app)s homeserver " - # - # # app_name defines the default value for '%(app)s' in notif_from. It - # # defaults to 'Matrix'. - # # - # #app_name: my_branded_matrix_server - # - # # Enable email notifications by default - # # - # notif_for_new_users: true - # - # # Defining a custom URL for Riot is only needed if email notifications - # # should contain links to a self-hosted installation of Riot; when set - # # the "app_name" setting is ignored - # # - # riot_base_url: "http://localhost/riot" - # - # # Configure the time that a validation email or text message code - # # will expire after sending - # # - # # This is currently used for password resets - # # - # #validation_token_lifetime: 1h - # - # # Template directory. All template files should be stored within this - # # directory. If not set, default templates from within the Synapse - # # package will be used - # # - # # For the list of default templates, please see - # # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates - # # - # #template_dir: res/templates - # - # # Templates for email notifications - # # - # notif_template_html: notif_mail.html - # notif_template_text: notif_mail.txt - # - # # Templates for account expiry notices - # # - # expiry_template_html: notice_expiry.html - # expiry_template_text: notice_expiry.txt - # - # # Templates for password reset emails sent by the homeserver - # # - # #password_reset_template_html: password_reset.html - # #password_reset_template_text: password_reset.txt - # - # # Templates for registration emails sent by the homeserver - # # - # #registration_template_html: registration.html - # #registration_template_text: registration.txt - # - # # Templates for validation emails sent by the homeserver when adding an email to - # # your user account - # # - # #add_threepid_template_html: add_threepid.html - # #add_threepid_template_text: add_threepid.txt - # - # # Templates for password reset success and failure pages that a user - # # will see after attempting to reset their password - # # - # #password_reset_template_success_html: password_reset_success.html - # #password_reset_template_failure_html: password_reset_failure.html - # - # # Templates for registration success and failure pages that a user - # # will see after attempting to register using an email or phone - # # - # #registration_template_success_html: registration_success.html - # #registration_template_failure_html: registration_failure.html - # - # # Templates for success and failure pages that a user will see after attempting - # # to add an email or phone to their account - # # - # #add_threepid_success_html: add_threepid_success.html - # #add_threepid_failure_html: add_threepid_failure.html + email: + # The hostname of the outgoing SMTP 
server to use. Defaults to 'localhost'. + # + #smtp_host: mail.server + + # The port on the mail server for outgoing SMTP. Defaults to 25. + # + #smtp_port: 587 + + # Username/password for authentication to the SMTP server. By default, no + # authentication is attempted. + # + # smtp_user: "exampleusername" + # smtp_pass: "examplepassword" + + # Uncomment the following to require TLS transport security for SMTP. + # By default, Synapse will connect over plain text, and will then switch to + # TLS via STARTTLS *if the SMTP server supports it*. If this option is set, + # Synapse will refuse to connect unless the server supports STARTTLS. + # + #require_transport_security: true + + # Enable sending emails for messages that the user has missed + # + #enable_notifs: false + + # notif_from defines the "From" address to use when sending emails. + # It must be set if email sending is enabled. + # + # The placeholder '%(app)s' will be replaced by the application name, + # which is normally 'app_name' (below), but may be overridden by the + # Matrix client application. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + #notif_from: "Your Friendly %(app)s homeserver " + + # app_name defines the default value for '%(app)s' in notif_from. It + # defaults to 'Matrix'. + # + #app_name: my_branded_matrix_server + + # Uncomment the following to disable automatic subscription to email + # notifications for new users. Enabled by default. + # + #notif_for_new_users: false + + # Custom URL for client links within the email notifications. By default + # links will be based on "https://matrix.to". + # + # (This setting used to be called riot_base_url; the old name is still + # supported for backwards-compatibility but is now deprecated.) + # + #client_base_url: "http://localhost/riot" + + # Configure the time that a validation email will expire after sending. + # Defaults to 1h. + # + #validation_token_lifetime: 15m + + # Directory in which Synapse will try to find the template files below. + # If not set, default templates from within the Synapse package will be used. + # + # DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates. + # If you *do* uncomment it, you will need to make sure that all the templates + # below are in the directory. + # + # Synapse will look for the following templates in this directory: + # + # * The contents of email notifications of missed events: 'notif_mail.html' and + # 'notif_mail.txt'. + # + # * The contents of account expiry notice emails: 'notice_expiry.html' and + # 'notice_expiry.txt'. 
+ # + # * The contents of password reset emails sent by the homeserver: + # 'password_reset.html' and 'password_reset.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in the password reset email: 'password_reset_success.html' and + # 'password_reset_failure.html' + # + # * The contents of address verification emails sent during registration: + # 'registration.html' and 'registration.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in an address verification email sent during registration: + # 'registration_success.html' and 'registration_failure.html' + # + # * The contents of address verification emails sent when an address is added + # to a Matrix account: 'add_threepid.html' and 'add_threepid.txt' + # + # * HTML pages for success and failure that a user will see when they follow + # the link in an address verification email sent when an address is added + # to a Matrix account: 'add_threepid_success.html' and + # 'add_threepid_failure.html' + # + # You can see the default templates at: + # https://github.com/matrix-org/synapse/tree/master/synapse/res/templates + # + #template_dir: "res/templates" """ diff --git a/synapse/config/push.py b/synapse/config/push.py index 0910958649..6f2b3a7faa 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -35,7 +35,7 @@ class PushConfig(Config): # Now check for the one in the 'email' section and honour it, # with a warning. - push_config = config.get("email", {}) + push_config = config.get("email") or {} redact_content = push_config.get("redact_content") if redact_content is not None: print( diff --git a/synapse/config/registration.py b/synapse/config/registration.py index ee9614c5f7..b873995a49 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -27,6 +27,8 @@ class AccountValidityConfig(Config): section = "accountvalidity" def __init__(self, config, synapse_config): + if config is None: + return self.enabled = config.get("enabled", False) self.renew_by_email_enabled = "renew_at" in config @@ -159,23 +161,6 @@ class RegistrationConfig(Config): # Optional account validity configuration. This allows for accounts to be denied # any request after a given period. # - # ``enabled`` defines whether the account validity feature is enabled. Defaults - # to False. - # - # ``period`` allows setting the period after which an account is valid - # after its registration. When renewing the account, its validity period - # will be extended by this amount of time. This parameter is required when using - # the account validity feature. - # - # ``renew_at`` is the amount of time before an account's expiry date at which - # Synapse will send an email to the account's email address with a renewal link. - # This needs the ``email`` and ``public_baseurl`` configuration sections to be - # filled. - # - # ``renew_email_subject`` is the subject of the email sent out with the renewal - # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter - # from the ``email`` section. - # # Once this feature is enabled, Synapse will look for registered users without an # expiration date at startup and will add one to every account it found using the # current settings at that time. @@ -186,21 +171,55 @@ class RegistrationConfig(Config): # date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10%% of the validity period. 
# - #account_validity: - # enabled: true - # period: 6w - # renew_at: 1w - # renew_email_subject: "Renew your %%(app)s account" - # # Directory in which Synapse will try to find the HTML files to serve to the - # # user when trying to renew an account. Optional, defaults to - # # synapse/res/templates. - # template_dir: "res/templates" - # # HTML to be displayed to the user after they successfully renewed their - # # account. Optional. - # account_renewed_html_path: "account_renewed.html" - # # HTML to be displayed when the user tries to renew an account with an invalid - # # renewal token. Optional. - # invalid_token_html_path: "invalid_token.html" + account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. + # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %%(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. If not set, default + # templates from within the Synapse package will be used. + # + #template_dir: "res/templates" + + # File within 'template_dir' giving the HTML to be displayed to the user after + # they successfully renewed their account. If not set, default text is used. + # + #account_renewed_html_path: "account_renewed.html" + + # File within 'template_dir' giving the HTML to be displayed when the user + # tries to renew an account with an invalid renewal token. If not set, + # default text is used. + # + #invalid_token_html_path: "invalid_token.html" # Time that a user's session remains valid for, after they log in. # From a8a50f5b5746279379b4511c8ecb2a40b143fe32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jan 2020 10:27:19 +0000 Subject: [PATCH 014/213] Wake up transaction queue when remote server comes back online (#6706) This will be used to retry outbound transactions to a remote server if we think it might have come back up. 
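For illustration, the new command is framed on the replication TCP connection
as the command name, a space, then the payload (here the destination server
name):

    REMOTE_SERVER_UP example.com

A minimal sketch of the parse/serialise round-trip (the class name is
illustrative; the real RemoteServerUpCommand below inherits essentially this
behaviour from the generic Command base class in
synapse/replication/tcp/commands.py):

    class RemoteServerUpSketch:
        NAME = "REMOTE_SERVER_UP"

        def __init__(self, data: str):
            self.data = data

        @classmethod
        def from_line(cls, line: str) -> "RemoteServerUpSketch":
            # The line is everything after "REMOTE_SERVER_UP "; for this
            # command the whole payload is the server name.
            return cls(line)

        def to_line(self) -> str:
            return self.data

    assert RemoteServerUpSketch.from_line("example.com").to_line() == "example.com"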
--- changelog.d/6706.misc | 1 + docs/tcp_replication.md | 6 ++++- synapse/app/federation_sender.py | 12 +++++++++- synapse/federation/sender/__init__.py | 18 +++++++++++++-- synapse/federation/transport/server.py | 19 +++++++++++++++- synapse/notifier.py | 31 +++++++++++++++++++++++--- synapse/replication/tcp/client.py | 3 +++ synapse/replication/tcp/commands.py | 17 ++++++++++++++ synapse/replication/tcp/protocol.py | 15 +++++++++++++ synapse/replication/tcp/resource.py | 9 ++++++++ synapse/server.pyi | 12 ++++++++++ 11 files changed, 135 insertions(+), 8 deletions(-) create mode 100644 changelog.d/6706.misc diff --git a/changelog.d/6706.misc b/changelog.d/6706.misc new file mode 100644 index 0000000000..1ac11cc04b --- /dev/null +++ b/changelog.d/6706.misc @@ -0,0 +1 @@ +Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md index ba9e874d07..a0b1d563ff 100644 --- a/docs/tcp_replication.md +++ b/docs/tcp_replication.md @@ -209,7 +209,7 @@ Where `` may be either: * a numeric stream_id to stream updates since (exclusive) * `NOW` to stream all subsequent updates. -The `` is the name of a replication stream to subscribe +The `` is the name of a replication stream to subscribe to (see [here](../synapse/replication/tcp/streams/_base.py) for a list of streams). It can also be `ALL` to subscribe to all known streams, in which case the `` must be set to `NOW`. @@ -234,6 +234,10 @@ in which case the `` must be set to `NOW`. Used exclusively in tests +### REMOTE_SERVER_UP (S, C) + + Inform other processes that a remote server may have come back online. + See `synapse/replication/tcp/commands.py` for a detailed description and the format of each command. diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index a57cf991ac..38d11fdd0f 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -158,6 +158,13 @@ class FederationSenderReplicationHandler(ReplicationClientHandler): args.update(self.send_handler.stream_positions()) return args + def on_remote_server_up(self, server: str): + """Called when get a new REMOTE_SERVER_UP command.""" + + # Let's wake up the transaction queue for the server in case we have + # pending stuff to send to it. + self.send_handler.wake_destination(server) + def start(config_options): try: @@ -205,7 +212,7 @@ class FederationSenderHandler(object): to the federation sender. 
""" - def __init__(self, hs, replication_client): + def __init__(self, hs: FederationSenderServer, replication_client): self.store = hs.get_datastore() self._is_mine_id = hs.is_mine_id self.federation_sender = hs.get_federation_sender() @@ -226,6 +233,9 @@ class FederationSenderHandler(object): self.store.get_room_max_stream_ordering() ) + def wake_destination(self, server: str): + self.federation_sender.wake_destination(server) + def stream_positions(self): return {"federation": self.federation_position} diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 4ebb0e8bc0..36c83c3027 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -21,6 +21,7 @@ from prometheus_client import Counter from twisted.internet import defer +import synapse import synapse.metrics from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager @@ -54,7 +55,7 @@ sent_pdus_destination_dist_total = Counter( class FederationSender(object): - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.hs = hs self.server_name = hs.hostname @@ -482,7 +483,20 @@ class FederationSender(object): def send_device_messages(self, destination): if destination == self.server_name: - logger.info("Not sending device update to ourselves") + logger.warning("Not sending device update to ourselves") + return + + self._get_per_destination_queue(destination).attempt_new_transaction() + + def wake_destination(self, destination: str): + """Called when we want to retry sending transactions to a remote. + + This is mainly useful if the remote server has been down and we think it + might have come back. + """ + + if destination == self.server_name: + logger.warning("Not waking up ourselves") return self._get_per_destination_queue(destination).attempt_new_transaction() diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index b4cbf23394..d8cf9ed299 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -44,6 +44,7 @@ from synapse.logging.opentracing import ( tags, whitelisted_homeserver, ) +from synapse.server import HomeServer from synapse.types import ThirdPartyInstanceID, get_domain_from_id from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string @@ -101,12 +102,17 @@ class NoAuthenticationError(AuthenticationError): class Authenticator(object): - def __init__(self, hs): + def __init__(self, hs: HomeServer): self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname self.store = hs.get_datastore() self.federation_domain_whitelist = hs.config.federation_domain_whitelist + self.notifer = hs.get_notifier() + + self.replication_client = None + if hs.config.worker.worker_app: + self.replication_client = hs.get_tcp_replication() # A method just so we can pass 'self' as the authenticator to the Servlets async def authenticate_request(self, request, content): @@ -166,6 +172,17 @@ class Authenticator(object): try: logger.info("Marking origin %r as up", origin) await self.store.set_destination_retry_timings(origin, None, 0, 0) + + # Inform the relevant places that the remote server is back up. + self.notifer.notify_remote_server_up(origin) + if self.replication_client: + # If we're on a worker we try and inform master about this. 
The + # replication client doesn't hook into the notifier to avoid + # infinite loops where we send a `REMOTE_SERVER_UP` command to + # master, which then echoes it back to us which in turn pokes + # the notifier. + self.replication_client.send_remote_server_up(origin) + except Exception: logger.exception("Error resetting retry timings on %s", origin) diff --git a/synapse/notifier.py b/synapse/notifier.py index 5f5f765bea..6132727cbd 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -15,11 +15,13 @@ import logging from collections import namedtuple +from typing import Callable, List from prometheus_client import Counter from twisted.internet import defer +import synapse.server from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state @@ -154,7 +156,7 @@ class Notifier(object): UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.user_to_user_stream = {} self.room_to_user_streams = {} @@ -164,7 +166,12 @@ class Notifier(object): self.store = hs.get_datastore() self.pending_new_room_events = [] - self.replication_callbacks = [] + # Called when there are new things to stream over replication + self.replication_callbacks = [] # type: List[Callable[[], None]] + + # Called when remote servers have come back online after having been + # down. + self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() @@ -205,7 +212,7 @@ class Notifier(object): "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream) ) - def add_replication_callback(self, cb): + def add_replication_callback(self, cb: Callable[[], None]): """Add a callback that will be called when some new data is available. Callback is not given any arguments. It should *not* return a Deferred - if it needs to do any asynchronous work, a background thread should be started and """ self.replication_callbacks.append(cb) + def add_remote_server_up_callback(self, cb: Callable[[str], None]): + """Add a callback that will be called when synapse detects a server + has come back online. + """ + self.remote_server_up_callbacks.append(cb) + def on_new_room_event( self, event, room_stream_id, max_room_stream_id, extra_users=[] ): @@ -522,3 +535,15 @@ class Notifier(object): """Notify any replication listeners that there's a new event""" for cb in self.replication_callbacks: cb() + + def notify_remote_server_up(self, server: str): + """Notify any replication listeners that a remote server has come back up + """ + # We call federation_sender directly rather than registering as a + # callback as a) we already have a reference to it and b) it introduces + # circular dependencies.
+ if self.federation_sender: + self.federation_sender.wake_destination(server) + + for cb in self.remote_server_up_callbacks: + cb(server) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 52a0aefe68..fc06a7b053 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -143,6 +143,9 @@ class ReplicationClientHandler(AbstractReplicationClientHandler): if d: d.callback(data) + def on_remote_server_up(self, server: str): + """Called when we get a new REMOTE_SERVER_UP command.""" + def get_streams_to_replicate(self) -> Dict[str, int]: """Called when a new connection has been established and we need to subscribe to streams. diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index cbb36b9acf..451671412d 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -387,6 +387,20 @@ class UserIpCommand(Command): ) +class RemoteServerUpCommand(Command): + """Sent when a worker has detected that a remote server is no longer + "down" and retry timings should be reset. + + If sent from a client the server will relay to all other workers. + + Format:: + + REMOTE_SERVER_UP <server> + """ + + NAME = "REMOTE_SERVER_UP" + + _COMMANDS = ( ServerCommand, RdataCommand, @@ -401,6 +415,7 @@ _COMMANDS = ( RemovePusherCommand, InvalidateCacheCommand, UserIpCommand, + RemoteServerUpCommand, ) # type: Tuple[Type[Command], ...] # Map of command name to command type. @@ -414,6 +429,7 @@ VALID_SERVER_COMMANDS = ( ErrorCommand.NAME, PingCommand.NAME, SyncCommand.NAME, + RemoteServerUpCommand.NAME, ) # The commands the client is allowed to send @@ -427,4 +443,5 @@ VALID_CLIENT_COMMANDS = ( InvalidateCacheCommand.NAME, UserIpCommand.NAME, ErrorCommand.NAME, + RemoteServerUpCommand.NAME, ) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 5f4bdf84d2..131e5acb09 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -76,6 +76,7 @@ from synapse.replication.tcp.commands import ( PingCommand, PositionCommand, RdataCommand, + RemoteServerUpCommand, ReplicateCommand, ServerCommand, SyncCommand, @@ -460,6 +461,9 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): async def on_INVALIDATE_CACHE(self, cmd): self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys) + async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): + self.streamer.on_remote_server_up(cmd.data) + async def on_USER_IP(self, cmd): self.streamer.on_user_ip( cmd.user_id, @@ -555,6 +559,9 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): def send_sync(self, data): self.send_command(SyncCommand(data)) + def send_remote_server_up(self, server: str): + self.send_command(RemoteServerUpCommand(server)) + def on_connection_closed(self): BaseReplicationStreamProtocol.on_connection_closed(self) self.streamer.lost_connection(self) @@ -588,6 +595,11 @@ class AbstractReplicationClientHandler(metaclass=abc.ABCMeta): """Called when we get a new SYNC command.""" raise NotImplementedError() + @abc.abstractmethod + async def on_remote_server_up(self, server: str): + """Called when we get a new REMOTE_SERVER_UP command.""" + raise NotImplementedError() + @abc.abstractmethod def get_streams_to_replicate(self): """Called when a new connection has been established and we need to @@ -707,6 +719,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): async def on_SYNC(self, cmd): self.handler.on_sync(cmd.data) +
async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): + self.handler.on_remote_server_up(cmd.data) + def replicate(self, stream_name, token): """Send the subscription request to the server """ diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index b1752e88cd..6ebf944f66 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -120,6 +120,7 @@ class ReplicationStreamer(object): self.federation_sender = hs.get_federation_sender() self.notifier.add_replication_callback(self.on_notifier_poke) + self.notifier.add_remote_server_up_callback(self.send_remote_server_up) # Keeps track of whether we are currently checking for updates self.is_looping = False @@ -288,6 +289,14 @@ class ReplicationStreamer(object): ) await self._server_notices_sender.on_user_ip(user_id) + @measure_func("repl.on_remote_server_up") + def on_remote_server_up(self, server: str): + self.notifier.notify_remote_server_up(server) + + def send_remote_server_up(self, server: str): + for conn in self.connections: + conn.send_remote_server_up(server) + def send_sync_to_all_connections(self, data): """Sends a SYNC command to all clients. diff --git a/synapse/server.pyi b/synapse/server.pyi index b5e0b57095..0731403047 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -1,3 +1,5 @@ +import twisted.internet + import synapse.api.auth import synapse.config.homeserver import synapse.federation.sender @@ -9,10 +11,12 @@ import synapse.handlers.deactivate_account import synapse.handlers.device import synapse.handlers.e2e_keys import synapse.handlers.message +import synapse.handlers.presence import synapse.handlers.room import synapse.handlers.room_member import synapse.handlers.set_password import synapse.http.client +import synapse.notifier import synapse.rest.media.v1.media_repository import synapse.server_notices.server_notices_manager import synapse.server_notices.server_notices_sender @@ -85,3 +89,11 @@ class HomeServer(object): self, ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender: pass + def get_notifier(self) -> synapse.notifier.Notifier: + pass + def get_presence_handler(self) -> synapse.handlers.presence.PresenceHandler: + pass + def get_clock(self) -> synapse.util.Clock: + pass + def get_reactor(self) -> twisted.internet.base.ReactorBase: + pass From 2b6a77fcde8396331a790a5ddeaa744093a8c728 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 17 Jan 2020 10:32:47 +0000 Subject: [PATCH 015/213] Delegate remote_user_id mapping to the saml mapping provider (#6723) Turns out that figuring out a remote user id for the SAML user isn't quite as obvious as it seems. Factor it out to the SamlMappingProvider so that it's easy to control. --- changelog.d/6723.misc | 1 + synapse/config/saml2_config.py | 1 + synapse/handlers/saml_handler.py | 27 +++++++++++++++++++++------ 3 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6723.misc diff --git a/changelog.d/6723.misc b/changelog.d/6723.misc new file mode 100644 index 0000000000..17f15e73a8 --- /dev/null +++ b/changelog.d/6723.misc @@ -0,0 +1 @@ +Updates to the SAML mapping provider API. 
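The diffs below make `get_remote_user_id` a required method on SAML mapping providers, with the default provider keeping the old `uid`-based behaviour. As a rough sketch of what that means for a third-party provider (the class name and the `employeeNumber` attribute are illustrative only, and the other required methods are omitted), a deployment keying users off a different SAML attribute might implement:

```python
import logging

logger = logging.getLogger(__name__)


class CustomSamlMappingProvider:
    """Illustrative provider deriving the remote user id from a hypothetical
    employeeNumber attribute instead of uid."""

    def get_remote_user_id(self, saml_response, client_redirect_url):
        # saml_response.ava maps SAML attribute names to lists of values.
        try:
            return saml_response.ava["employeeNumber"][0]
        except KeyError:
            logger.warning("SAML2 response lacks an 'employeeNumber' attribute")
            # Returning a falsy value makes the handler abort the login.
            return None
```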
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index b91414aa35..423c158b11 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -121,6 +121,7 @@ class SAML2Config(Config): required_methods = [ "get_saml_attributes", "saml_response_to_user_attributes", + "get_remote_user_id", ] missing_methods = [ method diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py index 107f97032b..90e69b49ee 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py @@ -135,14 +135,15 @@ class SamlHandler: logger.info("SAML2 response: %s", saml2_auth.origxml) logger.info("SAML2 mapped attributes: %s", saml2_auth.ava) - try: - remote_user_id = saml2_auth.ava["uid"][0] - except KeyError: - logger.warning("SAML2 response lacks a 'uid' attestation") - raise SynapseError(400, "'uid' not in SAML2 response") - self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) + remote_user_id = self._user_mapping_provider.get_remote_user_id( + saml2_auth, client_redirect_url + ) + + if not remote_user_id: + raise Exception("Failed to extract remote user id from SAML response") + with (await self._mapping_lock.queue(self._auth_provider_id)): # first of all, check if we already have a mapping for this user logger.info( @@ -279,6 +280,20 @@ class DefaultSamlMappingProvider(object): self._mxid_source_attribute = parsed_config.mxid_source_attribute self._mxid_mapper = parsed_config.mxid_mapper + self._grandfathered_mxid_source_attribute = ( + module_api._hs.config.saml2_grandfathered_mxid_source_attribute + ) + + def get_remote_user_id( + self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str + ): + """Extracts the remote user id from the SAML response""" + try: + return saml_response.ava["uid"][0] + except KeyError: + logger.warning("SAML2 response lacks a 'uid' attestation") + raise SynapseError(400, "'uid' not in SAML2 response") + def saml_response_to_user_attributes( self, saml_response: saml2.response.AuthnResponse, From 1dee1e900bd3964461d932944883ffaf5df58eab Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 17 Jan 2020 10:43:58 +0000 Subject: [PATCH 016/213] bump version to v1.9.0.dev1 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index 0dd538d804..abd5297390 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.8.0" +__version__ = "1.9.0.dev1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 722b4f302d705f497355f206ecb160de1bef2074 Mon Sep 17 00:00:00 2001 From: Satsuki Yanagi <17376330+u1-liquid@users.noreply.github.com> Date: Fri, 17 Jan 2020 23:30:35 +0900 Subject: [PATCH 017/213] Fix syntax error in run_upgrade for schema 57 (#6728) Fix #6727 Related #6655 Co-authored-by: Erik Johnston --- changelog.d/6728.bugfix | 1 + .../main/schema/delta/57/local_current_membership.py | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6728.bugfix diff --git a/changelog.d/6728.bugfix b/changelog.d/6728.bugfix new file mode 100644 index 0000000000..5a136e17be --- /dev/null +++ b/changelog.d/6728.bugfix @@ -0,0 +1 @@ +Fix a bug causing `ValueError: unsupported format character ''' (0x27) at index 312` error when running the schema 57 upgrade script. 
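The `ValueError` quoted in the changelog above is a DB-API quirk rather than a SQL one: on PostgreSQL, Synapse rewrites `?` placeholders into the driver's `%s` "pyformat" style, after which any literal `%` left in the statement (here, the one in `'%' || ?`) is read as a format directive. A stripped-down illustration of the failure, using plain Python `%`-formatting as a stand-in for what the driver does internally:

```python
# After placeholder conversion, the broken statement effectively contained
# "... LIKE '%' || %s": a bare "%" immediately followed by a quote character.
broken_sql = "SELECT 1 FROM current_state_events WHERE state_key LIKE '%' || %s"

try:
    # pyformat drivers interpolate parameters roughly like this:
    broken_sql % ("@user:example.com",)
except ValueError as e:
    print(e)  # unsupported format character ''' (0x27) at index ...
```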
diff --git a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py index 601c236c4a..63b5acdcf7 100644 --- a/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py +++ b/synapse/storage/data_stores/main/schema/delta/57/local_current_membership.py @@ -56,7 +56,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): INSERT INTO local_current_membership (room_id, user_id, event_id, membership) SELECT c.room_id, state_key AS user_id, event_id, c.membership FROM current_state_events AS c - WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key like '%' || ? + WHERE type = 'm.room.member' AND c.membership IS NOT NULL AND state_key LIKE ? """ else: # We can't rely on the membership column, so we need to join against @@ -66,9 +66,10 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): SELECT c.room_id, state_key AS user_id, event_id, r.membership FROM current_state_events AS c INNER JOIN room_memberships AS r USING (event_id) - WHERE type = 'm.room.member' and state_key like '%' || ? + WHERE type = 'm.room.member' AND state_key LIKE ? """ - cur.execute(sql, (config.server_name,)) + sql = database_engine.convert_param_style(sql) + cur.execute(sql, ("%:" + config.server_name,)) cur.execute( "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)" From 0b885d62efdc25000ad44a050e669683668dfa3c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 17 Jan 2020 14:58:58 +0000 Subject: [PATCH 018/213] bump version to v1.9.0.dev2 --- synapse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index abd5297390..17a6f691c8 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.9.0.dev1" +__version__ = "1.9.0.dev2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 5909751936cca2e394cb30fb5da9520db76ee73a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jan 2020 15:13:27 +0000 Subject: [PATCH 019/213] Fix up changelog --- changelog.d/6728.bugfix | 1 - changelog.d/6728.misc | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/6728.bugfix create mode 100644 changelog.d/6728.misc diff --git a/changelog.d/6728.bugfix b/changelog.d/6728.bugfix deleted file mode 100644 index 5a136e17be..0000000000 --- a/changelog.d/6728.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing `ValueError: unsupported format character ''' (0x27) at index 312` error when running the schema 57 upgrade script. diff --git a/changelog.d/6728.misc b/changelog.d/6728.misc new file mode 100644 index 0000000000..01e78bc84e --- /dev/null +++ b/changelog.d/6728.misc @@ -0,0 +1 @@ +Add `local_current_membership` table for tracking local user membership state in rooms. 
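The schema 57 delta above settles on the portable pattern for this kind of upgrade script: write the statement once with `?` placeholders, let the engine translate them, and keep the `%` wildcard inside a bound parameter so it never appears in the SQL text. A hedged sketch of that shape (the helper name is made up; `convert_param_style` is the engine hook used in the delta):

```python
def find_local_members(cur, database_engine, server_name):
    # Same shape as the delta above: "?" placeholders, no literal "%".
    sql = (
        "SELECT room_id, state_key FROM current_state_events"
        " WHERE type = 'm.room.member' AND state_key LIKE ?"
    )
    # SQLite keeps "?" as-is; the PostgreSQL engine rewrites it to "%s".
    sql = database_engine.convert_param_style(sql)
    # The wildcard lives safely inside the bound parameter instead.
    cur.execute(sql, ("%:" + server_name,))
    return cur.fetchall()
```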
From a17f64361c87f06c67fd7bb5a98b54dc5a2bb4fb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 17 Jan 2020 20:51:44 +0000 Subject: [PATCH 020/213] Add more logging around message retention policies support (#6717) So we can debug issues like #6683 more easily --- changelog.d/6717.misc | 1 + synapse/config/server.py | 8 ++++++++ synapse/handlers/pagination.py | 13 +++++++++++++ 3 files changed, 22 insertions(+) create mode 100644 changelog.d/6717.misc diff --git a/changelog.d/6717.misc b/changelog.d/6717.misc new file mode 100644 index 0000000000..a2a7776126 --- /dev/null +++ b/changelog.d/6717.misc @@ -0,0 +1 @@ +Add more logging around message retention policies support. diff --git a/synapse/config/server.py b/synapse/config/server.py index 9ac112233b..0ec1b0fadd 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -294,6 +294,14 @@ class ServerConfig(Config): self.retention_default_min_lifetime = None self.retention_default_max_lifetime = None + if self.retention_enabled: + logger.info( + "Message retention policies support enabled with the following default" + " policy: min_lifetime = %s ; max_lifetime = %s", + self.retention_default_min_lifetime, + self.retention_default_max_lifetime, + ) + self.retention_allowed_lifetime_min = retention_config.get( "allowed_lifetime_min" ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 3ee6a091c5..71d76202c9 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -88,6 +88,8 @@ class PaginationHandler(object): if hs.config.retention_enabled: # Run the purge jobs described in the configuration file. for job in hs.config.retention_purge_jobs: + logger.info("Setting up purge job with config: %s", job) + self.clock.looping_call( run_as_background_process, job["interval"], @@ -130,11 +132,22 @@ class PaginationHandler(object): else: include_null = False + logger.info( + "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)", + min_ms, + max_ms, + include_null, + ) + rooms = yield self.store.get_rooms_for_retention_period_in_range( min_ms, max_ms, include_null ) + logger.debug("[purge] Rooms to purge: %s", rooms) + for room_id, retention_policy in iteritems(rooms): + logger.info("[purge] Attempting to purge messages in room %s", room_id) + if room_id in self._purges_in_progress_by_room: logger.warning( "[purge] not purging room %s as there's an ongoing purge running" From 198d52da3a10769b74fcec718ed9855e979f2786 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 20 Jan 2020 14:01:36 +0000 Subject: [PATCH 021/213] Fix empty account_validity config block --- synapse/config/registration.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b873995a49..9bb3beedbc 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -29,6 +29,7 @@ class AccountValidityConfig(Config): def __init__(self, config, synapse_config): if config is None: return + super(AccountValidityConfig, self).__init__() self.enabled = config.get("enabled", False) self.renew_by_email_enabled = "renew_at" in config @@ -93,7 +94,7 @@ class RegistrationConfig(Config): ) self.account_validity = AccountValidityConfig( - config.get("account_validity", {}), config + config.get("account_validity") or {}, config ) self.registrations_require_3pid = config.get("registrations_require_3pid", []) From 026f4bdf3c97513b6b48e1f3857198cdb22a3334 Mon Sep 17 00:00:00 2001 From: Andrew
Morgan Date: Mon, 20 Jan 2020 14:11:42 +0000 Subject: [PATCH 022/213] Add changelog --- changelog.d/6747.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6747.bugfix diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix new file mode 100644 index 0000000000..cb088873e5 --- /dev/null +++ b/changelog.d/6747.bugfix @@ -0,0 +1 @@ +Fix infinite recursion and dictionary access bug when setting `account_validity` to an empty block in the homeserver config. Thanks to @Sorunome for reporting. \ No newline at end of file From 11c23af465633b8dd641db60793a19e5b7524e2b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 15:11:38 +0000 Subject: [PATCH 023/213] Newsfile --- changelog.d/6748.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6748.misc diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc new file mode 100644 index 0000000000..de320d4cd9 --- /dev/null +++ b/changelog.d/6748.misc @@ -0,0 +1 @@ +Propagate cache invalidates from workers to other workers. From 2f23eb27b30bef922cfffdd22e046985f18302ac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 15:12:58 +0000 Subject: [PATCH 024/213] Revert "Newsfile" This reverts commit 11c23af465633b8dd641db60793a19e5b7524e2b. --- changelog.d/6748.misc | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/6748.misc diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc deleted file mode 100644 index de320d4cd9..0000000000 --- a/changelog.d/6748.misc +++ /dev/null @@ -1 +0,0 @@ -Propagate cache invalidates from workers to other workers. From 351fdfede6e9582f7c365d41c684b9d60b6c98c2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 20 Jan 2020 15:58:44 +0000 Subject: [PATCH 025/213] Update changelog.d/6747.bugfix Co-Authored-By: Erik Johnston --- changelog.d/6747.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix index cb088873e5..c98107e741 100644 --- a/changelog.d/6747.bugfix +++ b/changelog.d/6747.bugfix @@ -1 +1 @@ -Fix infinite recursion and dictionary access bug when setting `account_validity` to an empty block in the homeserver config. Thanks to @Sorunome for reporting. \ No newline at end of file +Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. From ceecedc68ba1af25b0ee60c5cf927fd1fd245b9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 17:23:59 +0000 Subject: [PATCH 026/213] Fix changing password via user admin API. (#6730) --- changelog.d/6730.bugfix | 1 + synapse/rest/admin/users.py | 4 ++-- tests/rest/admin/test_user.py | 13 +++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6730.bugfix diff --git a/changelog.d/6730.bugfix b/changelog.d/6730.bugfix new file mode 100644 index 0000000000..beb444ca66 --- /dev/null +++ b/changelog.d/6730.bugfix @@ -0,0 +1 @@ +Fix changing password via user admin API. 
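The `account_validity` fix in patch 021 above hinges on a YAML detail: a key with an empty block parses to `None`, not to an empty mapping, so a default passed to `dict.get` never applies. A quick demonstration (assuming PyYAML is available):

```python
import yaml

# "account_validity:" with nothing beneath it parses to None, not {} ...
config = yaml.safe_load("account_validity:\n")
assert config["account_validity"] is None

# ... so the .get() default never kicks in:
assert config.get("account_validity", {}) is None

# The `or {}` form used in the fix covers both None and a missing key:
assert (config.get("account_validity") or {}) == {}
```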
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 574cb90c74..c178c960c5 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -193,8 +193,8 @@ class UserRestServletV2(RestServlet): raise SynapseError(400, "Invalid password") else: new_password = body["password"] - await self._set_password_handler.set_password( - target_user, new_password, requester + await self.set_password_handler.set_password( + target_user.to_string(), new_password, requester ) if "deactivated" in body: diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 7352d609e6..8f09f51c61 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -435,6 +435,19 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(0, channel.json_body["is_guest"]) self.assertEqual(0, channel.json_body["deactivated"]) + # Change password + body = json.dumps({"password": "hahaha"}) + + request, channel = self.make_request( + "PUT", + self.url, + access_token=self.admin_user_tok, + content=body.encode(encoding="utf_8"), + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + # Modify user body = json.dumps({"displayname": "foobar", "deactivated": True}) From 0f6e525be309b65e07066c071b2f55ebbaac6862 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 17:34:13 +0000 Subject: [PATCH 027/213] Fixup synapse.api to pass mypy (#6733) --- changelog.d/6733.misc | 1 + mypy.ini | 3 +++ synapse/api/filtering.py | 4 +++- synapse/api/ratelimiting.py | 7 +++++-- synapse/event_auth.py | 2 +- tox.ini | 1 + 6 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6733.misc diff --git a/changelog.d/6733.misc b/changelog.d/6733.misc new file mode 100644 index 0000000000..bf048c0be2 --- /dev/null +++ b/changelog.d/6733.misc @@ -0,0 +1 @@ +Fixup synapse.api to pass mypy. diff --git a/mypy.ini b/mypy.ini index a66434b76b..e3c515e2c4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -7,6 +7,9 @@ show_error_codes = True show_traceback = True mypy_path = stubs +[mypy-pymacaroons.*] +ignore_missing_imports = True + [mypy-zope] ignore_missing_imports = True diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 6eab1f13f0..8b64d0a285 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -15,6 +15,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import List + from six import text_type import jsonschema @@ -293,7 +295,7 @@ class Filter(object): room_id = None ev_type = "m.presence" contains_url = False - labels = [] + labels = [] # type: List[str] else: sender = event.get("sender", None) if not sender: diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 172841f595..7a049b3af7 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import collections +from collections import OrderedDict +from typing import Any, Optional, Tuple from synapse.api.errors import LimitExceededError @@ -23,7 +24,9 @@ class Ratelimiter(object): """ def __init__(self): - self.message_counts = collections.OrderedDict() + self.message_counts = ( + OrderedDict() + ) # type: OrderedDict[Any, Tuple[float, int, Optional[float]]] def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True): """Can the entity (e.g. user or IP address) perform the action? diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 1033e5e121..e3a1ba47a0 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -634,7 +634,7 @@ def get_public_keys(invite_event): return public_keys -def auth_types_for_event(event) -> Set[Tuple[str]]: +def auth_types_for_event(event) -> Set[Tuple[str, str]]: """Given an event, return a list of (EventType, StateKey) that may be needed to auth the event. The returned list may be a superset of what would actually be required depending on the full state of the room. diff --git a/tox.ini b/tox.ini index b73a993053..edf4654177 100644 --- a/tox.ini +++ b/tox.ini @@ -177,6 +177,7 @@ env = MYPYPATH = stubs/ extras = all commands = mypy \ + synapse/api \ synapse/config/ \ synapse/handlers/ui_auth \ synapse/logging/ \ From 74b74462f1c8b2db9b0995cbf64d879cbfce0dc4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 17:38:09 +0000 Subject: [PATCH 028/213] Fix `/events/:event_id` deprecated API. (#6731) --- changelog.d/6731.bugfix | 1 + synapse/rest/client/v1/events.py | 2 +- tests/rest/client/v1/test_events.py | 27 +++++++++++++++++++++++++++ tests/unittest.py | 2 +- 4 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6731.bugfix diff --git a/changelog.d/6731.bugfix b/changelog.d/6731.bugfix new file mode 100644 index 0000000000..21f6e15cbd --- /dev/null +++ b/changelog.d/6731.bugfix @@ -0,0 +1 @@ +Fix `/events/:event_id` deprecated API. diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 4beb617733..25effd0261 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -70,7 +70,6 @@ class EventStreamRestServlet(RestServlet): return 200, {} -# TODO: Unit test gets, with and without auth, with different kinds of events. 
class EventRestServlet(RestServlet): PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True) @@ -78,6 +77,7 @@ class EventRestServlet(RestServlet): super(EventRestServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() + self.auth = hs.get_auth() self._event_serializer = hs.get_event_client_serializer() async def on_GET(self, request, event_id): diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index f340b7e851..ffb2de1505 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -134,3 +134,30 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): # someone else set topic, expect 6 (join,send,topic,join,send,topic) pass + + +class GetEventsTestCase(unittest.HomeserverTestCase): + servlets = [ + events.register_servlets, + room.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] + + def prepare(self, hs, reactor, clock): + + # register an account + self.user_id = self.register_user("sid1", "pass") + self.token = self.login(self.user_id, "pass") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + def test_get_event_via_events(self): + resp = self.helper.send(self.room_id, tok=self.token) + event_id = resp["event_id"] + + request, channel = self.make_request( + "GET", "/events/" + event_id, access_token=self.token, + ) + self.render(request) + self.assertEquals(channel.code, 200, msg=channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index ddcd4becfe..b56e249386 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -463,7 +463,7 @@ class HomeserverTestCase(TestCase): # Create the user request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") self.render(request) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, 200, msg=channel.result) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) From b0a66ab83ce4d67e145a1129b1ebd8fc53c24408 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 17:38:21 +0000 Subject: [PATCH 029/213] Fixup synapse.rest to pass mypy (#6732) --- changelog.d/6732.misc | 1 + mypy.ini | 9 ++++++++ synapse/rest/admin/users.py | 23 ++++++++++--------- synapse/rest/client/v1/login.py | 2 +- synapse/rest/client/v1/room.py | 18 ++++++++++----- synapse/rest/client/v2_alpha/register.py | 3 ++- synapse/rest/client/v2_alpha/sendtodevice.py | 3 ++- synapse/rest/key/v2/remote_key_resource.py | 5 ++-- synapse/rest/media/v1/media_repository.py | 3 ++- synapse/rest/media/v1/preview_url_resource.py | 7 +++--- synapse/rest/media/v1/thumbnail_resource.py | 14 +++++------ tox.ini | 3 +-- 12 files changed, 56 insertions(+), 35 deletions(-) create mode 100644 changelog.d/6732.misc diff --git a/changelog.d/6732.misc b/changelog.d/6732.misc new file mode 100644 index 0000000000..8edd767405 --- /dev/null +++ b/changelog.d/6732.misc @@ -0,0 +1 @@ +Fixup `synapse.rest` to pass mypy.
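Most of the annotations added across these mypy patches use PEP 484 type comments rather than inline variable annotations, presumably because the comment form also parses on the older Python versions the codebase still supported (inline annotations need 3.6+). A small sketch of the two spellings:

```python
from typing import Dict, List, Optional

# Type comments, as used in the diffs below: mypy reads the comment, so an
# empty container still gets a precise element type.
labels = []  # type: List[str]
cache_misses = dict()  # type: Dict[str, int]
event_filter = None  # type: Optional[str]

# On Python 3.6+ the same annotations can be written inline (PEP 526):
inline_labels: List[str] = []
```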
diff --git a/mypy.ini b/mypy.ini index e3c515e2c4..69be2f67ad 100644 --- a/mypy.ini +++ b/mypy.ini @@ -66,3 +66,12 @@ ignore_missing_imports = True [mypy-sentry_sdk] ignore_missing_imports = True + +[mypy-PIL.*] +ignore_missing_imports = True + +[mypy-lxml] +ignore_missing_imports = True + +[mypy-jwt.*] +ignore_missing_imports = True diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index c178c960c5..52d27fa3e3 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -338,21 +338,22 @@ class UserRegisterServlet(RestServlet): got_mac = body["mac"] - want_mac = hmac.new( + want_mac_builder = hmac.new( key=self.hs.config.registration_shared_secret.encode(), digestmod=hashlib.sha1, ) - want_mac.update(nonce.encode("utf8")) - want_mac.update(b"\x00") - want_mac.update(username) - want_mac.update(b"\x00") - want_mac.update(password) - want_mac.update(b"\x00") - want_mac.update(b"admin" if admin else b"notadmin") + want_mac_builder.update(nonce.encode("utf8")) + want_mac_builder.update(b"\x00") + want_mac_builder.update(username) + want_mac_builder.update(b"\x00") + want_mac_builder.update(password) + want_mac_builder.update(b"\x00") + want_mac_builder.update(b"admin" if admin else b"notadmin") if user_type: - want_mac.update(b"\x00") - want_mac.update(user_type.encode("utf8")) - want_mac = want_mac.hexdigest() + want_mac_builder.update(b"\x00") + want_mac_builder.update(user_type.encode("utf8")) + + want_mac = want_mac_builder.hexdigest() if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")): raise SynapseError(403, "HMAC incorrect") diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index ff9c978fe7..1294e080dc 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -514,7 +514,7 @@ class CasTicketServlet(RestServlet): if user is None: raise Exception("CAS response does not contain user") except Exception: - logger.error("Error parsing CAS response", exc_info=1) + logger.exception("Error parsing CAS response") raise LoginError(401, "Invalid CAS response", errcode=Codes.UNAUTHORIZED) if not success: raise LoginError( diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 711d4ad304..5aef8238b8 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -16,6 +16,7 @@ """ This module contains REST servlets to do with rooms: /rooms/<paths> """ import logging +from typing import List, Optional from six.moves.urllib import parse as urlparse @@ -207,7 +208,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): requester, event_dict, txn_id=txn_id ) - ret = {} + ret = {} # type: dict if event: set_tag("event_id", event.event_id) ret = {"event_id": event.event_id} @@ -285,7 +286,7 @@ class JoinRoomAliasServlet(TransactionRestServlet): try: remote_room_hosts = [ x.decode("ascii") for x in request.args[b"server_name"] - ] + ] # type: Optional[List[str]] except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): @@ -375,7 +376,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): server = parse_string(request, "server", default=None) content = parse_json_object_from_request(request) - limit = int(content.get("limit", 100)) + limit = int(content.get("limit", 100)) # type: Optional[int] since_token = content.get("since", None) search_filter = content.get("filter", None) @@ -504,11 +505,16 @@ class RoomMessageListRestServlet(RestServlet): filter_bytes = parse_string(request, b"filter", encoding=None)
if filter_bytes: filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) - event_filter = Filter(json.loads(filter_json)) - if event_filter.filter_json.get("event_format", "client") == "federation": + event_filter = Filter(json.loads(filter_json)) # type: Optional[Filter] + if ( + event_filter + and event_filter.filter_json.get("event_format", "client") + == "federation" + ): as_client_event = False else: event_filter = None + msgs = await self.pagination_handler.get_messages( room_id=room_id, requester=requester, @@ -611,7 +617,7 @@ class RoomEventContextServlet(RestServlet): filter_bytes = parse_string(request, "filter") if filter_bytes: filter_json = urlparse.unquote(filter_bytes) - event_filter = Filter(json.loads(filter_json)) + event_filter = Filter(json.loads(filter_json)) # type: Optional[Filter] else: event_filter = None diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 66de16a1fa..1bda9aec7e 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -21,6 +21,7 @@ from typing import List, Union from six import string_types import synapse +import synapse.api.auth import synapse.types from synapse.api.constants import LoginType from synapse.api.errors import ( @@ -405,7 +406,7 @@ class RegisterRestServlet(RestServlet): return ret elif kind != b"user": raise UnrecognizedRequestError( - "Do not understand membership kind: %s" % (kind,) + "Do not understand membership kind: %s" % (kind.decode("utf8"),) ) # we do basic sanity checks here because the auth layer will store these diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 501b52fb6c..db829f3098 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +from typing import Tuple from synapse.http import servlet from synapse.http.servlet import parse_json_object_from_request @@ -60,7 +61,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): sender_user_id, message_type, content["messages"] ) - response = (200, {}) + response = (200, {}) # type: Tuple[int, dict] return response diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index e7fc3f0431..9d6813a047 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -13,6 +13,7 @@ # limitations under the License. 
import logging +from typing import Dict, Set from canonicaljson import encode_canonical_json, json from signedjson.sign import sign_json @@ -103,7 +104,7 @@ class RemoteKey(DirectServeResource): async def _async_render_GET(self, request): if len(request.postpath) == 1: (server,) = request.postpath - query = {server.decode("ascii"): {}} + query = {server.decode("ascii"): {}} # type: dict elif len(request.postpath) == 2: server, key_id = request.postpath minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts") @@ -148,7 +149,7 @@ class RemoteKey(DirectServeResource): time_now_ms = self.clock.time_msec() - cache_misses = dict() + cache_misses = dict() # type: Dict[str, Set[str]] for (server_name, key_id, from_server), results in cached.items(): results = [(result["ts_added_ms"], result) for result in results] diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index bd9186fe50..490b1b45a8 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -18,6 +18,7 @@ import errno import logging import os import shutil +from typing import Dict, Tuple from six import iteritems @@ -605,7 +606,7 @@ class MediaRepository(object): # We deduplicate the thumbnail sizes by ignoring the cropped versions if # they have the same dimensions of a scaled one. - thumbnails = {} + thumbnails = {} # type: Dict[Tuple[int, int, str], str] for r_width, r_height, r_method, r_type in requirements: if r_method == "crop": thumbnails.setdefault((r_width, r_height, r_type), r_method) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 6b978be876..07e395cfd1 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -23,6 +23,7 @@ import re import shutil import sys import traceback +from typing import Dict, Optional import six from six import string_types @@ -237,8 +238,8 @@ class PreviewUrlResource(DirectServeResource): # If we don't find a match, we'll look at the HTTP Content-Type, and # if that doesn't exist, we'll fall back to UTF-8. 
if not encoding: - match = _content_type_match.match(media_info["media_type"]) - encoding = match.group(1) if match else "utf-8" + content_match = _content_type_match.match(media_info["media_type"]) + encoding = content_match.group(1) if content_match else "utf-8" og = decode_and_calc_og(body, media_info["uri"], encoding) @@ -518,7 +519,7 @@ def _calc_og(tree, media_uri): # "og:video:height" : "720", # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3", - og = {} + og = {} # type: Dict[str, Optional[str]] for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"): if "content" in tag.attrib: # if we've got more than 50 tags, someone is taking the piss diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 931ce79be8..eee93b4313 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -296,8 +296,8 @@ class ThumbnailResource(DirectServeResource): d_h = desired_height if desired_method.lower() == "crop": - info_list = [] - info_list2 = [] + crop_info_list = [] + crop_info_list2 = [] for info in thumbnail_infos: t_w = info["thumbnail_width"] t_h = info["thumbnail_height"] @@ -309,7 +309,7 @@ class ThumbnailResource(DirectServeResource): type_quality = desired_type != info["thumbnail_type"] length_quality = info["thumbnail_length"] if t_w >= d_w or t_h >= d_h: - info_list.append( + crop_info_list.append( ( aspect_quality, min_quality, @@ -320,7 +320,7 @@ class ThumbnailResource(DirectServeResource): ) ) else: - info_list2.append( + crop_info_list2.append( ( aspect_quality, min_quality, @@ -330,10 +330,10 @@ class ThumbnailResource(DirectServeResource): ) ) - if info_list: - return min(info_list)[-1] + if crop_info_list: + return min(crop_info_list)[-1] else: - return min(info_list2)[-1] + return min(crop_info_list2)[-1] else: info_list = [] info_list2 = [] diff --git a/tox.ini b/tox.ini index b73a993053..1d946a02ba 100644 --- a/tox.ini +++ b/tox.ini @@ -183,8 +183,7 @@ commands = mypy \ synapse/logging/ \ synapse/module_api \ synapse/replication \ - synapse/rest/consent \ - synapse/rest/saml2 \ + synapse/rest \ synapse/spam_checker_api \ synapse/storage/engines \ synapse/streams From 0e68760078c0aac57bfaeb681d534231e191315a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Jan 2020 18:07:20 +0000 Subject: [PATCH 030/213] Add a DeltaState to track changes to be made to current state (#6716) --- changelog.d/6716.misc | 1 + synapse/storage/data_stores/main/events.py | 87 +++++++-------- synapse/storage/persist_events.py | 123 ++++++++++++--------- 3 files changed, 112 insertions(+), 99 deletions(-) create mode 100644 changelog.d/6716.misc diff --git a/changelog.d/6716.misc b/changelog.d/6716.misc new file mode 100644 index 0000000000..319aaa4acb --- /dev/null +++ b/changelog.d/6716.misc @@ -0,0 +1 @@ +Add a `DeltaState` to track changes to be made to current state during event persistence.
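The `DeltaState` introduced below is a frozen attrs class standing in for the old anonymous `(to_delete, to_insert)` tuple. A minimal sketch of that idiom, using an illustrative class rather than the real one:

```python
from typing import Dict, List, Tuple

import attr


# slots=True keeps instances small; frozen=True makes them immutable, so a
# delta cannot be mutated after it has been handed to the storage layer.
@attr.s(slots=True, frozen=True)
class StateDeltaSketch:
    to_delete = attr.ib(type=List[Tuple[str, str]])
    to_insert = attr.ib(type=Dict[Tuple[str, str], str])


delta = StateDeltaSketch(
    to_delete=[("m.room.member", "@alice:example.com")], to_insert={}
)
# delta.to_insert = {}  # would raise attr.exceptions.FrozenInstanceError
```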
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index bb69c20448..596daf8909 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -19,6 +19,7 @@ import itertools import logging from collections import Counter as c_counter, OrderedDict, namedtuple from functools import wraps +from typing import Dict, List, Tuple from six import iteritems, text_type from six.moves import range @@ -41,8 +42,9 @@ from synapse.storage._base import make_in_list_sql_clause from synapse.storage.data_stores.main.event_federation import EventFederationStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore from synapse.storage.data_stores.main.state import StateGroupWorkerStore -from synapse.storage.database import Database -from synapse.types import RoomStreamToken, get_domain_from_id +from synapse.storage.database import Database, LoggingTransaction +from synapse.storage.persist_events import DeltaState +from synapse.types import RoomStreamToken, StateMap, get_domain_from_id from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder from synapse.util.iterutils import batch_iter @@ -148,30 +150,26 @@ class EventsStore( @defer.inlineCallbacks def _persist_events_and_state_updates( self, - events_and_contexts, - current_state_for_room, - state_delta_for_room, - new_forward_extremeties, - backfilled=False, - delete_existing=False, + events_and_contexts: List[Tuple[EventBase, EventContext]], + current_state_for_room: Dict[str, StateMap[str]], + state_delta_for_room: Dict[str, DeltaState], + new_forward_extremeties: Dict[str, List[str]], + backfilled: bool = False, + delete_existing: bool = False, ): """Persist a set of events alongside updates to the current state and forward extremities tables. Args: - events_and_contexts (list[(EventBase, EventContext)]): - current_state_for_room (dict[str, dict]): Map from room_id to the - current state of the room based on forward extremities - state_delta_for_room (dict[str, tuple]): Map from room_id to tuple - of `(to_delete, to_insert)` where to_delete is a list - of type/state keys to remove from current state, and to_insert - is a map (type,key)->event_id giving the state delta in each - room. - new_forward_extremities (dict[str, list[str]]): Map from room_id - to list of event IDs that are the new forward extremities of - the room. - backfilled (bool) - delete_existing (bool): + events_and_contexts: + current_state_for_room: Map from room_id to the current state of + the room based on forward extremities + state_delta_for_room: Map from room_id to the delta to apply to + room state + new_forward_extremities: Map from room_id to list of event IDs + that are the new forward extremities of the room. + backfilled + delete_existing Returns: Deferred: resolves when the events have been persisted @@ -352,12 +350,12 @@ class EventsStore( @log_function def _persist_events_txn( self, - txn, - events_and_contexts, - backfilled, - delete_existing=False, - state_delta_for_room={}, - new_forward_extremeties={}, + txn: LoggingTransaction, + events_and_contexts: List[Tuple[EventBase, EventContext]], + backfilled: bool, + delete_existing: bool = False, + state_delta_for_room: Dict[str, DeltaState] = {}, + new_forward_extremeties: Dict[str, List[str]] = {}, ): """Insert some number of room events into the necessary database tables. 
@@ -366,21 +364,16 @@ class EventsStore( whether the event was rejected. Args: - txn (twisted.enterprise.adbapi.Connection): db connection - events_and_contexts (list[(EventBase, EventContext)]): - events to persist - backfilled (bool): True if the events were backfilled - delete_existing (bool): True to purge existing table rows for the - events from the database. This is useful when retrying due to IntegrityError. - state_delta_for_room (dict[str, (list, dict)]): - The current-state delta for each room. For each room, a tuple - (to_delete, to_insert), being a list of type/state keys to be - removed from the current state, and a state set to be added to - the current state. - new_forward_extremeties (dict[str, list[str]]): - The new forward extremities for each room. For each room, a - list of the event ids which are the forward extremities. + txn + events_and_contexts: events to persist + backfilled: True if the events were backfilled + delete_existing: True to purge existing table rows for the events + from the database. This is useful when retrying due to IntegrityError. + state_delta_for_room: The current-state delta for each room. + new_forward_extremeties: The new forward extremities for each room. + For each room, a list of the event ids which are the forward + extremities. """ all_events_and_contexts = events_and_contexts @@ -465,9 +458,15 @@ class EventsStore( # room_memberships, where applicable. self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) - def _update_current_state_txn(self, txn, state_delta_by_room, stream_id): - for room_id, current_state_tuple in iteritems(state_delta_by_room): - to_delete, to_insert = current_state_tuple + def _update_current_state_txn( + self, + txn: LoggingTransaction, + state_delta_by_room: Dict[str, DeltaState], + stream_id: int, + ): + for room_id, delta_state in iteritems(state_delta_by_room): + to_delete = delta_state.to_delete + to_insert = delta_state.to_insert # First we add entries to the current_state_delta_stream. We # do this before updating the current_state_events table so diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 1ed44925fc..368c457321 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -17,19 +17,24 @@ import logging from collections import deque, namedtuple +from typing import Iterable, List, Optional, Tuple from six import iteritems from six.moves import range +import attr from prometheus_client import Counter, Histogram from twisted.internet import defer from synapse.api.constants import EventTypes +from synapse.events import FrozenEvent +from synapse.events.snapshot import EventContext from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore from synapse.storage.data_stores import DataStores +from synapse.types import StateMap from synapse.util.async_helpers import ObservableDeferred from synapse.util.metrics import Measure @@ -67,6 +72,19 @@ stale_forward_extremities_counter = Histogram( ) +@attr.s(slots=True, frozen=True) +class DeltaState: + """Deltas to use to update the `current_state_events` table.
+ + Attributes: + to_delete: List of type/state_keys to delete from current state + to_insert: Map of state to upsert into current state + """ + + to_delete = attr.ib(type=List[Tuple[str, str]]) + to_insert = attr.ib(type=StateMap[str]) + + class _EventPeristenceQueue(object): """Queues up events so that they can be persisted in bulk with only one concurrent transaction per room. @@ -138,13 +156,12 @@ class _EventPeristenceQueue(object): self._currently_persisting_rooms.add(room_id) - @defer.inlineCallbacks - def handle_queue_loop(): + async def handle_queue_loop(): try: queue = self._get_drainining_queue(room_id) for item in queue: try: - ret = yield per_item_callback(item) + ret = await per_item_callback(item) except Exception: with PreserveLoggingContext(): item.deferred.errback() @@ -191,12 +208,16 @@ class EventsPersistenceStorage(object): self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks - def persist_events(self, events_and_contexts, backfilled=False): + def persist_events( + self, + events_and_contexts: List[Tuple[FrozenEvent, EventContext]], + backfilled: bool = False, + ): """ Write events to the database Args: events_and_contexts: list of tuples of (event, context) - backfilled (bool): Whether the results are retrieved from federation + backfilled: Whether the results are retrieved from federation via backfill or not. Used to determine if they're "new" events which might update the current state etc. @@ -226,16 +247,12 @@ class EventsPersistenceStorage(object): return max_persisted_id @defer.inlineCallbacks - def persist_event(self, event, context, backfilled=False): + def persist_event( + self, event: FrozenEvent, context: EventContext, backfilled: bool = False + ): """ - - Args: - event (EventBase): - context (EventContext): - backfilled (bool): - Returns: - Deferred: resolves to (int, int): the stream ordering of ``event``, + Deferred[Tuple[int, int]]: the stream ordering of ``event``, and the stream ordering of the latest persisted event """ deferred = self._event_persist_queue.add_to_queue( @@ -249,28 +266,22 @@ class EventsPersistenceStorage(object): max_persisted_id = yield self.main_store.get_current_events_token() return (event.internal_metadata.stream_ordering, max_persisted_id) - def _maybe_start_persisting(self, room_id): - @defer.inlineCallbacks - def persisting_queue(item): + def _maybe_start_persisting(self, room_id: str): + async def persisting_queue(item): with Measure(self._clock, "persist_events"): - yield self._persist_events( + await self._persist_events( item.events_and_contexts, backfilled=item.backfilled ) self._event_persist_queue.handle_queue(room_id, persisting_queue) - @defer.inlineCallbacks - def _persist_events(self, events_and_contexts, backfilled=False): + async def _persist_events( + self, + events_and_contexts: List[Tuple[FrozenEvent, EventContext]], + backfilled: bool = False, + ): """Calculates the change to current state and forward extremities, and persists the given events and with those updates. 
- - Args: - events_and_contexts (list[(EventBase, EventContext)]): - backfilled (bool): - delete_existing (bool): - - Returns: - Deferred: resolves when the events have been persisted """ if not events_and_contexts: return @@ -315,10 +326,10 @@ class EventsPersistenceStorage(object): ) for room_id, ev_ctx_rm in iteritems(events_by_room): - latest_event_ids = yield self.main_store.get_latest_event_ids_in_room( + latest_event_ids = await self.main_store.get_latest_event_ids_in_room( room_id ) - new_latest_event_ids = yield self._calculate_new_extremities( + new_latest_event_ids = await self._calculate_new_extremities( room_id, ev_ctx_rm, latest_event_ids ) @@ -374,7 +385,7 @@ class EventsPersistenceStorage(object): with Measure( self._clock, "persist_events.get_new_state_after_events" ): - res = yield self._get_new_state_after_events( + res = await self._get_new_state_after_events( room_id, ev_ctx_rm, latest_event_ids, @@ -389,12 +400,12 @@ class EventsPersistenceStorage(object): # If there is a delta we know that we've # only added or replaced state, never # removed keys entirely. - state_delta_for_room[room_id] = ([], delta_ids) + state_delta_for_room[room_id] = DeltaState([], delta_ids) elif current_state is not None: with Measure( self._clock, "persist_events.calculate_state_delta" ): - delta = yield self._calculate_state_delta( + delta = await self._calculate_state_delta( room_id, current_state ) state_delta_for_room[room_id] = delta @@ -404,7 +415,7 @@ class EventsPersistenceStorage(object): if current_state is not None: current_state_for_room[room_id] = current_state - yield self.main_store._persist_events_and_state_updates( + await self.main_store._persist_events_and_state_updates( chunk, current_state_for_room=current_state_for_room, state_delta_for_room=state_delta_for_room, @@ -412,8 +423,12 @@ class EventsPersistenceStorage(object): backfilled=backfilled, ) - @defer.inlineCallbacks - def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): + async def _calculate_new_extremities( + self, + room_id: str, + event_contexts: List[Tuple[FrozenEvent, EventContext]], + latest_event_ids: List[str], + ): """Calculates the new forward extremities for a room given events to persist. @@ -444,13 +459,13 @@ class EventsPersistenceStorage(object): ) # Remove any events which are prev_events of any existing events. - existing_prevs = yield self.main_store._get_events_which_are_prevs(result) + existing_prevs = await self.main_store._get_events_which_are_prevs(result) result.difference_update(existing_prevs) # Finally handle the case where the new events have soft-failed prev # events. If they do we need to remove them and their prev events, # otherwise we end up with dangling extremities. 
- existing_prevs = yield self.main_store._get_prevs_before_rejected( + existing_prevs = await self.main_store._get_prevs_before_rejected( e_id for event in new_events for e_id in event.prev_event_ids() ) result.difference_update(existing_prevs) @@ -464,10 +479,13 @@ class EventsPersistenceStorage(object): return result - @defer.inlineCallbacks - def _get_new_state_after_events( - self, room_id, events_context, old_latest_event_ids, new_latest_event_ids - ): + async def _get_new_state_after_events( + self, + room_id: str, + events_context: List[Tuple[FrozenEvent, EventContext]], + old_latest_event_ids: Iterable[str], + new_latest_event_ids: Iterable[str], + ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]]]: """Calculate the current state dict after adding some new events to a room @@ -485,7 +503,6 @@ class EventsPersistenceStorage(object): the new forward extremities for the room. Returns: - Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]: Returns a tuple of two state maps, the first being the full new current state and the second being the delta to the existing current state. If both are None then there has been no change. @@ -547,7 +564,7 @@ class EventsPersistenceStorage(object): if missing_event_ids: # Now pull out the state groups for any missing events from DB - event_to_groups = yield self.main_store._get_state_group_for_events( + event_to_groups = await self.main_store._get_state_group_for_events( missing_event_ids ) event_id_to_state_group.update(event_to_groups) @@ -588,7 +605,7 @@ class EventsPersistenceStorage(object): # their state IDs so we can resolve to a single state set. missing_state = new_state_groups - set(state_groups_map) if missing_state: - group_to_state = yield self.state_store._get_state_for_groups(missing_state) + group_to_state = await self.state_store._get_state_for_groups(missing_state) state_groups_map.update(group_to_state) if len(new_state_groups) == 1: @@ -612,10 +629,10 @@ class EventsPersistenceStorage(object): break if not room_version: - room_version = yield self.main_store.get_room_version(room_id) + room_version = await self.main_store.get_room_version(room_id) logger.debug("calling resolve_state_groups from preserve_events") - res = yield self._state_resolution_handler.resolve_state_groups( + res = await self._state_resolution_handler.resolve_state_groups( room_id, room_version, state_groups, @@ -625,18 +642,14 @@ class EventsPersistenceStorage(object): return res.state, None - @defer.inlineCallbacks - def _calculate_state_delta(self, room_id, current_state): + async def _calculate_state_delta( + self, room_id: str, current_state: StateMap[str] + ) -> DeltaState: """Calculate the new state deltas for a room. Assumes that we are only persisting events for one room at a time. - - Returns: - tuple[list, dict] (to_delete, to_insert): where to_delete are the - type/state_keys to remove from current_state_events and `to_insert` - are the updates to current_state_events. 
""" - existing_state = yield self.main_store.get_current_state_ids(room_id) + existing_state = await self.main_store.get_current_state_ids(room_id) to_delete = [key for key in existing_state if key not in current_state] @@ -646,4 +659,4 @@ class EventsPersistenceStorage(object): if ev_id != existing_state.get(key) } - return to_delete, to_insert + return DeltaState(to_delete=to_delete, to_insert=to_insert) From 07124d028df6b33336dcc2ef807fd7866f42902a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 21 Jan 2020 19:04:58 +0000 Subject: [PATCH 031/213] Port synapse_port_db to async/await (#6718) * Raise an exception if there are pending background updates So we return with a non-0 code * Changelog * Port synapse_port_db to async/await * Port update_database to async/await * Add version string to mocked homeservers * Remove unused imports * Convert overseen bits to async/await * Fixup logging contexts * Fix imports * Add a way to print an error without raising an exception * Incorporate review --- changelog.d/6718.bugfix | 1 + scripts-dev/update_database | 20 ++-- scripts/synapse_port_db | 194 +++++++++++++++++++++--------------- 3 files changed, 126 insertions(+), 89 deletions(-) create mode 100644 changelog.d/6718.bugfix diff --git a/changelog.d/6718.bugfix b/changelog.d/6718.bugfix new file mode 100644 index 0000000000..23b23e3ed8 --- /dev/null +++ b/changelog.d/6718.bugfix @@ -0,0 +1 @@ +Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case. diff --git a/scripts-dev/update_database b/scripts-dev/update_database index 1d62f0403a..94aa8758b4 100755 --- a/scripts-dev/update_database +++ b/scripts-dev/update_database @@ -22,10 +22,12 @@ import yaml from twisted.internet import defer, reactor +import synapse from synapse.config.homeserver import HomeServerConfig from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.util.versionstring import get_version_string logger = logging.getLogger("update_database") @@ -38,6 +40,8 @@ class MockHomeserver(HomeServer): config.server_name, reactor=reactor, config=config, **kwargs ) + self.version_string = "Synapse/"+get_version_string(synapse) + if __name__ == "__main__": parser = argparse.ArgumentParser( @@ -81,15 +85,17 @@ if __name__ == "__main__": hs.setup() store = hs.get_datastore() - @defer.inlineCallbacks - def run_background_updates(): - yield store.db.updates.run_background_updates(sleep=False) + async def run_background_updates(): + await store.db.updates.run_background_updates(sleep=False) # Stop the reactor to exit the script once every background update is run. reactor.stop() - # Apply all background updates on the database. - reactor.callWhenRunning( - lambda: run_as_background_process("background_updates", run_background_updates) - ) + def run(): + # Apply all background updates on the database. 
+ defer.ensureDeferred( + run_as_background_process("background_updates", run_background_updates) + ) + + reactor.callWhenRunning(run) reactor.run() diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 5e69104b97..e8b698f3ff 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -27,13 +27,16 @@ from six import string_types import yaml -from twisted.enterprise import adbapi from twisted.internet import defer, reactor +import synapse from synapse.config.database import DatabaseConnectionConfig from synapse.config.homeserver import HomeServerConfig -from synapse.logging.context import PreserveLoggingContext -from synapse.storage._base import LoggingTransaction +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, + run_in_background, +) from synapse.storage.data_stores.main.client_ips import ClientIpBackgroundUpdateStore from synapse.storage.data_stores.main.deviceinbox import ( DeviceInboxBackgroundUpdateStore, @@ -61,6 +64,7 @@ from synapse.storage.database import Database, make_conn from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.util import Clock +from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse_port_db") @@ -125,6 +129,13 @@ APPEND_ONLY_TABLES = [ ] +# Error returned by the run function. Used at the top-level part of the script to +# handle errors and return codes. +end_error = None +# The exec_info for the error, if any. If error is defined but not exec_info the script +# will show only the error message without the stacktrace, if exec_info is defined but +# not the error then the script will show nothing outside of what's printed in the run +# function. If both are defined, the script will print both the error and the stacktrace. end_error_exec_info = None @@ -177,6 +188,7 @@ class MockHomeserver: self.clock = Clock(reactor) self.config = config self.hostname = config.server_name + self.version_string = "Synapse/"+get_version_string(synapse) def get_clock(self): return self.clock @@ -189,11 +201,10 @@ class Porter(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) - @defer.inlineCallbacks - def setup_table(self, table): + async def setup_table(self, table): if table in APPEND_ONLY_TABLES: # It's safe to just carry on inserting. 
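# (The port_from_sqlite3 table records, for each table being ported, the
# forward and backward row IDs reached so far, letting an interrupted
# port resume rather than start again.)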
- row = yield self.postgres_store.db.simple_select_one( + row = await self.postgres_store.db.simple_select_one( table="port_from_sqlite3", keyvalues={"table_name": table}, retcols=("forward_rowid", "backward_rowid"), @@ -207,10 +218,10 @@ class Porter(object): forward_chunk, already_ported, total_to_port, - ) = yield self._setup_sent_transactions() + ) = await self._setup_sent_transactions() backward_chunk = 0 else: - yield self.postgres_store.db.simple_insert( + await self.postgres_store.db.simple_insert( table="port_from_sqlite3", values={ "table_name": table, @@ -227,7 +238,7 @@ class Porter(object): backward_chunk = row["backward_rowid"] if total_to_port is None: - already_ported, total_to_port = yield self._get_total_count_to_port( + already_ported, total_to_port = await self._get_total_count_to_port( table, forward_chunk, backward_chunk ) else: @@ -238,9 +249,9 @@ class Porter(object): ) txn.execute("TRUNCATE %s CASCADE" % (table,)) - yield self.postgres_store.execute(delete_all) + await self.postgres_store.execute(delete_all) - yield self.postgres_store.db.simple_insert( + await self.postgres_store.db.simple_insert( table="port_from_sqlite3", values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, ) @@ -248,16 +259,13 @@ class Porter(object): forward_chunk = 1 backward_chunk = 0 - already_ported, total_to_port = yield self._get_total_count_to_port( + already_ported, total_to_port = await self._get_total_count_to_port( table, forward_chunk, backward_chunk ) - defer.returnValue( - (table, already_ported, total_to_port, forward_chunk, backward_chunk) - ) + return table, already_ported, total_to_port, forward_chunk, backward_chunk - @defer.inlineCallbacks - def handle_table( + async def handle_table( self, table, postgres_size, table_size, forward_chunk, backward_chunk ): logger.info( @@ -275,7 +283,7 @@ class Porter(object): self.progress.add_table(table, postgres_size, table_size) if table == "event_search": - yield self.handle_search_table( + await self.handle_search_table( postgres_size, table_size, forward_chunk, backward_chunk ) return @@ -294,7 +302,7 @@ class Porter(object): if table == "user_directory_stream_pos": # We need to make sure there is a single row, `(X, null), as that is # what synapse expects to be there. 
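# (A NULL stream_id makes Synapse rebuild the user directory from
# scratch the next time it starts.)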
- yield self.postgres_store.db.simple_insert( + await self.postgres_store.db.simple_insert( table=table, values={"stream_id": None} ) self.progress.update(table, table_size) # Mark table as done @@ -335,7 +343,7 @@ class Porter(object): return headers, forward_rows, backward_rows - headers, frows, brows = yield self.sqlite_store.db.runInteraction( + headers, frows, brows = await self.sqlite_store.db.runInteraction( "select", r ) @@ -361,7 +369,7 @@ class Porter(object): }, ) - yield self.postgres_store.execute(insert) + await self.postgres_store.execute(insert) postgres_size += len(rows) @@ -369,8 +377,7 @@ class Porter(object): else: return - @defer.inlineCallbacks - def handle_search_table( + async def handle_search_table( self, postgres_size, table_size, forward_chunk, backward_chunk ): select = ( @@ -390,7 +397,7 @@ class Porter(object): return headers, rows - headers, rows = yield self.sqlite_store.db.runInteraction("select", r) + headers, rows = await self.sqlite_store.db.runInteraction("select", r) if rows: forward_chunk = rows[-1][0] + 1 @@ -438,7 +445,7 @@ class Porter(object): }, ) - yield self.postgres_store.execute(insert) + await self.postgres_store.execute(insert) postgres_size += len(rows) @@ -476,11 +483,10 @@ class Porter(object): return store - @defer.inlineCallbacks - def run_background_updates_on_postgres(self): + async def run_background_updates_on_postgres(self): # Manually apply all background updates on the PostgreSQL database. postgres_ready = ( - yield self.postgres_store.db.updates.has_completed_background_updates() + await self.postgres_store.db.updates.has_completed_background_updates() ) if not postgres_ready: @@ -489,13 +495,20 @@ class Porter(object): self.progress.set_state("Running background updates on PostgreSQL") while not postgres_ready: - yield self.postgres_store.db.updates.do_next_background_update(100) - postgres_ready = yield ( + await self.postgres_store.db.updates.do_next_background_update(100) + postgres_ready = await ( self.postgres_store.db.updates.has_completed_background_updates() ) - @defer.inlineCallbacks - def run(self): + async def run(self): + """Ports the SQLite database to a PostgreSQL database. + + When a fatal error is met, its message is assigned to the global "end_error" + variable. When this error comes with a stacktrace, its exec_info is assigned to + the global "end_error_exec_info" variable. + """ + global end_error + try: # we allow people to port away from outdated versions of sqlite. self.sqlite_store = self.build_db_store( @@ -505,21 +518,21 @@ class Porter(object): # Check if all background updates are done, abort if not. updates_complete = ( - yield self.sqlite_store.db.updates.has_completed_background_updates() + await self.sqlite_store.db.updates.has_completed_background_updates() ) if not updates_complete: - sys.stderr.write( + end_error = ( "Pending background updates exist in the SQLite3 database." 
" Please start Synapse again and wait until every update has finished" " before running this script.\n" ) - defer.returnValue(None) + return self.postgres_store = self.build_db_store( self.hs_config.get_single_database() ) - yield self.run_background_updates_on_postgres() + await self.run_background_updates_on_postgres() self.progress.set_state("Creating port tables") @@ -547,22 +560,22 @@ class Porter(object): ) try: - yield self.postgres_store.db.runInteraction("alter_table", alter_table) + await self.postgres_store.db.runInteraction("alter_table", alter_table) except Exception: # On Error Resume Next pass - yield self.postgres_store.db.runInteraction( + await self.postgres_store.db.runInteraction( "create_port_table", create_port_table ) # Step 2. Get tables. self.progress.set_state("Fetching tables") - sqlite_tables = yield self.sqlite_store.db.simple_select_onecol( + sqlite_tables = await self.sqlite_store.db.simple_select_onecol( table="sqlite_master", keyvalues={"type": "table"}, retcol="name" ) - postgres_tables = yield self.postgres_store.db.simple_select_onecol( + postgres_tables = await self.postgres_store.db.simple_select_onecol( table="information_schema.tables", keyvalues={}, retcol="distinct table_name", @@ -573,28 +586,34 @@ class Porter(object): # Step 3. Figure out what still needs copying self.progress.set_state("Checking on port progress") - setup_res = yield defer.gatherResults( - [ - self.setup_table(table) - for table in tables - if table not in ["schema_version", "applied_schema_deltas"] - and not table.startswith("sqlite_") - ], - consumeErrors=True, + setup_res = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(self.setup_table, table) + for table in tables + if table not in ["schema_version", "applied_schema_deltas"] + and not table.startswith("sqlite_") + ], + consumeErrors=True, + ) ) # Step 4. Do the copying. self.progress.set_state("Copying to postgres") - yield defer.gatherResults( - [self.handle_table(*res) for res in setup_res], consumeErrors=True + await make_deferred_yieldable( + defer.gatherResults( + [run_in_background(self.handle_table, *res) for res in setup_res], + consumeErrors=True, + ) ) # Step 5. 
Do final post-processing - yield self._setup_state_group_id_seq() + await self._setup_state_group_id_seq() self.progress.done() - except Exception: + except Exception as e: global end_error_exec_info + end_error = e end_error_exec_info = sys.exc_info() logger.exception("") finally: @@ -634,8 +653,7 @@ class Porter(object): return outrows - @defer.inlineCallbacks - def _setup_sent_transactions(self): + async def _setup_sent_transactions(self): # Only save things from the last day yesterday = int(time.time() * 1000) - 86400000 @@ -656,7 +674,7 @@ class Porter(object): return headers, [r for r in rows if r[ts_ind] < yesterday] - headers, rows = yield self.sqlite_store.db.runInteraction("select", r) + headers, rows = await self.sqlite_store.db.runInteraction("select", r) rows = self._convert_rows("sent_transactions", headers, rows) @@ -669,7 +687,7 @@ class Porter(object): txn, "sent_transactions", headers[1:], rows ) - yield self.postgres_store.execute(insert) + await self.postgres_store.execute(insert) else: max_inserted_rowid = 0 @@ -686,10 +704,10 @@ class Porter(object): else: return 1 - next_chunk = yield self.sqlite_store.execute(get_start_id) + next_chunk = await self.sqlite_store.execute(get_start_id) next_chunk = max(max_inserted_rowid + 1, next_chunk) - yield self.postgres_store.db.simple_insert( + await self.postgres_store.db.simple_insert( table="port_from_sqlite3", values={ "table_name": "sent_transactions", @@ -705,46 +723,49 @@ class Porter(object): (size,) = txn.fetchone() return int(size) - remaining_count = yield self.sqlite_store.execute(get_sent_table_size) + remaining_count = await self.sqlite_store.execute(get_sent_table_size) total_count = remaining_count + inserted_rows - defer.returnValue((next_chunk, inserted_rows, total_count)) + return next_chunk, inserted_rows, total_count - @defer.inlineCallbacks - def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): - frows = yield self.sqlite_store.execute_sql( + async def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk): + frows = await self.sqlite_store.execute_sql( "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk ) - brows = yield self.sqlite_store.execute_sql( + brows = await self.sqlite_store.execute_sql( "SELECT count(*) FROM %s WHERE rowid <= ?" 
% (table,), backward_chunk ) - defer.returnValue(frows[0][0] + brows[0][0]) + return frows[0][0] + brows[0][0] - @defer.inlineCallbacks - def _get_already_ported_count(self, table): - rows = yield self.postgres_store.execute_sql( + async def _get_already_ported_count(self, table): + rows = await self.postgres_store.execute_sql( "SELECT count(*) FROM %s" % (table,) ) - defer.returnValue(rows[0][0]) + return rows[0][0] - @defer.inlineCallbacks - def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): - remaining, done = yield defer.gatherResults( - [ - self._get_remaining_count_to_port(table, forward_chunk, backward_chunk), - self._get_already_ported_count(table), - ], - consumeErrors=True, + async def _get_total_count_to_port(self, table, forward_chunk, backward_chunk): + remaining, done = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._get_remaining_count_to_port, + table, + forward_chunk, + backward_chunk, + ), + run_in_background(self._get_already_ported_count, table), + ], + ) ) remaining = int(remaining) if remaining else 0 done = int(done) if done else 0 - defer.returnValue((done, remaining + done)) + return done, remaining + done def _setup_state_group_id_seq(self): def r(txn): @@ -1010,7 +1031,12 @@ if __name__ == "__main__": hs_config=config, ) - reactor.callWhenRunning(porter.run) + @defer.inlineCallbacks + def run(): + with LoggingContext("synapse_port_db_run"): + yield defer.ensureDeferred(porter.run()) + + reactor.callWhenRunning(run) reactor.run() @@ -1019,7 +1045,11 @@ if __name__ == "__main__": else: start() - if end_error_exec_info: - exc_type, exc_value, exc_traceback = end_error_exec_info - traceback.print_exception(exc_type, exc_value, exc_traceback) + if end_error: + if end_error_exec_info: + exc_type, exc_value, exc_traceback = end_error_exec_info + traceback.print_exception(exc_type, exc_value, exc_traceback) + + sys.stderr.write(end_error) + sys.exit(5) From 837f62266b845cce9797fbe989a7816d4f1fadff Mon Sep 17 00:00:00 2001 From: Ivan Vilata-i-Balaguer Date: Wed, 22 Jan 2020 02:32:52 -0500 Subject: [PATCH 032/213] Avoid attribute error when `password_config` present but empty (#6753) The old statement returned `None` for such a `password_config` (like the one created on first run), thus retrieval of the `pepper` key failed with `AttributeError`. Fixes #5315 Signed-off-by: Ivan Vilata i Balaguer --- changelog.d/6753.bugfix | 1 + scripts/hash_password | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6753.bugfix diff --git a/changelog.d/6753.bugfix b/changelog.d/6753.bugfix new file mode 100644 index 0000000000..5dfde793e1 --- /dev/null +++ b/changelog.d/6753.bugfix @@ -0,0 +1 @@ +Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. 
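The root cause is a YAML subtlety worth making explicit: a key that is present but empty (such as the bare `password_config:` line written into a generated config) loads as `None`, and `dict.get(key, default)` only falls back to the default when the key is absent altogether. The following standalone sketch reproduces the failure and shows the `or {}` idiom used in the one-line fix below; it is illustrative only and not part of the patch:

import yaml

# A config file where the key is present but empty, as on first run.
config = yaml.safe_load("password_config:\n")

# The key exists, so .get() returns its stored value (None), not the default.
assert config.get("password_config", {}) is None

# The fix: coerce any falsy value to an empty dict before chaining .get().
password_config = config.get("password_config", None) or {}
assert password_config.get("pepper", "") == ""  # safe even when empty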
diff --git a/scripts/hash_password b/scripts/hash_password index a1eb0769da..a30767f758 100755 --- a/scripts/hash_password +++ b/scripts/hash_password @@ -52,7 +52,7 @@ if __name__ == "__main__": if "config" in args and args.config: config = yaml.safe_load(args.config) bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds) - password_config = config.get("password_config", {}) + password_config = config.get("password_config", None) or {} password_pepper = password_config.get("pepper", password_pepper) password = args.password From 2093f83ea045d8a3fc6daa0c793da9b17237dc1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Jan 2020 10:36:48 +0000 Subject: [PATCH 033/213] Remove unused CI docker compose files (#6754) These now exist in the pipelines repo. --- .buildkite/docker-compose.py35.pg95.yaml | 22 ---------------------- .buildkite/docker-compose.py37.pg11.yaml | 22 ---------------------- .buildkite/docker-compose.py37.pg95.yaml | 22 ---------------------- changelog.d/6754.misc | 1 + 4 files changed, 1 insertion(+), 66 deletions(-) delete mode 100644 .buildkite/docker-compose.py35.pg95.yaml delete mode 100644 .buildkite/docker-compose.py37.pg11.yaml delete mode 100644 .buildkite/docker-compose.py37.pg95.yaml create mode 100644 changelog.d/6754.misc diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml deleted file mode 100644 index 43237b7775..0000000000 --- a/.buildkite/docker-compose.py35.pg95.yaml +++ /dev/null @@ -1,22 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.5 - environment: - POSTGRES_PASSWORD: postgres - command: -c fsync=off - - testenv: - image: python:3.5 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /src - volumes: - - ..:/src diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml deleted file mode 100644 index b767228147..0000000000 --- a/.buildkite/docker-compose.py37.pg11.yaml +++ /dev/null @@ -1,22 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:11 - environment: - POSTGRES_PASSWORD: postgres - command: -c fsync=off - - testenv: - image: python:3.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /src - volumes: - - ..:/src diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml deleted file mode 100644 index 02fcd28304..0000000000 --- a/.buildkite/docker-compose.py37.pg95.yaml +++ /dev/null @@ -1,22 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.5 - environment: - POSTGRES_PASSWORD: postgres - command: -c fsync=off - - testenv: - image: python:3.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /src - volumes: - - ..:/src diff --git a/changelog.d/6754.misc b/changelog.d/6754.misc new file mode 100644 index 0000000000..0a955e47e6 --- /dev/null +++ b/changelog.d/6754.misc @@ -0,0 +1 @@ +Remove unused CI docker compose files. From 5d7a6ad2238981646b2ae7b4071d8715281d181a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 22 Jan 2020 10:37:00 +0000 Subject: [PATCH 034/213] Allow streaming cache invalidate all to workers. 
(#6749) --- changelog.d/6749.misc | 1 + docs/tcp_replication.md | 5 ++++ synapse/replication/slave/storage/_base.py | 7 +++++- synapse/replication/tcp/streams/_base.py | 26 +++++++++++++++++---- synapse/storage/_base.py | 18 +++++++++++---- synapse/storage/data_stores/main/cache.py | 27 ++++++++++++++++++---- 6 files changed, 69 insertions(+), 15 deletions(-) create mode 100644 changelog.d/6749.misc diff --git a/changelog.d/6749.misc b/changelog.d/6749.misc new file mode 100644 index 0000000000..9fa13cb1d4 --- /dev/null +++ b/changelog.d/6749.misc @@ -0,0 +1 @@ +Allow streaming cache 'invalidate all' to workers. diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md index a0b1d563ff..e3a4634b14 100644 --- a/docs/tcp_replication.md +++ b/docs/tcp_replication.md @@ -254,6 +254,11 @@ and they key to invalidate. For example: > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251] +Alternatively, an entire cache can be invalidated by sending down a `null` +instead of the key. For example: + + > RDATA caches 550953772 ["get_user_by_id", null, 1550574873252] + However, there are times when a number of caches need to be invalidated at the same time with the same key. To reduce traffic we batch those invalidations into a single poke by defining a special cache name that diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 704282c800..f45cbd37a0 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -66,11 +66,16 @@ class BaseSlavedStore(SQLBaseStore): self._cache_id_gen.advance(token) for row in rows: if row.cache_func == CURRENT_STATE_CACHE_NAME: + if row.keys is None: + raise Exception( + "Can't send an 'invalidate all' for current state cache" + ) + room_id = row.keys[0] members_changed = set(row.keys[1:]) self._invalidate_state_caches(room_id, members_changed) else: - self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys)) + self._attempt_to_invalidate_cache(row.cache_func, row.keys) def _invalidate_cache_and_stream(self, txn, cache_func, keys): txn.call_after(cache_func.invalidate, keys) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index e03e77199b..a8d568b14a 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -17,7 +17,9 @@ import itertools import logging from collections import namedtuple -from typing import Any +from typing import Any, List, Optional + +import attr logger = logging.getLogger(__name__) @@ -65,10 +67,24 @@ PushersStreamRow = namedtuple( "PushersStreamRow", ("user_id", "app_id", "pushkey", "deleted"), # str # str # str # bool ) -CachesStreamRow = namedtuple( - "CachesStreamRow", - ("cache_func", "keys", "invalidation_ts"), # str # list(str) # int -) + + +@attr.s +class CachesStreamRow: + """Stream to inform workers they should invalidate their cache. + + Attributes: + cache_func: Name of the cached function. + keys: The entry in the cache to invalidate. If None then will + invalidate all. + invalidation_ts: Timestamp of when the invalidation took place. 
+ """ + + cache_func = attr.ib(type=str) + keys = attr.ib(type=Optional[List[Any]]) + invalidation_ts = attr.ib(type=int) + + PublicRoomsStreamRow = namedtuple( "PublicRoomsStreamRow", ( diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 3bb9381663..da3b99f93d 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -17,6 +17,7 @@ import logging import random from abc import ABCMeta +from typing import Any, Optional from six import PY2 from six.moves import builtins @@ -26,7 +27,7 @@ from canonicaljson import json from synapse.storage.database import LoggingTransaction # noqa: F401 from synapse.storage.database import make_in_list_sql_clause # noqa: F401 from synapse.storage.database import Database -from synapse.types import get_domain_from_id +from synapse.types import Collection, get_domain_from_id logger = logging.getLogger(__name__) @@ -63,17 +64,24 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,)) - def _attempt_to_invalidate_cache(self, cache_name, key): + def _attempt_to_invalidate_cache( + self, cache_name: str, key: Optional[Collection[Any]] + ): """Attempts to invalidate the cache of the given name, ignoring if the cache doesn't exist. Mainly used for invalidating caches on workers, where they may not have the cache. Args: - cache_name (str) - key (tuple) + cache_name + key: Entry to invalidate. If None then invalidates the entire + cache. """ + try: - getattr(self, cache_name).invalidate(key) + if key is None: + getattr(self, cache_name).invalidate_all() + else: + getattr(self, cache_name).invalidate(tuple(key)) except AttributeError: # We probably haven't pulled in the cache in this worker, # which is fine. diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py index bf91512daf..afa2b41c98 100644 --- a/synapse/storage/data_stores/main/cache.py +++ b/synapse/storage/data_stores/main/cache.py @@ -16,6 +16,7 @@ import itertools import logging +from typing import Any, Iterable, Optional from twisted.internet import defer @@ -43,6 +44,14 @@ class CacheInvalidationStore(SQLBaseStore): txn.call_after(cache_func.invalidate, keys) self._send_invalidation_to_replication(txn, cache_func.__name__, keys) + def _invalidate_all_cache_and_stream(self, txn, cache_func): + """Invalidates the entire cache and adds it to the cache stream so slaves + will know to invalidate their caches. + """ + + txn.call_after(cache_func.invalidate_all) + self._send_invalidation_to_replication(txn, cache_func.__name__, None) + def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed): """Special case invalidation of caches based on current state. @@ -73,17 +82,24 @@ class CacheInvalidationStore(SQLBaseStore): txn, CURRENT_STATE_CACHE_NAME, [room_id] ) - def _send_invalidation_to_replication(self, txn, cache_name, keys): + def _send_invalidation_to_replication( + self, txn, cache_name: str, keys: Optional[Iterable[Any]] + ): """Notifies replication that given cache has been invalidated. Note that this does *not* invalidate the cache locally. Args: txn - cache_name (str) - keys (iterable[str]) + cache_name + keys: Entry to invalidate. If None will invalidate all. 
""" + if cache_name == CURRENT_STATE_CACHE_NAME and keys is None: + raise Exception( + "Can't stream invalidate all with magic current state cache" + ) + if isinstance(self.database_engine, PostgresEngine): # get_next() returns a context manager which is designed to wrap # the transaction. However, we want to only get an ID when we want @@ -95,13 +111,16 @@ class CacheInvalidationStore(SQLBaseStore): txn.call_after(ctx.__exit__, None, None, None) txn.call_after(self.hs.get_notifier().on_new_replication_data) + if keys is not None: + keys = list(keys) + self.db.simple_insert_txn( txn, table="cache_invalidation_stream", values={ "stream_id": stream_id, "cache_func": cache_name, - "keys": list(keys), + "keys": keys, "invalidation_ts": self.clock.time_msec(), }, ) From 5e52d8563bdc0ab6667f0ec2571f35791720a40a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 Jan 2020 11:05:14 +0000 Subject: [PATCH 035/213] Allow monthly active user limiting support for worker mode, fixes #4639. (#6742) --- changelog.d/6742.bugfix | 1 + synapse/app/client_reader.py | 4 + synapse/app/event_creator.py | 4 + synapse/app/federation_reader.py | 4 + synapse/app/synchrotron.py | 4 + .../data_stores/main/monthly_active_users.py | 165 +++++++++--------- 6 files changed, 100 insertions(+), 82 deletions(-) create mode 100644 changelog.d/6742.bugfix diff --git a/changelog.d/6742.bugfix b/changelog.d/6742.bugfix new file mode 100644 index 0000000000..ca2687c8bb --- /dev/null +++ b/changelog.d/6742.bugfix @@ -0,0 +1 @@ +Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 3edfe19567..ca96da6a4a 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -62,6 +62,9 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet from synapse.rest.client.versions import VersionsRestServlet from synapse.server import HomeServer +from synapse.storage.data_stores.main.monthly_active_users import ( + MonthlyActiveUsersWorkerStore, +) from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -85,6 +88,7 @@ class ClientReaderSlavedStore( SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, + MonthlyActiveUsersWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index d0ddbe38fc..58e5b354f6 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -56,6 +56,9 @@ from synapse.rest.client.v1.room import ( RoomStateEventRestServlet, ) from synapse.server import HomeServer +from synapse.storage.data_stores.main.monthly_active_users import ( + MonthlyActiveUsersWorkerStore, +) from synapse.storage.data_stores.main.user_directory import UserDirectoryStore from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole @@ -81,6 +84,7 @@ class EventCreatorSlavedStore( SlavedEventStore, SlavedRegistrationStore, RoomStore, + MonthlyActiveUsersWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 311523e0ed..1f1cea1416 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -46,6 +46,9 @@ from synapse.replication.slave.storage.transactions import 
SlavedTransactionStor from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer +from synapse.storage.data_stores.main.monthly_active_users import ( + MonthlyActiveUsersWorkerStore, +) from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -66,6 +69,7 @@ class FederationReaderSlavedStore( RoomStore, DirectoryStore, SlavedTransactionStore, + MonthlyActiveUsersWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 3218da07bd..8982c0676e 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -54,6 +54,9 @@ from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet from synapse.rest.client.v1.room import RoomInitialSyncRestServlet from synapse.rest.client.v2_alpha import sync from synapse.server import HomeServer +from synapse.storage.data_stores.main.monthly_active_users import ( + MonthlyActiveUsersWorkerStore, +) from synapse.storage.data_stores.main.presence import UserPresenceState from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole @@ -77,6 +80,7 @@ class SynchrotronSlavedStore( SlavedEventStore, SlavedClientIpStore, RoomStore, + MonthlyActiveUsersWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py index 27158534cb..89a41542a3 100644 --- a/synapse/storage/data_stores/main/monthly_active_users.py +++ b/synapse/storage/data_stores/main/monthly_active_users.py @@ -27,12 +27,76 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 60 * 60 * 1000 -class MonthlyActiveUsersStore(SQLBaseStore): +class MonthlyActiveUsersWorkerStore(SQLBaseStore): def __init__(self, database: Database, db_conn, hs): - super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) + super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs) self._clock = hs.get_clock() self.hs = hs + + @cached(num_args=0) + def get_monthly_active_count(self): + """Generates current count of monthly active users + + Returns: + Defered[int]: Number of current monthly active users + """ + + def _count_users(txn): + sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users" + + txn.execute(sql) + (count,) = txn.fetchone() + return count + + return self.db.runInteraction("count_users", _count_users) + + @defer.inlineCallbacks + def get_registered_reserved_users(self): + """Of the reserved threepids defined in config, which are associated + with registered users? 
+ + Returns: + Defered[list]: Real reserved users + """ + users = [] + + for tp in self.hs.config.mau_limits_reserved_threepids[ + : self.hs.config.max_mau_value + ]: + user_id = yield self.hs.get_datastore().get_user_id_by_threepid( + tp["medium"], tp["address"] + ) + if user_id: + users.append(user_id) + + return users + + @cached(num_args=1) + def user_last_seen_monthly_active(self, user_id): + """ + Checks if a given user is part of the monthly active user group + Arguments: + user_id (str): user to add/update + Return: + Deferred[int] : timestamp since last seen, None if never seen + + """ + + return self.db.simple_select_one_onecol( + table="monthly_active_users", + keyvalues={"user_id": user_id}, + retcol="timestamp", + allow_none=True, + desc="user_last_seen_monthly_active", + ) + + +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): + def __init__(self, database: Database, db_conn, hs): + super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) + # Do not add more reserved users than the total allowable number + # cur = LoggingTransaction( self.db.new_transaction( db_conn, "initialise_mau_threepids", @@ -146,57 +210,22 @@ class MonthlyActiveUsersStore(SQLBaseStore): txn.execute(sql, query_args) + # It seems poor to invalidate the whole cache, Postgres supports + # 'Returning' which would allow me to invalidate only the + # specific users, but sqlite has no way to do this and instead + # I would need to SELECT and the DELETE which without locking + # is racy. + # Have resolved to invalidate the whole cache for now and do + # something about it if and when the perf becomes significant + self._invalidate_all_cache_and_stream( + txn, self.user_last_seen_monthly_active + ) + self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) + reserved_users = yield self.get_registered_reserved_users() yield self.db.runInteraction( "reap_monthly_active_users", _reap_users, reserved_users ) - # It seems poor to invalidate the whole cache, Postgres supports - # 'Returning' which would allow me to invalidate only the - # specific users, but sqlite has no way to do this and instead - # I would need to SELECT and the DELETE which without locking - # is racy. - # Have resolved to invalidate the whole cache for now and do - # something about it if and when the perf becomes significant - self.user_last_seen_monthly_active.invalidate_all() - self.get_monthly_active_count.invalidate_all() - - @cached(num_args=0) - def get_monthly_active_count(self): - """Generates current count of monthly active users - - Returns: - Defered[int]: Number of current monthly active users - """ - - def _count_users(txn): - sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users" - - txn.execute(sql) - (count,) = txn.fetchone() - return count - - return self.db.runInteraction("count_users", _count_users) - - @defer.inlineCallbacks - def get_registered_reserved_users(self): - """Of the reserved threepids defined in config, which are associated - with registered users? 
- - Returns: - Defered[list]: Real reserved users - """ - users = [] - - for tp in self.hs.config.mau_limits_reserved_threepids[ - : self.hs.config.max_mau_value - ]: - user_id = yield self.hs.get_datastore().get_user_id_by_threepid( - tp["medium"], tp["address"] - ) - if user_id: - users.append(user_id) - - return users @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): @@ -222,23 +251,9 @@ class MonthlyActiveUsersStore(SQLBaseStore): "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - user_in_mau = self.user_last_seen_monthly_active.cache.get( - (user_id,), None, update_metrics=False - ) - if user_in_mau is None: - self.get_monthly_active_count.invalidate(()) - - self.user_last_seen_monthly_active.invalidate((user_id,)) - def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member - Note that, after calling this method, it will generally be necessary - to invalidate the caches on user_last_seen_monthly_active and - get_monthly_active_count. We can't do that here, because we are running - in a database thread rather than the main thread, and we can't call - txn.call_after because txn may not be a LoggingTransaction. - We consciously do not call is_support_txn from this method because it is not possible to cache the response. is_support_txn will be false in almost all cases, so it seems reasonable to call it only for @@ -269,27 +284,13 @@ class MonthlyActiveUsersStore(SQLBaseStore): values={"timestamp": int(self._clock.time_msec())}, ) - return is_insert - - @cached(num_args=1) - def user_last_seen_monthly_active(self, user_id): - """ - Checks if a given user is part of the monthly active user group - Arguments: - user_id (str): user to add/update - Return: - Deferred[int] : timestamp since last seen, None if never seen - - """ - - return self.db.simple_select_one_onecol( - table="monthly_active_users", - keyvalues={"user_id": user_id}, - retcol="timestamp", - allow_none=True, - desc="user_last_seen_monthly_active", + self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) + self._invalidate_cache_and_stream( + txn, self.user_last_seen_monthly_active, (user_id,) ) + return is_insert + @defer.inlineCallbacks def populate_monthly_active_users(self, user_id): """Checks on the state of monthly active user limits and optionally From aa9b00fb2f9a7718d67fb11621a83035492ed9fb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 22 Jan 2020 11:05:50 +0000 Subject: [PATCH 036/213] Fix and add test to deprecated quarantine media admin api (#6756) --- changelog.d/6756.feature | 1 + synapse/rest/admin/media.py | 2 +- tests/rest/admin/test_admin.py | 15 +++++++++++---- 3 files changed, 13 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6756.feature diff --git a/changelog.d/6756.feature b/changelog.d/6756.feature new file mode 100644 index 0000000000..6328c868f2 --- /dev/null +++ b/changelog.d/6756.feature @@ -0,0 +1 @@ +Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. 
\ No newline at end of file diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 3a445d6eed..ee75095c0e 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -36,7 +36,7 @@ class QuarantineMediaInRoom(RestServlet): historical_admin_path_patterns("/room/(?P[^/]+)/media/quarantine") + # This path kept around for legacy reasons - historical_admin_path_patterns("/quarantine_media/(?P![^/]+)") + historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") ) def __init__(self, hs): diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index f3b4a31e21..af4d604e50 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -516,7 +516,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): ), ) - def test_quarantine_all_media_in_room(self): + def test_quarantine_all_media_in_room(self, override_url_template=None): self.register_user("room_admin", "pass", admin=True) admin_user_tok = self.login("room_admin", "pass") @@ -555,9 +555,12 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): ) # Quarantine all media in the room - url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote( - room_id - ) + if override_url_template: + url = override_url_template % urllib.parse.quote(room_id) + else: + url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote( + room_id + ) request, channel = self.make_request("POST", url, access_token=admin_user_tok,) self.render(request) self.pump(1.0) @@ -611,6 +614,10 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): ), ) + def test_quaraantine_all_media_in_room_deprecated_api_path(self): + # Perform the above test with the deprecated API path + self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s") + def test_quarantine_all_media_by_user(self): self.register_user("user_admin", "pass", admin=True) admin_user_tok = self.login("user_admin", "pass") From ed83c3a018714f20bb660c39e1a42e856eb3bedb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 12:27:42 +0000 Subject: [PATCH 037/213] Fix typo in _select_thumbnail --- synapse/rest/media/v1/thumbnail_resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index eee93b4313..d57480f761 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -331,7 +331,7 @@ class ThumbnailResource(DirectServeResource): ) ) if crop_info_list: - return min(crop_info_list2)[-1] + return min(crop_info_list)[-1] else: return min(crop_info_list2)[-1] else: From 67aa18e8dc81d6d5c0645fc330e049ed7d2e786e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 12:28:07 +0000 Subject: [PATCH 038/213] Add tests for thumbnailing --- tests/rest/media/v1/test_media_storage.py | 48 +++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index bc662b61db..d80a7daed3 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -17,7 +17,7 @@ import os import shutil import tempfile -from binascii import unhexlify +from binascii import unhexlify, hexlify from mock import Mock from six.moves.urllib import parse @@ -149,6 +149,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.media_repo = 
hs.get_media_repository_resource() self.download_resource = self.media_repo.children[b"download"] + self.thumbnail_resource = self.media_repo.children[b"thumbnail"] # smol png self.end_content = unhexlify( @@ -157,10 +158,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): b"0a2db40000000049454e44ae426082" ) + self.media_id = "example.com/12345" + def _req(self, content_disposition): request, channel = self.make_request( - "GET", "example.com/12345", shorthand=False + "GET", self.media_id, shorthand=False ) request.render(self.download_resource) self.pump() @@ -170,7 +173,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.assertEqual(len(self.fetches), 1) self.assertEqual(self.fetches[0][1], "example.com") self.assertEqual( - self.fetches[0][2], "/_matrix/media/v1/download/example.com/12345" + self.fetches[0][2], "/_matrix/media/v1/download/" + self.media_id ) self.assertEqual(self.fetches[0][3], {"allow_remote": "false"}) @@ -229,3 +232,42 @@ class MediaRepoTests(unittest.HomeserverTestCase): headers = channel.headers self.assertEqual(headers.getRawHeaders(b"Content-Type"), [b"image/png"]) self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) + + def test_thumbnail_crop(self): + expected_body = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000020000000200806" + b"000000737a7af40000001a49444154789cedc101010000008220" + b"ffaf6e484001000000ef0610200001194334ee0000000049454e" + b"44ae426082" + ) + + self._test_thumbnail("crop", expected_body) + + def test_thumbnail_scale(self): + expected_body = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000d49444154789c636060606000000005" + b"0001a5f645400000000049454e44ae426082" + ) + + self._test_thumbnail("scale", expected_body) + + def _test_thumbnail(self, method, expected_body): + params = "?width=32&height=32&method=" + method + request, channel = self.make_request( + "GET", self.media_id + params, shorthand=False + ) + request.render(self.thumbnail_resource) + self.pump() + + headers = { + b"Content-Length": [b"%d" % (len(self.end_content))], + b"Content-Type": [b"image/png"], + } + self.fetches[0][0].callback( + (self.end_content, (len(self.end_content), headers)) + ) + self.pump() + + self.assertEqual(channel.code, 200) + self.assertEqual(channel.result["body"], expected_body, channel.result["body"]) From d9a8728b1183c416a89a79856f3b5185386600b2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 12:30:49 +0000 Subject: [PATCH 039/213] Remove unused import --- tests/rest/media/v1/test_media_storage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index d80a7daed3..6345cc7637 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -17,7 +17,7 @@ import os import shutil import tempfile -from binascii import unhexlify, hexlify +from binascii import unhexlify from mock import Mock from six.moves.urllib import parse From 6ae0c8db3335faa9b5f0e4407f7a4a3713c84062 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 12:38:18 +0000 Subject: [PATCH 040/213] Lint + changelog --- changelog.d/6764.misc | 1 + tests/rest/media/v1/test_media_storage.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/6764.misc diff --git a/changelog.d/6764.misc b/changelog.d/6764.misc new file mode 100644 index 0000000000..8edd767405 --- /dev/null +++ 
b/changelog.d/6764.misc @@ -0,0 +1 @@ +Fixup `synapse.rest` to pass mypy. diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 6345cc7637..1809ceb839 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -162,9 +162,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): def _req(self, content_disposition): - request, channel = self.make_request( - "GET", self.media_id, shorthand=False - ) + request, channel = self.make_request("GET", self.media_id, shorthand=False) request.render(self.download_resource) self.pump() From 90a28fb475a29daa9e7a9ee7204f6f76cc8af441 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 22 Jan 2020 13:36:43 +0000 Subject: [PATCH 041/213] Admin API to list, filter and sort rooms (#6720) --- changelog.d/6720.feature | 1 + docs/admin_api/rooms.md | 173 ++++++++++ synapse/rest/admin/__init__.py | 3 +- synapse/rest/admin/_base.py | 15 + synapse/rest/admin/rooms.py | 82 +++++ synapse/rest/client/v2_alpha/_base.py | 2 +- synapse/storage/data_stores/main/room.py | 125 ++++++- tests/rest/admin/test_admin.py | 393 ++++++++++++++++++++++- 8 files changed, 787 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6720.feature create mode 100644 docs/admin_api/rooms.md diff --git a/changelog.d/6720.feature b/changelog.d/6720.feature new file mode 100644 index 0000000000..dfc1b74d62 --- /dev/null +++ b/changelog.d/6720.feature @@ -0,0 +1 @@ +Add a new admin API to list and filter rooms on the server. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md new file mode 100644 index 0000000000..082721ea95 --- /dev/null +++ b/docs/admin_api/rooms.md @@ -0,0 +1,173 @@ +# List Room API + +The List Room admin API allows server admins to get a list of rooms on their +server. There are various parameters available that allow for filtering and +sorting the returned list. This API supports pagination. + +## Parameters + +The following query parameters are available: + +* `from` - Offset in the returned list. Defaults to `0`. +* `limit` - Maximum amount of rooms to return. Defaults to `100`. +* `order_by` - The method in which to sort the returned list of rooms. Valid values are: + - `alphabetical` - Rooms are ordered alphabetically by room name. This is the default. + - `size` - Rooms are ordered by the number of members. Largest to smallest. +* `dir` - Direction of room order. Either `f` for forwards or `b` for backwards. Setting + this value to `b` will reverse the above sort order. Defaults to `f`. +* `search_term` - Filter rooms by their room name. Search term can be contained in any + part of the room name. Defaults to no filtering. + +The following fields are possible in the JSON response body: + +* `rooms` - An array of objects, each containing information about a room. + - Room objects contain the following fields: + - `room_id` - The ID of the room. + - `name` - The name of the room. + - `canonical_alias` - The canonical (main) alias address of the room. + - `joined_members` - How many users are currently in the room. +* `offset` - The current pagination offset in rooms. This parameter should be + used instead of `next_token` for room offset as `next_token` is + not intended to be parsed. +* `total_rooms` - The total number of rooms this query can return. Using this + and `offset`, you have enough information to know the current + progression through the list. 
+* `next_batch` - If this field is present, we know that there are potentially + more rooms on the server that did not all fit into this response. + We can use `next_batch` to get the "next page" of results. To do + so, simply repeat your request, setting the `from` parameter to + the value of `next_batch`. +* `prev_batch` - If this field is present, it is possible to paginate backwards. + Use `prev_batch` for the `from` value in the next request to + get the "previous page" of results. + +## Usage + +A standard request with no filtering: + +``` +GET /_synapse/admin/rooms + +{} +``` + +Response: + +``` +{ + "rooms": [ + { + "room_id": "!OGEhHVWSdvArJzumhm:matrix.org", + "name": "Matrix HQ", + "canonical_alias": "#matrix:matrix.org", + "joined_members": 8326 + }, + ... (8 hidden items) ... + { + "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org", + "name": "This Week In Matrix (TWIM)", + "canonical_alias": "#twim:matrix.org", + "joined_members": 314 + } + ], + "offset": 0, + "total_rooms": 10 +} +``` + +Filtering by room name: + +``` +GET /_synapse/admin/rooms?search_term=TWIM + +{} +``` + +Response: + +``` +{ + "rooms": [ + { + "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org", + "name": "This Week In Matrix (TWIM)", + "canonical_alias": "#twim:matrix.org", + "joined_members": 314 + } + ], + "offset": 0, + "total_rooms": 1 +} +``` + +Paginating through a list of rooms: + +``` +GET /_synapse/admin/rooms?order_by=size + +{} +``` + +Response: + +``` +{ + "rooms": [ + { + "room_id": "!OGEhHVWSdvArJzumhm:matrix.org", + "name": "Matrix HQ", + "canonical_alias": "#matrix:matrix.org", + "joined_members": 8326 + }, + ... (98 hidden items) ... + { + "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org", + "name": "This Week In Matrix (TWIM)", + "canonical_alias": "#twim:matrix.org", + "joined_members": 314 + } + ], + "offset": 0, + "total_rooms": 150 + "next_token": 100 +} +``` + +The presence of the `next_token` parameter tells us that there are more rooms +than returned in this request, and we need to make another request to get them. +To get the next batch of room results, we repeat our request, setting the `from` +parameter to the value of `next_token`. + +``` +GET /_synapse/admin/rooms?order_by=size&from=100 + +{} +``` + +Response: + +``` +{ + "rooms": [ + { + "room_id": "!mscvqgqpHYjBGDxNym:matrix.org", + "name": "Music Theory", + "canonical_alias": "#musictheory:matrix.org", + "joined_members": 127 + }, + ... (48 hidden items) ... + { + "room_id": "!twcBhHVdZlQWuuxBhN:termina.org.uk", + "name": "weechat-matrix", + "canonical_alias": "#weechat-matrix:termina.org.uk", + "joined_members": 137 + } + ], + "offset": 100, + "prev_batch": 0, + "total_rooms": 150 +} +``` + +Once the `next_token` parameter is no longer present, we know we've reached the +end of the list. 
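To make the paging contract concrete, a hypothetical admin client could drain the whole room list as follows. This is an illustrative sketch only: it assumes the third-party `requests` package and a server-admin access token, and the host name is a placeholder rather than part of the API.

import requests

BASE_URL = "https://homeserver.example.com/_synapse/admin/v1/rooms"
HEADERS = {"Authorization": "Bearer <admin-access-token>"}

def iter_all_rooms(order_by="size"):
    """Yield every room dict, following next_batch until it disappears."""
    params = {"order_by": order_by, "from": 0}
    while True:
        resp = requests.get(BASE_URL, headers=HEADERS, params=params)
        resp.raise_for_status()
        body = resp.json()
        yield from body["rooms"]
        if "next_batch" not in body:
            break  # reached the end of the list
        params["from"] = body["next_batch"]

for room in iter_all_rooms():
    print(room["room_id"], room["joined_members"])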
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 2932fe2123..42cc2b062a 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -29,7 +29,7 @@ from synapse.rest.admin._base import ( from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet -from synapse.rest.admin.rooms import ShutdownRoomRestServlet +from synapse.rest.admin.rooms import ListRoomRestServlet, ShutdownRoomRestServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.rest.admin.users import ( AccountValidityRenewServlet, @@ -188,6 +188,7 @@ def register_servlets(hs, http_server): Register all the admin servlets. """ register_servlets_for_client_rest_resource(hs, http_server) + ListRoomRestServlet(hs).register(http_server) PurgeRoomServlet(hs).register(http_server) SendServerNoticeServlet(hs).register(http_server) VersionServlet(hs).register(http_server) diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index afd0647205..459482eb6d 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -40,6 +40,21 @@ def historical_admin_path_patterns(path_regex): ) +def admin_patterns(path_regex: str): + """Returns the list of patterns for an admin endpoint + + Args: + path_regex: The regex string to match. This should NOT have a ^ + as this will be prefixed. + + Returns: + A list of regex patterns. + """ + admin_prefix = "^/_synapse/admin/v1" + patterns = [re.compile(admin_prefix + path_regex)] + return patterns + + async def assert_requester_is_admin(auth, request): """Verify that the requester is an admin user diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index f7cc5e9be9..f9b8c0a4f0 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -15,15 +15,20 @@ import logging from synapse.api.constants import Membership +from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, + parse_integer, parse_json_object_from_request, + parse_string, ) from synapse.rest.admin._base import ( + admin_patterns, assert_user_is_admin, historical_admin_path_patterns, ) +from synapse.storage.data_stores.main.room import RoomSortOrder from synapse.types import create_requester from synapse.util.async_helpers import maybe_awaitable @@ -155,3 +160,80 @@ class ShutdownRoomRestServlet(RestServlet): "new_room_id": new_room_id, }, ) + + +class ListRoomRestServlet(RestServlet): + """ + List all rooms that are known to the homeserver. Results are returned + in a dictionary containing room information. Supports pagination. 
+ """ + + PATTERNS = admin_patterns("/rooms") + + def __init__(self, hs): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + self.admin_handler = hs.get_handlers().admin_handler + + async def on_GET(self, request): + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + # Extract query parameters + start = parse_integer(request, "from", default=0) + limit = parse_integer(request, "limit", default=100) + order_by = parse_string(request, "order_by", default="alphabetical") + if order_by not in ( + RoomSortOrder.ALPHABETICAL.value, + RoomSortOrder.SIZE.value, + ): + raise SynapseError( + 400, + "Unknown value for order_by: %s" % (order_by,), + errcode=Codes.INVALID_PARAM, + ) + + search_term = parse_string(request, "search_term") + if search_term == "": + raise SynapseError( + 400, + "search_term cannot be an empty string", + errcode=Codes.INVALID_PARAM, + ) + + direction = parse_string(request, "dir", default="f") + if direction not in ("f", "b"): + raise SynapseError( + 400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM + ) + + reverse_order = True if direction == "b" else False + + # Return list of rooms according to parameters + rooms, total_rooms = await self.store.get_rooms_paginate( + start, limit, order_by, reverse_order, search_term + ) + response = { + # next_token should be opaque, so return a value the client can parse + "offset": start, + "rooms": rooms, + "total_rooms": total_rooms, + } + + # Are there more rooms to paginate through after this? + if (start + limit) < total_rooms: + # There are. Calculate where the query should start from next time + # to get the next part of the list + response["next_batch"] = start + limit + + # Is it possible to paginate backwards? Check if we currently have an + # offset + if start > 0: + if start > limit: + # Going back one iteration won't take us to the start. + # Calculate new offset + response["prev_batch"] = start - limit + else: + response["prev_batch"] = 0 + + return 200, response diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 2a3f4dd58f..bc11b4dda4 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -32,7 +32,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): Args: path_regex (str): The regex string to match. This should NOT have a ^ - as this will be prefixed. + as this will be prefixed. 
Returns: SRE_Pattern """ diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index 49bab62be3..d968803ad2 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -18,7 +18,8 @@ import collections import logging import re from abc import abstractmethod -from typing import List, Optional, Tuple +from enum import Enum +from typing import Any, Dict, List, Optional, Tuple from six import integer_types @@ -46,6 +47,18 @@ RatelimitOverride = collections.namedtuple( ) +class RoomSortOrder(Enum): + """ + Enum to define the sorting method used when returning rooms with get_rooms_paginate + + ALPHABETICAL = sort rooms alphabetically by name + SIZE = sort rooms by membership size, highest to lowest + """ + + ALPHABETICAL = "alphabetical" + SIZE = "size" + + class RoomWorkerStore(SQLBaseStore): def __init__(self, database: Database, db_conn, hs): super(RoomWorkerStore, self).__init__(database, db_conn, hs) @@ -281,6 +294,116 @@ class RoomWorkerStore(SQLBaseStore): desc="is_room_blocked", ) + async def get_rooms_paginate( + self, + start: int, + limit: int, + order_by: RoomSortOrder, + reverse_order: bool, + search_term: Optional[str], + ) -> Tuple[List[Dict[str, Any]], int]: + """Function to retrieve a paginated list of rooms as json. + + Args: + start: offset in the list + limit: maximum amount of rooms to retrieve + order_by: the sort order of the returned list + reverse_order: whether to reverse the room list + search_term: a string to filter room names by + Returns: + A list of room dicts and an integer representing the total number of + rooms that exist given this query + """ + # Filter room names by a string + where_statement = "" + if search_term: + where_statement = "WHERE state.name LIKE ?" + + # Our postgres db driver converts ? -> %s in SQL strings as that's the + # placeholder for postgres. + # HOWEVER, if you put a % into your SQL then everything goes wibbly. + # To get around this, we're going to surround search_term with %'s + # before giving it to the database in python instead + search_term = "%" + search_term + "%" + + # Set ordering + if RoomSortOrder(order_by) == RoomSortOrder.SIZE: + order_by_column = "curr.joined_members" + order_by_asc = False + elif RoomSortOrder(order_by) == RoomSortOrder.ALPHABETICAL: + # Sort alphabetically + order_by_column = "state.name" + order_by_asc = True + else: + raise StoreError( + 500, "Incorrect value for order_by provided: %s" % order_by + ) + + # Whether to return the list in reverse order + if reverse_order: + # Flip the boolean + order_by_asc = not order_by_asc + + # Create one query for getting the limited number of events that the user asked + # for, and another query for getting the total number of events that could be + # returned. Thus allowing us to see if there are more events to paginate through + info_sql = """ + SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members + FROM room_stats_state state + INNER JOIN room_stats_current curr USING (room_id) + %s + ORDER BY %s %s + LIMIT ? + OFFSET ? 
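+            -- Note: the WHERE clause, ORDER BY column and sort direction
+            -- are interpolated by Python string formatting just below,
+            -- while LIMIT and OFFSET are bound as query parameters when
+            -- the transaction function executes the statement.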
+ """ % ( + where_statement, + order_by_column, + "ASC" if order_by_asc else "DESC", + ) + + # Use a nested SELECT statement as SQL can't count(*) with an OFFSET + count_sql = """ + SELECT count(*) FROM ( + SELECT room_id FROM room_stats_state state + %s + ) AS get_room_ids + """ % ( + where_statement, + ) + + def _get_rooms_paginate_txn(txn): + # Execute the data query + sql_values = (limit, start) + if search_term: + # Add the search term into the WHERE clause + sql_values = (search_term,) + sql_values + txn.execute(info_sql, sql_values) + + # Refactor room query data into a structured dictionary + rooms = [] + for room in txn: + rooms.append( + { + "room_id": room[0], + "name": room[1], + "canonical_alias": room[2], + "joined_members": room[3], + } + ) + + # Execute the count query + + # Add the search term into the WHERE clause if present + sql_values = (search_term,) if search_term else () + txn.execute(count_sql, sql_values) + + room_count = txn.fetchone() + return rooms, room_count[0] + + return await self.db.runInteraction( + "get_rooms_paginate", _get_rooms_paginate_txn, + ) + @cachedInlineCallbacks(max_entries=10000) def get_ratelimit_for_user(self, user_id): """Check if there are any overrides for ratelimiting for the given diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index af4d604e50..0342aed416 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -17,6 +17,7 @@ import json import os import urllib.parse from binascii import unhexlify +from typing import List, Optional from mock import Mock @@ -26,7 +27,7 @@ import synapse.rest.admin from synapse.http.server import JsonResource from synapse.logging.context import make_deferred_yieldable from synapse.rest.admin import VersionServlet -from synapse.rest.client.v1 import events, login, room +from synapse.rest.client.v1 import directory, events, login, room from synapse.rest.client.v2_alpha import groups from tests import unittest @@ -468,9 +469,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): ) # Extract media ID from the response - server_name_and_media_id = response["content_uri"][ - 6: - ] # Cut off the 'mxc://' bit + server_name_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' server_name, media_id = server_name_and_media_id.split("/") # Attempt to access the media @@ -692,3 +691,389 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): % server_and_media_id_2 ), ) + + +class RoomTestCase(unittest.HomeserverTestCase): + """Test /room admin API. 
+ """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + # Create user + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + def test_list_rooms(self): + """Test that we can list rooms""" + # Create 3 test rooms + total_rooms = 3 + room_ids = [] + for x in range(total_rooms): + room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + room_ids.append(room_id) + + # Request the list of rooms + url = "/_synapse/admin/v1/rooms" + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + + # Check request completed successfully + self.assertEqual(200, int(channel.code), msg=channel.json_body) + + # Check that response json body contains a "rooms" key + self.assertTrue( + "rooms" in channel.json_body, + msg="Response body does not " "contain a 'rooms' key", + ) + + # Check that 3 rooms were returned + self.assertEqual(3, len(channel.json_body["rooms"]), msg=channel.json_body) + + # Check their room_ids match + returned_room_ids = [room["room_id"] for room in channel.json_body["rooms"]] + self.assertEqual(room_ids, returned_room_ids) + + # Check that all fields are available + for r in channel.json_body["rooms"]: + self.assertIn("name", r) + self.assertIn("canonical_alias", r) + self.assertIn("joined_members", r) + + # Check that the correct number of total rooms was returned + self.assertEqual(channel.json_body["total_rooms"], total_rooms) + + # Check that the offset is correct + # Should be 0 as we aren't paginating + self.assertEqual(channel.json_body["offset"], 0) + + # Check that the prev_batch parameter is not present + self.assertNotIn("prev_batch", channel.json_body) + + # We shouldn't receive a next token here as there's no further rooms to show + self.assertNotIn("next_batch", channel.json_body) + + def test_list_rooms_pagination(self): + """Test that we can get a full list of rooms through pagination""" + # Create 5 test rooms + total_rooms = 5 + room_ids = [] + for x in range(total_rooms): + room_id = self.helper.create_room_as( + self.admin_user, tok=self.admin_user_tok + ) + room_ids.append(room_id) + + # Set the name of the rooms so we get a consistent returned ordering + for idx, room_id in enumerate(room_ids): + self.helper.send_state( + room_id, "m.room.name", {"name": str(idx)}, tok=self.admin_user_tok, + ) + + # Request the list of rooms + returned_room_ids = [] + start = 0 + limit = 2 + + run_count = 0 + should_repeat = True + while should_repeat: + run_count += 1 + + url = "/_synapse/admin/v1/rooms?from=%d&limit=%d&order_by=%s" % ( + start, + limit, + "alphabetical", + ) + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual( + 200, int(channel.result["code"]), msg=channel.result["body"] + ) + + self.assertTrue("rooms" in channel.json_body) + for r in channel.json_body["rooms"]: + returned_room_ids.append(r["room_id"]) + + # Check that the correct number of total rooms was returned + self.assertEqual(channel.json_body["total_rooms"], total_rooms) + + # Check that the offset is correct + # We're only getting 2 rooms each page, so should be 2 * last run_count + self.assertEqual(channel.json_body["offset"], 2 * (run_count - 1)) + + if 
run_count > 1: + # Check the value of prev_batch is correct + self.assertEqual(channel.json_body["prev_batch"], 2 * (run_count - 2)) + + if "next_batch" not in channel.json_body: + # We have reached the end of the list + should_repeat = False + else: + # Make another query with an updated start value + start = channel.json_body["next_batch"] + + # We should've queried the endpoint 3 times + self.assertEqual( + run_count, + 3, + msg="Should've queried 3 times for 5 rooms with limit 2 per query", + ) + + # Check that we received all of the room ids + self.assertEqual(room_ids, returned_room_ids) + + url = "/_synapse/admin/v1/rooms?from=%d&limit=%d" % (start, limit) + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + def test_correct_room_attributes(self): + """Test the correct attributes for a room are returned""" + # Create a test room + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + test_alias = "#test:test" + test_room_name = "something" + + # Have another user join the room + user_2 = self.register_user("user4", "pass") + user_tok_2 = self.login("user4", "pass") + self.helper.join(room_id, user_2, tok=user_tok_2) + + # Create a new alias to this room + url = "/_matrix/client/r0/directory/room/%s" % (urllib.parse.quote(test_alias),) + request, channel = self.make_request( + "PUT", + url.encode("ascii"), + {"room_id": room_id}, + access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Set this new alias as the canonical alias for this room + self.helper.send_state( + room_id, + "m.room.aliases", + {"aliases": [test_alias]}, + tok=self.admin_user_tok, + state_key="test", + ) + self.helper.send_state( + room_id, + "m.room.canonical_alias", + {"alias": test_alias}, + tok=self.admin_user_tok, + ) + + # Set a name for the room + self.helper.send_state( + room_id, "m.room.name", {"name": test_room_name}, tok=self.admin_user_tok, + ) + + # Request the list of rooms + url = "/_synapse/admin/v1/rooms" + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + + # Check that rooms were returned + self.assertTrue("rooms" in channel.json_body) + rooms = channel.json_body["rooms"] + + # Check that only one room was returned + self.assertEqual(len(rooms), 1) + + # And that the value of the total_rooms key was correct + self.assertEqual(channel.json_body["total_rooms"], 1) + + # Check that the offset is correct + # We're not paginating, so should be 0 + self.assertEqual(channel.json_body["offset"], 0) + + # Check that there is no `prev_batch` + self.assertNotIn("prev_batch", channel.json_body) + + # Check that there is no `next_batch` + self.assertNotIn("next_batch", channel.json_body) + + # Check that all provided attributes are set + r = rooms[0] + self.assertEqual(room_id, r["room_id"]) + self.assertEqual(test_room_name, r["name"]) + self.assertEqual(test_alias, r["canonical_alias"]) + + def test_room_list_sort_order(self): + """Test room list sort ordering. alphabetical versus number of members, + reversing the order, etc. 
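+
+        The setup below names the rooms "A", "B" and "C" (so alphabetical
+        order is room 1, 2, 3) and gives them 1, 2 and 3 joined members
+        respectively (so size order is room 3, 2, 1).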
+ """ + # Create 3 test rooms + room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + # Set room names in alphabetical order. room 1 -> A, 2 -> B, 3 -> C + self.helper.send_state( + room_id_1, "m.room.name", {"name": "A"}, tok=self.admin_user_tok, + ) + self.helper.send_state( + room_id_2, "m.room.name", {"name": "B"}, tok=self.admin_user_tok, + ) + self.helper.send_state( + room_id_3, "m.room.name", {"name": "C"}, tok=self.admin_user_tok, + ) + + # Set room member size in the reverse order. room 1 -> 1 member, 2 -> 2, 3 -> 3 + user_1 = self.register_user("bob1", "pass") + user_1_tok = self.login("bob1", "pass") + self.helper.join(room_id_2, user_1, tok=user_1_tok) + + user_2 = self.register_user("bob2", "pass") + user_2_tok = self.login("bob2", "pass") + self.helper.join(room_id_3, user_2, tok=user_2_tok) + + user_3 = self.register_user("bob3", "pass") + user_3_tok = self.login("bob3", "pass") + self.helper.join(room_id_3, user_3, tok=user_3_tok) + + def _order_test( + order_type: str, expected_room_list: List[str], reverse: bool = False, + ): + """Request the list of rooms in a certain order. Assert that order is what + we expect + + Args: + order_type: The type of ordering to give the server + expected_room_list: The list of room_ids in the order we expect to get + back from the server + """ + # Request the list of rooms in the given order + url = "/_synapse/admin/v1/rooms?order_by=%s" % (order_type,) + if reverse: + url += "&dir=b" + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(200, channel.code, msg=channel.json_body) + + # Check that rooms were returned + self.assertTrue("rooms" in channel.json_body) + rooms = channel.json_body["rooms"] + + # Check for the correct total_rooms value + self.assertEqual(channel.json_body["total_rooms"], 3) + + # Check that the offset is correct + # We're not paginating, so should be 0 + self.assertEqual(channel.json_body["offset"], 0) + + # Check that there is no `prev_batch` + self.assertNotIn("prev_batch", channel.json_body) + + # Check that there is no `next_batch` + self.assertNotIn("next_batch", channel.json_body) + + # Check that rooms were returned in alphabetical order + returned_order = [r["room_id"] for r in rooms] + self.assertListEqual(expected_room_list, returned_order) # order is checked + + # Test different sort orders, with forward and reverse directions + _order_test("alphabetical", [room_id_1, room_id_2, room_id_3]) + _order_test("alphabetical", [room_id_3, room_id_2, room_id_1], reverse=True) + + _order_test("size", [room_id_3, room_id_2, room_id_1]) + _order_test("size", [room_id_1, room_id_2, room_id_3], reverse=True) + + def test_search_term(self): + """Test that searching for a room works correctly""" + # Create two test rooms + room_id_1 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + room_name_1 = "something" + room_name_2 = "else" + + # Set the name for each room + self.helper.send_state( + room_id_1, "m.room.name", {"name": room_name_1}, tok=self.admin_user_tok, + ) + self.helper.send_state( + room_id_2, "m.room.name", {"name": room_name_2}, tok=self.admin_user_tok, + ) + + def _search_test( + expected_room_id: 
Optional[str], + search_term: str, + expected_http_code: int = 200, + ): + """Search for a room and check that the returned room's id is a match + + Args: + expected_room_id: The room_id expected to be returned by the API. Set + to None to expect zero results for the search + search_term: The term to search for room names with + expected_http_code: The expected http code for the request + """ + url = "/_synapse/admin/v1/rooms?search_term=%s" % (search_term,) + request, channel = self.make_request( + "GET", url.encode("ascii"), access_token=self.admin_user_tok, + ) + self.render(request) + self.assertEqual(expected_http_code, channel.code, msg=channel.json_body) + + if expected_http_code != 200: + return + + # Check that rooms were returned + self.assertTrue("rooms" in channel.json_body) + rooms = channel.json_body["rooms"] + + # Check that the expected number of rooms were returned + expected_room_count = 1 if expected_room_id else 0 + self.assertEqual(len(rooms), expected_room_count) + self.assertEqual(channel.json_body["total_rooms"], expected_room_count) + + # Check that the offset is correct + # We're not paginating, so should be 0 + self.assertEqual(channel.json_body["offset"], 0) + + # Check that there is no `prev_batch` + self.assertNotIn("prev_batch", channel.json_body) + + # Check that there is no `next_batch` + self.assertNotIn("next_batch", channel.json_body) + + if expected_room_id: + # Check that the first returned room id is correct + r = rooms[0] + self.assertEqual(expected_room_id, r["room_id"]) + + # Perform search tests + _search_test(room_id_1, "something") + _search_test(room_id_1, "thing") + + _search_test(room_id_2, "else") + _search_test(room_id_2, "se") + + _search_test(None, "foo") + _search_test(None, "bar") + _search_test(None, "", expected_http_code=400) From 0d0f32bc53fefb5eb444940998b97594da894967 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 14:03:46 +0000 Subject: [PATCH 042/213] 1.9.0rc1 --- CHANGES.md | 70 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5742.feature | 1 - changelog.d/6621.doc | 1 - changelog.d/6624.doc | 1 - changelog.d/6654.bugfix | 1 - changelog.d/6655.misc | 1 - changelog.d/6656.doc | 1 - changelog.d/6663.doc | 1 - changelog.d/6664.bugfix | 1 - changelog.d/6665.doc | 1 - changelog.d/6666.misc | 1 - changelog.d/6667.misc | 1 - changelog.d/6675.removal | 1 - changelog.d/6681.feature | 1 - changelog.d/6682.bugfix | 2 -- changelog.d/6685.doc | 1 - changelog.d/6686.misc | 1 - changelog.d/6687.misc | 1 - changelog.d/6688.misc | 1 - changelog.d/6689.misc | 1 - changelog.d/6690.bugfix | 1 - changelog.d/6691.misc | 1 - changelog.d/6697.misc | 1 - changelog.d/6698.doc | 1 - changelog.d/6702.misc | 1 - changelog.d/6706.misc | 1 - changelog.d/6711.bugfix | 1 - changelog.d/6712.feature | 1 - changelog.d/6714.bugfix | 1 - changelog.d/6715.misc | 1 - changelog.d/6716.misc | 1 - changelog.d/6717.misc | 1 - changelog.d/6718.bugfix | 1 - changelog.d/6720.feature | 1 - changelog.d/6723.misc | 1 - changelog.d/6724.misc | 1 - changelog.d/6728.misc | 1 - changelog.d/6730.bugfix | 1 - changelog.d/6731.bugfix | 1 - changelog.d/6732.misc | 1 - changelog.d/6733.misc | 1 - changelog.d/6742.bugfix | 1 - changelog.d/6747.bugfix | 1 - changelog.d/6749.misc | 1 - changelog.d/6753.bugfix | 1 - changelog.d/6754.misc | 1 - changelog.d/6756.feature | 1 - changelog.d/6764.misc | 1 - synapse/__init__.py | 2 +- 49 files changed, 71 insertions(+), 49 deletions(-) delete mode 100644 changelog.d/5742.feature delete mode 100644 
changelog.d/6621.doc delete mode 100644 changelog.d/6624.doc delete mode 100644 changelog.d/6654.bugfix delete mode 100644 changelog.d/6655.misc delete mode 100644 changelog.d/6656.doc delete mode 100644 changelog.d/6663.doc delete mode 100644 changelog.d/6664.bugfix delete mode 100644 changelog.d/6665.doc delete mode 100644 changelog.d/6666.misc delete mode 100644 changelog.d/6667.misc delete mode 100644 changelog.d/6675.removal delete mode 100644 changelog.d/6681.feature delete mode 100644 changelog.d/6682.bugfix delete mode 100644 changelog.d/6685.doc delete mode 100644 changelog.d/6686.misc delete mode 100644 changelog.d/6687.misc delete mode 100644 changelog.d/6688.misc delete mode 100644 changelog.d/6689.misc delete mode 100644 changelog.d/6690.bugfix delete mode 100644 changelog.d/6691.misc delete mode 100644 changelog.d/6697.misc delete mode 100644 changelog.d/6698.doc delete mode 100644 changelog.d/6702.misc delete mode 100644 changelog.d/6706.misc delete mode 100644 changelog.d/6711.bugfix delete mode 100644 changelog.d/6712.feature delete mode 100644 changelog.d/6714.bugfix delete mode 100644 changelog.d/6715.misc delete mode 100644 changelog.d/6716.misc delete mode 100644 changelog.d/6717.misc delete mode 100644 changelog.d/6718.bugfix delete mode 100644 changelog.d/6720.feature delete mode 100644 changelog.d/6723.misc delete mode 100644 changelog.d/6724.misc delete mode 100644 changelog.d/6728.misc delete mode 100644 changelog.d/6730.bugfix delete mode 100644 changelog.d/6731.bugfix delete mode 100644 changelog.d/6732.misc delete mode 100644 changelog.d/6733.misc delete mode 100644 changelog.d/6742.bugfix delete mode 100644 changelog.d/6747.bugfix delete mode 100644 changelog.d/6749.misc delete mode 100644 changelog.d/6753.bugfix delete mode 100644 changelog.d/6754.misc delete mode 100644 changelog.d/6756.feature delete mode 100644 changelog.d/6764.misc diff --git a/CHANGES.md b/CHANGES.md index c8840e9c74..7629a7e8ce 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,73 @@ +Synapse 1.9.0rc1 (2020-01-22) +============================= + +Features +-------- + +- Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5742](https://github.com/matrix-org/synapse/issues/5742)) +- Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. ([\#6681](https://github.com/matrix-org/synapse/issues/6681), [\#6756](https://github.com/matrix-org/synapse/issues/6756)) +- Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712)) +- Add a new admin API to list and filter rooms on the server. ([\#6720](https://github.com/matrix-org/synapse/issues/6720)) + + +Bugfixes +-------- + +- Correctly proxy HTTP errors due to API calls to remote group servers. ([\#6654](https://github.com/matrix-org/synapse/issues/6654)) +- Fix media repo admin APIs when using a media worker. ([\#6664](https://github.com/matrix-org/synapse/issues/6664)) +- Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. ([\#6682](https://github.com/matrix-org/synapse/issues/6682)) +- Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690)) +- Fix `purge_room` admin API. 
([\#6711](https://github.com/matrix-org/synapse/issues/6711)) +- Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. ([\#6714](https://github.com/matrix-org/synapse/issues/6714)) +- Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case. ([\#6718](https://github.com/matrix-org/synapse/issues/6718)) +- Fix changing password via user admin API. ([\#6730](https://github.com/matrix-org/synapse/issues/6730)) +- Fix `/events/:event_id` deprecated API. ([\#6731](https://github.com/matrix-org/synapse/issues/6731)) +- Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). ([\#6742](https://github.com/matrix-org/synapse/issues/6742)) +- Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. ([\#6747](https://github.com/matrix-org/synapse/issues/6747)) +- Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. ([\#6753](https://github.com/matrix-org/synapse/issues/6753)) + + +Improved Documentation +---------------------- + +- Fix a typo in the configuration example for purge jobs in the sample configuration file. ([\#6621](https://github.com/matrix-org/synapse/issues/6621)) +- Add complete documentation of the message retention policies support. ([\#6624](https://github.com/matrix-org/synapse/issues/6624), [\#6665](https://github.com/matrix-org/synapse/issues/6665)) +- No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656)) +- Add some helpful tips about changelog entries to the github pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663)) +- Clarify the `account_validity` and `email` sections of the sample configuration. ([\#6685](https://github.com/matrix-org/synapse/issues/6685)) +- Add more endpoints to the documentation for Synapse workers. ([\#6698](https://github.com/matrix-org/synapse/issues/6698)) + + +Deprecations and Removals +------------------------- + +- Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). ([\#6675](https://github.com/matrix-org/synapse/issues/6675)) + + +Internal Changes +---------------- + +- Add `local_current_membership` table for tracking local user membership state in rooms. ([\#6655](https://github.com/matrix-org/synapse/issues/6655), [\#6728](https://github.com/matrix-org/synapse/issues/6728)) +- Port `synapse.replication.tcp` to async/await. ([\#6666](https://github.com/matrix-org/synapse/issues/6666)) +- Fixup `synapse.replication` to pass mypy checks. ([\#6667](https://github.com/matrix-org/synapse/issues/6667)) +- Allow additional_resources to implement IResource directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686)) +- Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687)) +- Updates and extensions to the module API. ([\#6688](https://github.com/matrix-org/synapse/issues/6688)) +- Updates to the SAML mapping provider API. 
([\#6689](https://github.com/matrix-org/synapse/issues/6689), [\#6723](https://github.com/matrix-org/synapse/issues/6723)) +- Remove redundant RegistrationError class. ([\#6691](https://github.com/matrix-org/synapse/issues/6691)) +- Don't block processing of incoming EDUs behind processing PDUs in the same transaction. ([\#6697](https://github.com/matrix-org/synapse/issues/6697)) +- Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. ([\#6702](https://github.com/matrix-org/synapse/issues/6702)) +- Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. ([\#6706](https://github.com/matrix-org/synapse/issues/6706)) +- Add StateMap type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715)) +- Add a `DeltaState` to track changes to be made to current state during event persistence. ([\#6716](https://github.com/matrix-org/synapse/issues/6716)) +- Add more logging around message retention policies support. ([\#6717](https://github.com/matrix-org/synapse/issues/6717)) +- When processing a SAML response, log the assertions for easier configuration. ([\#6724](https://github.com/matrix-org/synapse/issues/6724)) +- Fixup `synapse.rest` to pass mypy. ([\#6732](https://github.com/matrix-org/synapse/issues/6732), [\#6764](https://github.com/matrix-org/synapse/issues/6764)) +- Fixup synapse.api to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733)) +- Allow streaming cache 'invalidate all' to workers. ([\#6749](https://github.com/matrix-org/synapse/issues/6749)) +- Remove unused CI docker compose files. ([\#6754](https://github.com/matrix-org/synapse/issues/6754)) + + Synapse 1.8.0 (2020-01-09) ========================== diff --git a/changelog.d/5742.feature b/changelog.d/5742.feature deleted file mode 100644 index de10302275..0000000000 --- a/changelog.d/5742.feature +++ /dev/null @@ -1 +0,0 @@ -Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. diff --git a/changelog.d/6621.doc b/changelog.d/6621.doc deleted file mode 100644 index 6722ccfda3..0000000000 --- a/changelog.d/6621.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the configuration example for purge jobs in the sample configuration file. diff --git a/changelog.d/6624.doc b/changelog.d/6624.doc deleted file mode 100644 index bc9a022db2..0000000000 --- a/changelog.d/6624.doc +++ /dev/null @@ -1 +0,0 @@ -Add complete documentation of the message retention policies support. diff --git a/changelog.d/6654.bugfix b/changelog.d/6654.bugfix deleted file mode 100644 index fed35252db..0000000000 --- a/changelog.d/6654.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly proxy HTTP errors due to API calls to remote group servers. diff --git a/changelog.d/6655.misc b/changelog.d/6655.misc deleted file mode 100644 index 01e78bc84e..0000000000 --- a/changelog.d/6655.misc +++ /dev/null @@ -1 +0,0 @@ -Add `local_current_membership` table for tracking local user membership state in rooms. diff --git a/changelog.d/6656.doc b/changelog.d/6656.doc deleted file mode 100644 index 9f32da1a88..0000000000 --- a/changelog.d/6656.doc +++ /dev/null @@ -1 +0,0 @@ -No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer. 
diff --git a/changelog.d/6663.doc b/changelog.d/6663.doc deleted file mode 100644 index 83b9c1626a..0000000000 --- a/changelog.d/6663.doc +++ /dev/null @@ -1 +0,0 @@ -Add some helpful tips about changelog entries to the github pull request template. \ No newline at end of file diff --git a/changelog.d/6664.bugfix b/changelog.d/6664.bugfix deleted file mode 100644 index 8c6a6fa1c8..0000000000 --- a/changelog.d/6664.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix media repo admin APIs when using a media worker. diff --git a/changelog.d/6665.doc b/changelog.d/6665.doc deleted file mode 100644 index bc9a022db2..0000000000 --- a/changelog.d/6665.doc +++ /dev/null @@ -1 +0,0 @@ -Add complete documentation of the message retention policies support. diff --git a/changelog.d/6666.misc b/changelog.d/6666.misc deleted file mode 100644 index e79c23d2d2..0000000000 --- a/changelog.d/6666.misc +++ /dev/null @@ -1 +0,0 @@ -Port `synapse.replication.tcp` to async/await. diff --git a/changelog.d/6667.misc b/changelog.d/6667.misc deleted file mode 100644 index 227f80a508..0000000000 --- a/changelog.d/6667.misc +++ /dev/null @@ -1 +0,0 @@ -Fixup `synapse.replication` to pass mypy checks. diff --git a/changelog.d/6675.removal b/changelog.d/6675.removal deleted file mode 100644 index 95df9a2d83..0000000000 --- a/changelog.d/6675.removal +++ /dev/null @@ -1 +0,0 @@ -Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). diff --git a/changelog.d/6681.feature b/changelog.d/6681.feature deleted file mode 100644 index 5cf19a4e0e..0000000000 --- a/changelog.d/6681.feature +++ /dev/null @@ -1 +0,0 @@ -Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. diff --git a/changelog.d/6682.bugfix b/changelog.d/6682.bugfix deleted file mode 100644 index d48ea31477..0000000000 --- a/changelog.d/6682.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. - diff --git a/changelog.d/6685.doc b/changelog.d/6685.doc deleted file mode 100644 index 7cf750fe3f..0000000000 --- a/changelog.d/6685.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify the `account_validity` and `email` sections of the sample configuration. \ No newline at end of file diff --git a/changelog.d/6686.misc b/changelog.d/6686.misc deleted file mode 100644 index 4070f2e563..0000000000 --- a/changelog.d/6686.misc +++ /dev/null @@ -1 +0,0 @@ -Allow additional_resources to implement IResource directly. diff --git a/changelog.d/6687.misc b/changelog.d/6687.misc deleted file mode 100644 index deb0454602..0000000000 --- a/changelog.d/6687.misc +++ /dev/null @@ -1 +0,0 @@ -Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location. diff --git a/changelog.d/6688.misc b/changelog.d/6688.misc deleted file mode 100644 index 2a9f28ce5c..0000000000 --- a/changelog.d/6688.misc +++ /dev/null @@ -1 +0,0 @@ -Updates and extensions to the module API. \ No newline at end of file diff --git a/changelog.d/6689.misc b/changelog.d/6689.misc deleted file mode 100644 index 17f15e73a8..0000000000 --- a/changelog.d/6689.misc +++ /dev/null @@ -1 +0,0 @@ -Updates to the SAML mapping provider API. 
diff --git a/changelog.d/6690.bugfix b/changelog.d/6690.bugfix deleted file mode 100644 index 30ce1dc9f7..0000000000 --- a/changelog.d/6690.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username. diff --git a/changelog.d/6691.misc b/changelog.d/6691.misc deleted file mode 100644 index 104e9ce648..0000000000 --- a/changelog.d/6691.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant RegistrationError class. diff --git a/changelog.d/6697.misc b/changelog.d/6697.misc deleted file mode 100644 index 5650387804..0000000000 --- a/changelog.d/6697.misc +++ /dev/null @@ -1 +0,0 @@ -Don't block processing of incoming EDUs behind processing PDUs in the same transaction. diff --git a/changelog.d/6698.doc b/changelog.d/6698.doc deleted file mode 100644 index 5aba51252d..0000000000 --- a/changelog.d/6698.doc +++ /dev/null @@ -1 +0,0 @@ -Add more endpoints to the documentation for Synapse workers. diff --git a/changelog.d/6702.misc b/changelog.d/6702.misc deleted file mode 100644 index f7bc98409c..0000000000 --- a/changelog.d/6702.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. \ No newline at end of file diff --git a/changelog.d/6706.misc b/changelog.d/6706.misc deleted file mode 100644 index 1ac11cc04b..0000000000 --- a/changelog.d/6706.misc +++ /dev/null @@ -1 +0,0 @@ -Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. diff --git a/changelog.d/6711.bugfix b/changelog.d/6711.bugfix deleted file mode 100644 index c70506bd88..0000000000 --- a/changelog.d/6711.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `purge_room` admin API. diff --git a/changelog.d/6712.feature b/changelog.d/6712.feature deleted file mode 100644 index 2cce0ecf88..0000000000 --- a/changelog.d/6712.feature +++ /dev/null @@ -1 +0,0 @@ -Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). diff --git a/changelog.d/6714.bugfix b/changelog.d/6714.bugfix deleted file mode 100644 index 410516694f..0000000000 --- a/changelog.d/6714.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. diff --git a/changelog.d/6715.misc b/changelog.d/6715.misc deleted file mode 100644 index 8876b0446d..0000000000 --- a/changelog.d/6715.misc +++ /dev/null @@ -1 +0,0 @@ -Add StateMap type alias to simplify types. diff --git a/changelog.d/6716.misc b/changelog.d/6716.misc deleted file mode 100644 index 319aaa4acb..0000000000 --- a/changelog.d/6716.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `DeltaState` to track changes to be made to current state during event persistence. diff --git a/changelog.d/6717.misc b/changelog.d/6717.misc deleted file mode 100644 index a2a7776126..0000000000 --- a/changelog.d/6717.misc +++ /dev/null @@ -1 +0,0 @@ -Add more logging around message retention policies support. diff --git a/changelog.d/6718.bugfix b/changelog.d/6718.bugfix deleted file mode 100644 index 23b23e3ed8..0000000000 --- a/changelog.d/6718.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case. 
diff --git a/changelog.d/6720.feature b/changelog.d/6720.feature deleted file mode 100644 index dfc1b74d62..0000000000 --- a/changelog.d/6720.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new admin API to list and filter rooms on the server. \ No newline at end of file diff --git a/changelog.d/6723.misc b/changelog.d/6723.misc deleted file mode 100644 index 17f15e73a8..0000000000 --- a/changelog.d/6723.misc +++ /dev/null @@ -1 +0,0 @@ -Updates to the SAML mapping provider API. diff --git a/changelog.d/6724.misc b/changelog.d/6724.misc deleted file mode 100644 index 5256be75fa..0000000000 --- a/changelog.d/6724.misc +++ /dev/null @@ -1 +0,0 @@ -When processing a SAML response, log the assertions for easier configuration. diff --git a/changelog.d/6728.misc b/changelog.d/6728.misc deleted file mode 100644 index 01e78bc84e..0000000000 --- a/changelog.d/6728.misc +++ /dev/null @@ -1 +0,0 @@ -Add `local_current_membership` table for tracking local user membership state in rooms. diff --git a/changelog.d/6730.bugfix b/changelog.d/6730.bugfix deleted file mode 100644 index beb444ca66..0000000000 --- a/changelog.d/6730.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix changing password via user admin API. diff --git a/changelog.d/6731.bugfix b/changelog.d/6731.bugfix deleted file mode 100644 index 21f6e15cbd..0000000000 --- a/changelog.d/6731.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `/events/:event_id` deprecated API. diff --git a/changelog.d/6732.misc b/changelog.d/6732.misc deleted file mode 100644 index 8edd767405..0000000000 --- a/changelog.d/6732.misc +++ /dev/null @@ -1 +0,0 @@ -Fixup `synapse.rest` to pass mypy. diff --git a/changelog.d/6733.misc b/changelog.d/6733.misc deleted file mode 100644 index bf048c0be2..0000000000 --- a/changelog.d/6733.misc +++ /dev/null @@ -1 +0,0 @@ -Fixup synapse.api to pass mypy. diff --git a/changelog.d/6742.bugfix b/changelog.d/6742.bugfix deleted file mode 100644 index ca2687c8bb..0000000000 --- a/changelog.d/6742.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). diff --git a/changelog.d/6747.bugfix b/changelog.d/6747.bugfix deleted file mode 100644 index c98107e741..0000000000 --- a/changelog.d/6747.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. diff --git a/changelog.d/6749.misc b/changelog.d/6749.misc deleted file mode 100644 index 9fa13cb1d4..0000000000 --- a/changelog.d/6749.misc +++ /dev/null @@ -1 +0,0 @@ -Allow streaming cache 'invalidate all' to workers. diff --git a/changelog.d/6753.bugfix b/changelog.d/6753.bugfix deleted file mode 100644 index 5dfde793e1..0000000000 --- a/changelog.d/6753.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. diff --git a/changelog.d/6754.misc b/changelog.d/6754.misc deleted file mode 100644 index 0a955e47e6..0000000000 --- a/changelog.d/6754.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused CI docker compose files. diff --git a/changelog.d/6756.feature b/changelog.d/6756.feature deleted file mode 100644 index 6328c868f2..0000000000 --- a/changelog.d/6756.feature +++ /dev/null @@ -1 +0,0 @@ -Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. 
\ No newline at end of file diff --git a/changelog.d/6764.misc b/changelog.d/6764.misc deleted file mode 100644 index 8edd767405..0000000000 --- a/changelog.d/6764.misc +++ /dev/null @@ -1 +0,0 @@ -Fixup `synapse.rest` to pass mypy. diff --git a/synapse/__init__.py b/synapse/__init__.py index 17a6f691c8..1c44ca0999 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.9.0.dev2" +__version__ = "1.9.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From ffa637050d3a3e996747ed0a60d8d1958a0b57e0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 14:19:23 +0000 Subject: [PATCH 043/213] Fixup changelog --- CHANGES.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7629a7e8ce..102ef5f326 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ Features - Allow admin to create or modify a user. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5742](https://github.com/matrix-org/synapse/issues/5742)) - Add new quarantine media admin APIs to quarantine by media ID or by user who uploaded the media. ([\#6681](https://github.com/matrix-org/synapse/issues/6681), [\#6756](https://github.com/matrix-org/synapse/issues/6756)) -- Add org.matrix.e2e_cross_signing to unstable_features in /versions as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712)) +- Add `org.matrix.e2e_cross_signing` to `unstable_features` in `/versions` as per [MSC1756](https://github.com/matrix-org/matrix-doc/pull/1756). ([\#6712](https://github.com/matrix-org/synapse/issues/6712)) - Add a new admin API to list and filter rooms on the server. ([\#6720](https://github.com/matrix-org/synapse/issues/6720)) @@ -16,15 +16,16 @@ Bugfixes - Correctly proxy HTTP errors due to API calls to remote group servers. ([\#6654](https://github.com/matrix-org/synapse/issues/6654)) - Fix media repo admin APIs when using a media worker. ([\#6664](https://github.com/matrix-org/synapse/issues/6664)) - Fix "CRITICAL" errors being logged when a request is received for a uri containing non-ascii characters. ([\#6682](https://github.com/matrix-org/synapse/issues/6682)) -- Fix a bug where we would assign a numeric userid if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690)) +- Fix a bug where we would assign a numeric user ID if somebody tried registering with an empty username. ([\#6690](https://github.com/matrix-org/synapse/issues/6690)) - Fix `purge_room` admin API. ([\#6711](https://github.com/matrix-org/synapse/issues/6711)) - Fix a bug causing Synapse to not always purge quiet rooms with a low `max_lifetime` in their message retention policies when running the automated purge jobs. ([\#6714](https://github.com/matrix-org/synapse/issues/6714)) -- Fix a bug causing the `synapse_port_db` script to return 0 in a specific error case. ([\#6718](https://github.com/matrix-org/synapse/issues/6718)) +- Fix the `synapse_port_db` not correctly running background updates. Thanks @tadzik for reporting. ([\#6718](https://github.com/matrix-org/synapse/issues/6718)) - Fix changing password via user admin API. ([\#6730](https://github.com/matrix-org/synapse/issues/6730)) - Fix `/events/:event_id` deprecated API. 
([\#6731](https://github.com/matrix-org/synapse/issues/6731)) - Fix monthly active user limiting support for worker mode, fixes [#4639](https://github.com/matrix-org/synapse/issues/4639). ([\#6742](https://github.com/matrix-org/synapse/issues/6742)) - Fix bug when setting `account_validity` to an empty block in the config. Thanks to @Sorunome for reporting. ([\#6747](https://github.com/matrix-org/synapse/issues/6747)) - Fix `AttributeError: 'NoneType' object has no attribute 'get'` in `hash_password` when configuration has an empty `password_config`. Contributed by @ivilata. ([\#6753](https://github.com/matrix-org/synapse/issues/6753)) +- Fix the `docker-compose.yaml` overriding the entire `/etc` folder of the container. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656)) Improved Documentation @@ -32,8 +33,7 @@ Improved Documentation - Fix a typo in the configuration example for purge jobs in the sample configuration file. ([\#6621](https://github.com/matrix-org/synapse/issues/6621)) - Add complete documentation of the message retention policies support. ([\#6624](https://github.com/matrix-org/synapse/issues/6624), [\#6665](https://github.com/matrix-org/synapse/issues/6665)) -- No more overriding the entire /etc folder of the container in docker-compose.yaml. Contributed by Fabian Meyer. ([\#6656](https://github.com/matrix-org/synapse/issues/6656)) -- Add some helpful tips about changelog entries to the github pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663)) +- Add some helpful tips about changelog entries to the GitHub pull request template. ([\#6663](https://github.com/matrix-org/synapse/issues/6663)) - Clarify the `account_validity` and `email` sections of the sample configuration. ([\#6685](https://github.com/matrix-org/synapse/issues/6685)) - Add more endpoints to the documentation for Synapse workers. ([\#6698](https://github.com/matrix-org/synapse/issues/6698)) @@ -50,20 +50,20 @@ Internal Changes - Add `local_current_membership` table for tracking local user membership state in rooms. ([\#6655](https://github.com/matrix-org/synapse/issues/6655), [\#6728](https://github.com/matrix-org/synapse/issues/6728)) - Port `synapse.replication.tcp` to async/await. ([\#6666](https://github.com/matrix-org/synapse/issues/6666)) - Fixup `synapse.replication` to pass mypy checks. ([\#6667](https://github.com/matrix-org/synapse/issues/6667)) -- Allow additional_resources to implement IResource directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686)) -- Allow REST endpoint implementations to raise a RedirectException, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687)) +- Allow `additional_resources` to implement `IResource` directly. ([\#6686](https://github.com/matrix-org/synapse/issues/6686)) +- Allow REST endpoint implementations to raise a `RedirectException`, which will redirect the user's browser to a given location. ([\#6687](https://github.com/matrix-org/synapse/issues/6687)) - Updates and extensions to the module API. ([\#6688](https://github.com/matrix-org/synapse/issues/6688)) - Updates to the SAML mapping provider API. ([\#6689](https://github.com/matrix-org/synapse/issues/6689), [\#6723](https://github.com/matrix-org/synapse/issues/6723)) -- Remove redundant RegistrationError class. ([\#6691](https://github.com/matrix-org/synapse/issues/6691)) +- Remove redundant `RegistrationError` class. 
([\#6691](https://github.com/matrix-org/synapse/issues/6691)) - Don't block processing of incoming EDUs behind processing PDUs in the same transaction. ([\#6697](https://github.com/matrix-org/synapse/issues/6697)) - Remove duplicate check for the `session` query parameter on the `/auth/xxx/fallback/web` Client-Server endpoint. ([\#6702](https://github.com/matrix-org/synapse/issues/6702)) - Attempt to retry sending a transaction when we detect a remote server has come back online, rather than waiting for a transaction to be triggered by new data. ([\#6706](https://github.com/matrix-org/synapse/issues/6706)) -- Add StateMap type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715)) +- Add `StateMap` type alias to simplify types. ([\#6715](https://github.com/matrix-org/synapse/issues/6715)) - Add a `DeltaState` to track changes to be made to current state during event persistence. ([\#6716](https://github.com/matrix-org/synapse/issues/6716)) - Add more logging around message retention policies support. ([\#6717](https://github.com/matrix-org/synapse/issues/6717)) - When processing a SAML response, log the assertions for easier configuration. ([\#6724](https://github.com/matrix-org/synapse/issues/6724)) - Fixup `synapse.rest` to pass mypy. ([\#6732](https://github.com/matrix-org/synapse/issues/6732), [\#6764](https://github.com/matrix-org/synapse/issues/6764)) -- Fixup synapse.api to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733)) +- Fixup `synapse.api` to pass mypy. ([\#6733](https://github.com/matrix-org/synapse/issues/6733)) - Allow streaming cache 'invalidate all' to workers. ([\#6749](https://github.com/matrix-org/synapse/issues/6749)) - Remove unused CI docker compose files. ([\#6754](https://github.com/matrix-org/synapse/issues/6754)) From 91085ef49e8553cede573d1a6e52f0b97bdb1bf7 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 14:30:22 +0000 Subject: [PATCH 044/213] Add deprecation headers --- CHANGES.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 102ef5f326..f446bdf6f1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,10 @@ Synapse 1.9.0rc1 (2020-01-22) ============================= +**WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). + +If your Synapse deployment uses workers, note that the reverse-proxy configurations have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). + Features -------- From 33f7e5ce2a449bda2f3c8dfddef9bf0f71bff593 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 22 Jan 2020 14:49:21 +0000 Subject: [PATCH 045/213] Fixup warning about workers changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f446bdf6f1..0392acbde4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ Synapse 1.9.0rc1 (2020-01-22) **WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). 
-If your Synapse deployment uses workers, note that the reverse-proxy configurations have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). +If your Synapse deployment uses workers, note that the reverse-proxy configurations for the `synapse.app.media_repository`, `synapse.app.federation_reader` and `synapse.app.event_creator` have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). Features -------- From ce84dd9e207d9ae88e4cf9ca8a9731fcac043969 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 22 Jan 2020 15:09:57 +0000 Subject: [PATCH 046/213] Remove unnecessary abstractions in admin handler (#6751) --- changelog.d/6751.misc | 1 + synapse/handlers/admin.py | 62 ------------------- synapse/rest/admin/users.py | 19 +++--- .../storage/data_stores/main/registration.py | 2 +- 4 files changed, 11 insertions(+), 73 deletions(-) create mode 100644 changelog.d/6751.misc diff --git a/changelog.d/6751.misc b/changelog.d/6751.misc new file mode 100644 index 0000000000..7222520528 --- /dev/null +++ b/changelog.d/6751.misc @@ -0,0 +1 @@ +Remove some unnecessary admin handler abstraction methods. \ No newline at end of file diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 60a7c938bc..9205865231 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -62,68 +62,6 @@ class AdminHandler(BaseHandler): ret["avatar_url"] = profile.avatar_url return ret - async def get_users(self): - """Function to retrieve a list of users in users table. - - Args: - Returns: - defer.Deferred: resolves to list[dict[str, Any]] - """ - ret = await self.store.get_users() - - return ret - - async def get_users_paginate(self, start, limit, name, guests, deactivated): - """Function to retrieve a paginated list of users from - users list. This will return a json list of users. - - Args: - start (int): start number to begin the query from - limit (int): number of rows to retrieve - name (string): filter for user names - guests (bool): whether to in include guest users - deactivated (bool): whether to include deactivated users - Returns: - defer.Deferred: resolves to json list[dict[str, Any]] - """ - ret = await self.store.get_users_paginate( - start, limit, name, guests, deactivated - ) - - return ret - - async def search_users(self, term): - """Function to search users list for one or more users with - the matched term. - - Args: - term (str): search term - Returns: - defer.Deferred: resolves to list[dict[str, Any]] - """ - ret = await self.store.search_users(term) - - return ret - - def get_user_server_admin(self, user): - """ - Get the admin bit on a user. - - Args: - user_id (UserID): the (necessarily local) user to manipulate - """ - return self.store.is_server_admin(user) - - def set_user_server_admin(self, user, admin): - """ - Set the admin bit on a user. - - Args: - user_id (UserID): the (necessarily local) user to manipulate - admin (bool): whether or not the user should be an admin of this server - """ - return self.store.set_server_admin(user, admin) - async def export_user_data(self, user_id, writer): """Write all data we have on the user to the given writer. 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 52d27fa3e3..927e9ca9ee 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -45,6 +45,7 @@ class UsersRestServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -55,7 +56,7 @@ class UsersRestServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Can only users a local user")
 
-        ret = await self.admin_handler.get_users()
+        ret = await self.store.get_users()
 
         return 200, ret
 
@@ -80,6 +81,7 @@ class UsersRestServletV2(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.admin_handler = hs.get_handlers().admin_handler
 
@@ -92,7 +94,7 @@ class UsersRestServletV2(RestServlet):
         guests = parse_boolean(request, "guests", default=True)
         deactivated = parse_boolean(request, "deactivated", default=False)
 
-        users = await self.admin_handler.get_users_paginate(
+        users = await self.store.get_users_paginate(
             start, limit, user_id, guests, deactivated
         )
         ret = {"users": users}
@@ -516,8 +518,8 @@ class SearchUsersRestServlet(RestServlet):
     PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
 
     def __init__(self, hs):
-        self.store = hs.get_datastore()
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
         self.handlers = hs.get_handlers()
 
@@ -540,7 +542,7 @@ class SearchUsersRestServlet(RestServlet):
         term = parse_string(request, "term", required=True)
         logger.info("term: %s ", term)
 
-        ret = await self.handlers.admin_handler.search_users(term)
+        ret = await self.store.search_users(term)
 
         return 200, ret
 
@@ -574,8 +576,8 @@ class UserAdminServlet(RestServlet):
 
     def __init__(self, hs):
         self.hs = hs
+        self.store = hs.get_datastore()
         self.auth = hs.get_auth()
-        self.handlers = hs.get_handlers()
 
     async def on_GET(self, request, user_id):
         await assert_requester_is_admin(self.auth, request)
@@ -585,8 +587,7 @@ class UserAdminServlet(RestServlet):
         if not self.hs.is_mine(target_user):
             raise SynapseError(400, "Only local users can be admins of this homeserver")
 
-        is_admin = await self.handlers.admin_handler.get_user_server_admin(target_user)
-        is_admin = bool(is_admin)
+        is_admin = await self.store.is_server_admin(target_user)
 
         return 200, {"admin": is_admin}
 
@@ -609,8 +610,6 @@ class UserAdminServlet(RestServlet):
         if target_user == auth_user and not set_admin_to:
             raise SynapseError(400, "You may not demote yourself.")
 
-        await self.handlers.admin_handler.set_user_server_admin(
-            target_user, set_admin_to
-        )
+        await self.store.set_user_server_admin(target_user, set_admin_to)
 
         return 200, {}
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index cb4b2b39a0..49306642ed 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -291,7 +291,7 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="is_server_admin",
         )
 
-        return res if res else False
+        return bool(res) if res else False
 
     def set_server_admin(self, user, admin):
         """Sets whether a user is an admin of this homeserver.
From d31f5f4d89694a6e41b1c9af09ed6405ecb07376 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 23 Jan 2020 11:37:26 +0000 Subject: [PATCH 047/213] Update admin room docs with correct endpoints (#6770) --- changelog.d/6770.doc | 1 + docs/admin_api/rooms.md | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6770.doc diff --git a/changelog.d/6770.doc b/changelog.d/6770.doc new file mode 100644 index 0000000000..a251b82238 --- /dev/null +++ b/changelog.d/6770.doc @@ -0,0 +1 @@ +Fix endpoint documentation for the List Rooms admin api. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 082721ea95..2db457c1b6 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -46,7 +46,7 @@ The following fields are possible in the JSON response body: A standard request with no filtering: ``` -GET /_synapse/admin/rooms +GET /_synapse/admin/v1/rooms {} ``` @@ -78,7 +78,7 @@ Response: Filtering by room name: ``` -GET /_synapse/admin/rooms?search_term=TWIM +GET /_synapse/admin/v1/rooms?search_term=TWIM {} ``` @@ -103,7 +103,7 @@ Response: Paginating through a list of rooms: ``` -GET /_synapse/admin/rooms?order_by=size +GET /_synapse/admin/v1/rooms?order_by=size {} ``` @@ -139,7 +139,7 @@ To get the next batch of room results, we repeat our request, setting the `from` parameter to the value of `next_token`. ``` -GET /_synapse/admin/rooms?order_by=size&from=100 +GET /_synapse/admin/v1/rooms?order_by=size&from=100 {} ``` From 5bd3cb7260984164c4c54eb2add1fa7821795360 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 Jan 2020 12:03:58 +0000 Subject: [PATCH 048/213] Minor fixes to user admin api (#6761) * don't insist on a password (this is valid if you have an SSO login) * fix reference to undefined `requester` --- changelog.d/6761.bugfix | 1 + synapse/rest/admin/users.py | 14 +++++--------- 2 files changed, 6 insertions(+), 9 deletions(-) create mode 100644 changelog.d/6761.bugfix diff --git a/changelog.d/6761.bugfix b/changelog.d/6761.bugfix new file mode 100644 index 0000000000..1c664c02df --- /dev/null +++ b/changelog.d/6761.bugfix @@ -0,0 +1 @@ +Minor fixes to `PUT /_synapse/admin/v2/users` admin api. 
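The behavioural core of this patch is easiest to see outside the servlet: the password becomes optional (an SSO-backed account has none), but whenever it *is* supplied it must still be a string of at most 512 characters. A minimal standalone sketch of that flow, run before reading the diff below — `BadRequest` and `validate_new_user_body` are illustrative names, not Synapse's:

```python
# Sketch: password is optional, but validated whenever it is present.
class BadRequest(Exception):
    pass


def validate_new_user_body(body: dict) -> dict:
    password = body.get("password")  # absent -> None, which is now acceptable
    if password is not None and (
        not isinstance(password, str) or len(password) > 512
    ):
        raise BadRequest("Invalid password")
    return {"password": password, "displayname": body.get("displayname")}


# An SSO-style body without a password no longer raises:
print(validate_new_user_body({"displayname": "SSO user"}))
```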
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 927e9ca9ee..3455741195 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -153,7 +153,8 @@ class UserRestServletV2(RestServlet): return 200, ret async def on_PUT(self, request, user_id): - await assert_requester_is_admin(self.auth, request) + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) target_user = UserID.from_string(user_id) body = parse_json_object_from_request(request) @@ -164,8 +165,6 @@ class UserRestServletV2(RestServlet): user = await self.admin_handler.get_user(target_user) if user: # modify user - requester = await self.auth.get_user_by_req(request) - if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True @@ -212,11 +211,8 @@ class UserRestServletV2(RestServlet): return 200, user else: # create user - if "password" not in body: - raise SynapseError( - 400, "password must be specified", errcode=Codes.BAD_JSON - ) - elif ( + password = body.get("password") + if password is not None and ( not isinstance(body["password"], text_type) or len(body["password"]) > 512 ): @@ -231,7 +227,7 @@ class UserRestServletV2(RestServlet): user_id = await self.registration_handler.register_user( localpart=target_user.localpart, - password=body["password"], + password=password, admin=bool(admin), default_display_name=displayname, user_type=user_type, From 6b7462a13fff8f4e1dbad0bb4d81bbe0515af7c1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 Jan 2020 12:11:44 +0000 Subject: [PATCH 049/213] a bit of debugging for media storage providers (#6757) * a bit of debugging for media storage providers * changelog --- changelog.d/6757.misc | 1 + synapse/rest/media/v1/media_storage.py | 1 + synapse/rest/media/v1/storage_provider.py | 6 ++++++ 3 files changed, 8 insertions(+) create mode 100644 changelog.d/6757.misc diff --git a/changelog.d/6757.misc b/changelog.d/6757.misc new file mode 100644 index 0000000000..a50c5e974a --- /dev/null +++ b/changelog.d/6757.misc @@ -0,0 +1 @@ +Add some debugging for media storage providers. 
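A small pattern worth calling out before the diff: each storage provider grows a `__str__`, so the fetch loop can log which backend actually served a path. A self-contained sketch of the same idea, with made-up class names:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


class FileBackend:
    """Stand-in for a storage provider backend."""

    def __init__(self, base_directory: str):
        self.base_directory = base_directory

    def __str__(self) -> str:
        # Without this, the debug line below would print an opaque repr.
        return "FileBackend[%s]" % (self.base_directory,)

    def fetch(self, path: str):
        return "contents of %s" % (path,)  # pretend the file exists


def first_hit(providers, path):
    for provider in providers:
        res = provider.fetch(path)
        if res:
            logger.debug("Streaming %s from %s", path, provider)
            return res
    return None


first_hit([FileBackend("/var/media")], "ab/cd/ef")
```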
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 3b87717a5a..683a79c966 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -148,6 +148,7 @@ class MediaStorage(object): for provider in self.storage_providers: res = yield provider.fetch(path, file_info) if res: + logger.debug("Streaming %s from %s", path, provider) return res return None diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 37687ea7f4..858680be26 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -77,6 +77,9 @@ class StorageProviderWrapper(StorageProvider): self.store_synchronous = store_synchronous self.store_remote = store_remote + def __str__(self): + return "StorageProviderWrapper[%s]" % (self.backend,) + def store_file(self, path, file_info): if not file_info.server_name and not self.store_local: return defer.succeed(None) @@ -114,6 +117,9 @@ class FileStorageProviderBackend(StorageProvider): self.cache_directory = hs.config.media_store_path self.base_directory = config + def __str__(self): + return "FileStorageProviderBackend[%s]" % (self.base_directory,) + def store_file(self, path, file_info): """See StorageProvider.store_file""" From f3eac2b3e94539b5b01ce1f5580f11e8ba205385 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 23 Jan 2020 12:57:55 +0000 Subject: [PATCH 050/213] 1.9.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0392acbde4..a582f0fb58 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.9.0 (2020-01-23) +========================== + +No significant changes. + + Synapse 1.9.0rc1 (2020-01-22) ============================= diff --git a/debian/changelog b/debian/changelog index 7413c238e6..49f3187691 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.9.0) stable; urgency=medium + + * New synapse release 1.9.0. + + -- Synapse Packaging team Thu, 23 Jan 2020 12:56:31 +0000 + matrix-synapse-py3 (1.8.0) stable; urgency=medium [ Richard van der Hoff ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 1c44ca0999..6236e13aa3 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.9.0rc1" +__version__ = "1.9.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 1dc5a791cf0aad8d672ee4e2e84b544c6be94431 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 23 Jan 2020 12:59:29 +0000 Subject: [PATCH 051/213] Fixup changelog --- CHANGES.md | 5 ++++- changelog.d/6770.doc | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6770.doc diff --git a/CHANGES.md b/CHANGES.md index a582f0fb58..9a53e1872d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,10 @@ Synapse 1.9.0 (2020-01-23) ========================== -No significant changes. +Improved Documentation +---------------------- + +- Fix endpoint documentation for the List Rooms admin api. 
([\#6770](https://github.com/matrix-org/synapse/issues/6770)) Synapse 1.9.0rc1 (2020-01-22) diff --git a/changelog.d/6770.doc b/changelog.d/6770.doc deleted file mode 100644 index a251b82238..0000000000 --- a/changelog.d/6770.doc +++ /dev/null @@ -1 +0,0 @@ -Fix endpoint documentation for the List Rooms admin api. \ No newline at end of file From 1755326d8a7a5c79261417675e38f5a8ccf39081 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 23 Jan 2020 13:11:07 +0000 Subject: [PATCH 052/213] Fixup changelog --- CHANGES.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9a53e1872d..056726ce91 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,11 @@ Synapse 1.9.0 (2020-01-23) ========================== +**WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). + +If your Synapse deployment uses workers, note that the reverse-proxy configurations for the `synapse.app.media_repository`, `synapse.app.federation_reader` and `synapse.app.event_creator` workers have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). Existing configurations will continue to work. + + Improved Documentation ---------------------- @@ -10,10 +15,6 @@ Improved Documentation Synapse 1.9.0rc1 (2020-01-22) ============================= -**WARNING**: As of this release, Synapse no longer supports versions of SQLite before 3.11, and will refuse to start when configured to use an older version. Administrators are recommended to migrate their database to Postgres (see instructions [here](docs/postgres.md)). - -If your Synapse deployment uses workers, note that the reverse-proxy configurations for the `synapse.app.media_repository`, `synapse.app.federation_reader` and `synapse.app.event_creator` have changed, with the addition of a few paths (see the updated configurations [here](docs/workers.md#available-worker-applications)). - Features -------- From 9bae740527c4621f9f8eb8ca936669f2372c42eb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 23 Jan 2020 13:13:19 +0000 Subject: [PATCH 053/213] Fixup changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 056726ce91..8a068ae8c9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ If your Synapse deployment uses workers, note that the reverse-proxy configurati Improved Documentation ---------------------- -- Fix endpoint documentation for the List Rooms admin api. ([\#6770](https://github.com/matrix-org/synapse/issues/6770)) +- Fix endpoint documentation for the List Rooms admin API. ([\#6770](https://github.com/matrix-org/synapse/issues/6770)) Synapse 1.9.0rc1 (2020-01-22) From fa4d609e20318821e2ffbeb35bfddbc86be81be0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Jan 2020 15:19:03 +0000 Subject: [PATCH 054/213] Make 'event.redacts' never raise. (#6771) There are quite a few places that we assume that a redaction event has a corresponding `redacts` key, which is not always the case. So lets cheekily make it so that event.redacts just returns None instead. 
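The trick the commit message describes — a property factory whose getter raises `AttributeError` when no default is given (so `hasattr` keeps working) and quietly returns the default otherwise — fits in a few lines. A standalone sketch of the mechanism, not Synapse's actual `_event_dict_property`:

```python
_SENTINEL = object()  # lets callers pass default=None explicitly


def dict_property(key, default=_SENTINEL):
    def getter(self):
        if default is _SENTINEL:
            try:
                return self._event_dict[key]
            except KeyError:
                # hasattr() expects AttributeError, not KeyError
                raise AttributeError(key)
        return self._event_dict.get(key, default)

    return property(getter)


class Event:
    def __init__(self, event_dict):
        self._event_dict = event_dict

    sender = dict_property("sender")
    redacts = dict_property("redacts", None)


assert Event({}).redacts is None         # default given: missing key is fine
assert not hasattr(Event({}), "sender")  # no default: AttributeError -> False
```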
--- changelog.d/6771.bugfix | 1 + synapse/events/__init__.py | 28 ++++++++++++--- synapse/storage/data_stores/main/events.py | 2 +- .../storage/data_stores/main/events_worker.py | 2 +- tests/storage/test_redaction.py | 35 +++++++++++++++++++ 5 files changed, 62 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6771.bugfix diff --git a/changelog.d/6771.bugfix b/changelog.d/6771.bugfix new file mode 100644 index 0000000000..623ba24acb --- /dev/null +++ b/changelog.d/6771.bugfix @@ -0,0 +1 @@ +Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 88ed6d764f..72c09327f4 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -116,16 +116,32 @@ class _EventInternalMetadata(object): return getattr(self, "redacted", False) -def _event_dict_property(key): +_SENTINEL = object() + + +def _event_dict_property(key, default=_SENTINEL): + """Creates a new property for the given key that delegates access to + `self._event_dict`. + + The default is used if the key is missing from the `_event_dict`, if given, + otherwise an AttributeError will be raised. + + Note: If a default is given then `hasattr` will always return true. + """ + # We want to be able to use hasattr with the event dict properties. # However, (on python3) hasattr expects AttributeError to be raised. Hence, # we need to transform the KeyError into an AttributeError - def getter(self): + + def getter_raises(self): try: return self._event_dict[key] except KeyError: raise AttributeError(key) + def getter_default(self): + return self._event_dict.get(key, default) + def setter(self, v): try: self._event_dict[key] = v @@ -138,7 +154,11 @@ def _event_dict_property(key): except KeyError: raise AttributeError(key) - return property(getter, setter, delete) + if default is _SENTINEL: + # No default given, so use the getter that raises + return property(getter_raises, setter, delete) + else: + return property(getter_default, setter, delete) class EventBase(object): @@ -165,7 +185,7 @@ class EventBase(object): origin = _event_dict_property("origin") origin_server_ts = _event_dict_property("origin_server_ts") prev_events = _event_dict_property("prev_events") - redacts = _event_dict_property("redacts") + redacts = _event_dict_property("redacts", None) room_id = _event_dict_property("room_id") sender = _event_dict_property("sender") user_id = _event_dict_property("sender") diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 596daf8909..ce553566a5 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -951,7 +951,7 @@ class EventsStore( elif event.type == EventTypes.Message: # Insert into the event_search table. self._store_room_message_txn(txn, event) - elif event.type == EventTypes.Redaction: + elif event.type == EventTypes.Redaction and event.redacts is not None: # Insert into the redactions table. self._store_redaction(txn, event) elif event.type == EventTypes.Retention: diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py index 3b93e0597a..7251e819f5 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py @@ -287,7 +287,7 @@ class EventsWorkerStore(SQLBaseStore): # we have to recheck auth now. 
if not allow_rejected and entry.event.type == EventTypes.Redaction: - if not hasattr(entry.event, "redacts"): + if entry.event.redacts is None: # A redacted redaction doesn't have a `redacts` key, in # which case lets just withhold the event. # diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index dc45173355..feb1c07cb2 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -398,3 +398,38 @@ class RedactionTestCase(unittest.HomeserverTestCase): self.get_success( self.store.get_event(first_redact_event.event_id, allow_none=True) ) + + def test_store_redacted_redaction(self): + """Tests that we can store a redacted redaction. + """ + + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) + + builder = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Redaction, + "sender": self.u_alice.to_string(), + "room_id": self.room1.to_string(), + "content": {"reason": "foo"}, + }, + ) + + redaction_event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) + ) + + self.get_success( + self.storage.persistence.persist_event(redaction_event, context) + ) + + # Now lets jump to the future where we have censored the redaction event + # in the DB. + self.reactor.advance(60 * 60 * 24 * 31) + + # We just want to check that fetching the event doesn't raise an exception. + self.get_success( + self.store.get_event(redaction_event.event_id, allow_none=True) + ) From aa6ad288f16ed263b2a94b480259639d52ff6cad Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Fri, 24 Jan 2020 11:01:57 +0200 Subject: [PATCH 055/213] Clarifications to the workers documentation * Add note that user_dir requires disabling user dir updates from the main synapse process. * Add note that federation_reader should have the federation listener resource. Signed-off-by: Jason Robinson --- changelog.d/6775.doc | 1 + docs/workers.md | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/6775.doc diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc new file mode 100644 index 0000000000..9421250f8b --- /dev/null +++ b/changelog.d/6775.doc @@ -0,0 +1 @@ +Clarify documentation related to user_dir and federation_reader workers. diff --git a/docs/workers.md b/docs/workers.md index 0ab269fd96..a5d6d18f23 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -185,6 +185,9 @@ reverse-proxy configuration. The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single instance. +Note that the `worker_listeners.resources.name` needs to be set to `federation` +for this worker. + ### `synapse.app.federation_sender` Handles sending federation traffic to other servers. Doesn't handle any @@ -265,6 +268,10 @@ the following regular expressions: ^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$ +When using this worker you must also set `update_user_directory: False` in the +shared configuration file to stop the main synapse running background +jobs related to updating the user directory. 
+ ### `synapse.app.frontend_proxy` Proxies some frequently-requested client endpoints to add caching and remove From 9f7aaf90b5ef76416852f35201a851d45eccc0a1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 24 Jan 2020 14:28:40 +0000 Subject: [PATCH 056/213] Validate client_secret parameter (#6767) --- changelog.d/6767.bugfix | 1 + synapse/handlers/identity.py | 4 +- synapse/rest/client/v2_alpha/account.py | 23 ++++++++--- synapse/rest/client/v2_alpha/register.py | 3 ++ synapse/util/stringutils.py | 17 ++++++++ tests/util/test_stringutils.py | 51 ++++++++++++++++++++++++ 6 files changed, 93 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6767.bugfix create mode 100644 tests/util/test_stringutils.py diff --git a/changelog.d/6767.bugfix b/changelog.d/6767.bugfix new file mode 100644 index 0000000000..63c7c63315 --- /dev/null +++ b/changelog.d/6767.bugfix @@ -0,0 +1 @@ +Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 000fbf090f..23f07832e7 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -38,7 +38,7 @@ from synapse.api.errors import ( from synapse.config.emailconfig import ThreepidBehaviour from synapse.http.client import SimpleHttpClient from synapse.util.hash import sha256_and_url_safe_base64 -from synapse.util.stringutils import random_string +from synapse.util.stringutils import assert_valid_client_secret, random_string from ._base import BaseHandler @@ -84,6 +84,8 @@ class IdentityHandler(BaseHandler): raise SynapseError( 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM ) + assert_valid_client_secret(client_secret) + session_id = creds.get("sid") if not session_id: raise SynapseError( diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index fc240f5cf8..dc837d6c75 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -30,6 +30,7 @@ from synapse.http.servlet import ( ) from synapse.push.mailer import Mailer, load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import assert_valid_client_secret from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -81,6 +82,8 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Extract params from body client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) + email = body["email"] send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param @@ -166,8 +169,9 @@ class PasswordResetSubmitTokenServlet(RestServlet): ) sid = parse_string(request, "sid", required=True) - client_secret = parse_string(request, "client_secret", required=True) token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + assert_valid_client_secret(client_secret) # Attempt to validate a 3PID session try: @@ -353,6 +357,8 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) + email = body["email"] 
send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param @@ -413,6 +419,8 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): body, ["client_secret", "country", "phone_number", "send_attempt"] ) client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) + country = body["country"] phone_number = body["phone_number"] send_attempt = body["send_attempt"] @@ -493,8 +501,9 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): ) sid = parse_string(request, "sid", required=True) - client_secret = parse_string(request, "client_secret", required=True) token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + assert_valid_client_secret(client_secret) # Attempt to validate a 3PID session try: @@ -559,6 +568,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict(body, ["client_secret", "sid", "token"]) + assert_valid_client_secret(body["client_secret"]) # Proxy submit_token request to msisdn threepid delegate response = await self.identity_handler.proxy_msisdn_submit_token( @@ -600,8 +610,9 @@ class ThreepidRestServlet(RestServlet): ) assert_params_in_dict(threepid_creds, ["client_secret", "sid"]) - client_secret = threepid_creds["client_secret"] sid = threepid_creds["sid"] + client_secret = threepid_creds["client_secret"] + assert_valid_client_secret(client_secret) validation_session = await self.identity_handler.validate_threepid_session( client_secret, sid @@ -637,8 +648,9 @@ class ThreepidAddRestServlet(RestServlet): body = parse_json_object_from_request(request) assert_params_in_dict(body, ["client_secret", "sid"]) - client_secret = body["client_secret"] sid = body["sid"] + client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) await self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -676,8 +688,9 @@ class ThreepidBindRestServlet(RestServlet): assert_params_in_dict(body, ["id_server", "sid", "client_secret"]) id_server = body["id_server"] sid = body["sid"] - client_secret = body["client_secret"] id_access_token = body.get("id_access_token") # optional + client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 1bda9aec7e..a09189b1b4 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -49,6 +49,7 @@ from synapse.http.servlet import ( from synapse.push.mailer import load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter +from synapse.util.stringutils import assert_valid_client_secret from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -116,6 +117,8 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): # Extract params from body client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) + email = body["email"] send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 982c6d81ca..2c0dcb5208 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -1,5 
+1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,14 +15,22 @@
 # limitations under the License.
 
 import random
+import re
 import string
 
 import six
 from six import PY2, PY3
 from six.moves import range
 
+from synapse.api.errors import Codes, SynapseError
+
 _string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
 
+# https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-register-email-requesttoken
+# Note: The : character is allowed here for older clients, but will be removed in a
+# future release. Context: https://github.com/matrix-org/synapse/issues/6766
+client_secret_regex = re.compile(r"^[0-9a-zA-Z\.\=\_\-\:]+$")
+
 # random_string and random_string_with_symbols are used for a range of things,
 # some cryptographically important, some less so. We use SystemRandom to make sure
 # we get cryptographically-secure randoms.
@@ -109,3 +118,11 @@ def exception_to_unicode(e):
         return msg.decode("utf-8", errors="replace")
     else:
         return msg
+
+
+def assert_valid_client_secret(client_secret):
+    """Validate that a given string matches the client_secret regex defined by the spec"""
+    if client_secret_regex.match(client_secret) is None:
+        raise SynapseError(
+            400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM
+        )
diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py
new file mode 100644
index 0000000000..4f4da29a98
--- /dev/null
+++ b/tests/util/test_stringutils.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.api.errors import SynapseError
+from synapse.util.stringutils import assert_valid_client_secret
+
+from .. import unittest
+
+
+class StringUtilsTestCase(unittest.TestCase):
+    def test_client_secret_regex(self):
+        """Ensure that client_secret does not contain illegal characters"""
+        good = [
+            "abcde12345",
+            "ABCabc123",
+            "_--something==_",
+            "...--==-18913",
+            "8Dj2odd-e9asd.cd==_--ddas-secret-",
+            # We temporarily allow : characters: https://github.com/matrix-org/synapse/issues/6766
+            # To be removed in a future release
+            "SECRET:1234567890",
+        ]
+
+        bad = [
+            "--+-/secret",
+            "\\dx--dsa288",
+            "",
+            "AAS//",
+            "asdj**",
+            ">X>",
+        ]
+
+        for client_secret in good:
+            assert_valid_client_secret(client_secret)
+
+        for client_secret in bad:
+            with self.assertRaises(SynapseError):
+                assert_valid_client_secret(client_secret)

From: Jason Robinson
Date: Mon, 27 Jan 2020 10:20:48 +0200
Subject: [PATCH 057/213] Fix federation_reader listeners doc as per PR review

Signed-off-by: Jason Robinson
---
 docs/workers.md | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
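For reference, the validator added by patch 056 above can be exercised on its own; the regex below is copied from the `stringutils.py` diff, while the wrapper function is just illustrative scaffolding:

```python
import re

# Same pattern as synapse.util.stringutils.client_secret_regex
client_secret_regex = re.compile(r"^[0-9a-zA-Z\.\=\_\-\:]+$")


def is_valid_client_secret(client_secret: str) -> bool:
    return client_secret_regex.match(client_secret) is not None


assert is_valid_client_secret("8Dj2odd-e9asd.cd==_--ddas-secret-")
assert is_valid_client_secret("SECRET:1234567890")  # ':' still allowed, for now
assert not is_valid_client_secret("--+-/secret")    # '+' and '/' are rejected
assert not is_valid_client_secret("")               # empty string is rejected
```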
-Note that the `worker_listeners.resources.name` needs to be set to `federation` -for this worker. +Note that `federation` must be added to the listener resources in the worker config: + +```yaml +worker_app: synapse.app.federation_reader +... +worker_listeners: + - type: http + port: + resources: + - names: + - federation +``` ### `synapse.app.federation_sender` From cf9d56e5cfa93dbd5f52df89e1fdb088755d811a Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Mon, 27 Jan 2020 14:09:59 +0200 Subject: [PATCH 058/213] Formatting of changelog Co-Authored-By: Brendan Abolivier --- changelog.d/6775.doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc index 9421250f8b..c6078ef82d 100644 --- a/changelog.d/6775.doc +++ b/changelog.d/6775.doc @@ -1 +1 @@ -Clarify documentation related to user_dir and federation_reader workers. +Clarify documentation related to `user_dir` and `federation_reader` workers. From d5275fc55f4edc42d1543825da2c13df63d96927 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Jan 2020 13:47:50 +0000 Subject: [PATCH 059/213] Propagate cache invalidates from workers to other workers. (#6748) Currently if a worker invalidates a cache it will be streamed to master, which then didn't forward those to other workers. --- changelog.d/6748.misc | 1 + synapse/replication/tcp/protocol.py | 2 +- synapse/replication/tcp/resource.py | 9 ++++++--- synapse/storage/data_stores/main/cache.py | 22 +++++++++++++++++++++- 4 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6748.misc diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc new file mode 100644 index 0000000000..de320d4cd9 --- /dev/null +++ b/changelog.d/6748.misc @@ -0,0 +1 @@ +Propagate cache invalidates from workers to other workers. diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 131e5acb09..bc1482a9bb 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -459,7 +459,7 @@ class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): await self.streamer.on_remove_pusher(cmd.app_id, cmd.push_key, cmd.user_id) async def on_INVALIDATE_CACHE(self, cmd): - self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys) + await self.streamer.on_invalidate_cache(cmd.cache_func, cmd.keys) async def on_REMOTE_SERVER_UP(self, cmd: RemoteServerUpCommand): self.streamer.on_remote_server_up(cmd.data) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 6ebf944f66..ce60ae2e07 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -17,7 +17,7 @@ import logging import random -from typing import List +from typing import Any, List from six import itervalues @@ -271,11 +271,14 @@ class ReplicationStreamer(object): self.notifier.on_new_replication_data() @measure_func("repl.on_invalidate_cache") - def on_invalidate_cache(self, cache_func, keys): + async def on_invalidate_cache(self, cache_func: str, keys: List[Any]): """The client has asked us to invalidate a cache """ invalidate_cache_counter.inc() - getattr(self.store, cache_func).invalidate(tuple(keys)) + + # We invalidate the cache locally, but then also stream that to other + # workers. 
+        await self.store.invalidate_cache_and_stream(cache_func, tuple(keys))
 
     @measure_func("repl.on_user_ip")
     async def on_user_ip(
diff --git a/synapse/storage/data_stores/main/cache.py b/synapse/storage/data_stores/main/cache.py
index afa2b41c98..d4c44dcc75 100644
--- a/synapse/storage/data_stores/main/cache.py
+++ b/synapse/storage/data_stores/main/cache.py
@@ -16,7 +16,7 @@
 
 import itertools
 import logging
-from typing import Any, Iterable, Optional
+from typing import Any, Iterable, Optional, Tuple
 
 from twisted.internet import defer
 
@@ -33,6 +33,26 @@ CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
 
 
 class CacheInvalidationStore(SQLBaseStore):
+    async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
+        """Invalidates the cache and adds it to the cache stream so slaves
+        will know to invalidate their caches.
+
+        This should only be used to invalidate caches where slaves won't
+        otherwise know from other replication streams that the cache should
+        be invalidated.
+        """
+        cache_func = getattr(self, cache_name, None)
+        if not cache_func:
+            return
+
+        cache_func.invalidate(keys)
+        await self.db.runInteraction(
+            "invalidate_cache_and_stream",
+            self._send_invalidation_to_replication,
+            cache_func.__name__,
+            keys,
+        )
+
     def _invalidate_cache_and_stream(self, txn, cache_func, keys):
         """Invalidates the cache and adds it to the cache stream so slaves
         will know to invalidate their caches.

From 8df862e45d9848c226399c8e39d31497461516ff Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 27 Jan 2020 14:30:57 +0000
Subject: [PATCH 060/213] Add `rooms.room_version` column (#6729)

This is so that we don't have to rely on pulling it out from
`current_state_events` table.
---
 changelog.d/6729.misc                        |  1 +
 synapse/federation/federation_client.py      | 50 ++++++----
 synapse/handlers/federation.py               | 65 ++++++++++---
 synapse/handlers/room.py                     | 52 ++++++----
 .../v2_alpha/room_upgrade_rest_servlet.py    |  3 +-
 synapse/storage/data_stores/main/room.py     | 94 +++++++++++++++++--
 .../schema/delta/57/rooms_version_column.sql | 24 +++++
 synapse/storage/data_stores/main/state.py    | 34 ++++---
 tests/storage/test_room.py                   |  7 +-
 tests/storage/test_state.py                  |  5 +-
 tests/utils.py                               |  8 ++
 11 files changed, 270 insertions(+), 73 deletions(-)
 create mode 100644 changelog.d/6729.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql

diff --git a/changelog.d/6729.misc b/changelog.d/6729.misc
new file mode 100644
index 0000000000..5537355bea
--- /dev/null
+++ b/changelog.d/6729.misc
@@ -0,0 +1 @@
+Record room versions in the `rooms` table.
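Before the room-version diffs below, the fan-out the cache patch (#6748) just introduced can be summarised in a toy model: when one worker reports an invalidation, the master both drops its own entry and re-streams the invalidation so every other worker drops theirs (previously that second step was missing). All names here are illustrative:

```python
class Worker:
    def __init__(self):
        self.caches = {"get_user": {"@alice:example.com": {"displayname": "Alice"}}}

    def invalidate(self, cache_name, key):
        self.caches.get(cache_name, {}).pop(key, None)


class Master(Worker):
    def __init__(self, workers):
        super().__init__()
        self.workers = workers

    def on_invalidate_cache(self, cache_name, key):
        self.invalidate(cache_name, key)  # 1. invalidate locally...
        for worker in self.workers:       # 2. ...then fan out to the others
            worker.invalidate(cache_name, key)


workers = [Worker(), Worker()]
master = Master(workers)
master.on_invalidate_cache("get_user", "@alice:example.com")
assert all("@alice:example.com" not in w.caches["get_user"] for w in workers)
```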
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index af652a7659..d57e8ca7a2 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,6 +17,7 @@ import copy import itertools import logging +from typing import Dict, Iterable from prometheus_client import Counter @@ -29,6 +30,7 @@ from synapse.api.errors import ( FederationDeniedError, HttpResponseException, SynapseError, + UnsupportedRoomVersionError, ) from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, @@ -385,6 +387,8 @@ class FederationClient(FederationBase): return res except InvalidResponseError as e: logger.warning("Failed to %s via %s: %s", description, destination, e) + except UnsupportedRoomVersionError: + raise except HttpResponseException as e: if not 500 <= e.code < 600: raise e.to_synapse_error() @@ -404,7 +408,13 @@ class FederationClient(FederationBase): raise SynapseError(502, "Failed to %s via any server" % (description,)) def make_membership_event( - self, destinations, room_id, user_id, membership, content, params + self, + destinations: Iterable[str], + room_id: str, + user_id: str, + membership: str, + content: dict, + params: Dict[str, str], ): """ Creates an m.room.member event, with context, without participating in the room. @@ -417,21 +427,23 @@ class FederationClient(FederationBase): Note that this does not append any events to any graphs. Args: - destinations (Iterable[str]): Candidate homeservers which are probably + destinations: Candidate homeservers which are probably participating in the room. - room_id (str): The room in which the event will happen. - user_id (str): The user whose membership is being evented. - membership (str): The "membership" property of the event. Must be - one of "join" or "leave". - content (dict): Any additional data to put into the content field - of the event. - params (dict[str, str|Iterable[str]]): Query parameters to include in the - request. + room_id: The room in which the event will happen. + user_id: The user whose membership is being evented. + membership: The "membership" property of the event. Must be one of + "join" or "leave". + content: Any additional data to put into the content field of the + event. + params: Query parameters to include in the request. Return: - Deferred[tuple[str, FrozenEvent, int]]: resolves to a tuple of - `(origin, event, event_format)` where origin is the remote - homeserver which generated the event, and event_format is one of - `synapse.api.room_versions.EventFormatVersions`. + Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of + `(origin, event, room_version)` where origin is the remote + homeserver which generated the event, and room_version is the + version of the room. + + Fails with a `UnsupportedRoomVersionError` if remote responds with + a room version we don't understand. Fails with a ``SynapseError`` if the chosen remote server returns a 300/400 code. @@ -453,8 +465,12 @@ class FederationClient(FederationBase): # Note: If not supplied, the room version may be either v1 or v2, # however either way the event format version will be v1. 
- room_version = ret.get("room_version", RoomVersions.V1.identifier) - event_format = room_version_to_event_format(room_version) + room_version_id = ret.get("room_version", RoomVersions.V1.identifier) + room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not room_version: + raise UnsupportedRoomVersionError() + + event_format = room_version_to_event_format(room_version_id) pdu_dict = ret.get("event", None) if not isinstance(pdu_dict, dict): @@ -478,7 +494,7 @@ class FederationClient(FederationBase): event_dict=pdu_dict, ) - return (destination, ev, event_format) + return (destination, ev, room_version) return self._try_destination_list( "make_" + membership, destinations, send_request diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index d4f9a792fc..f824ee79a0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -44,10 +44,10 @@ from synapse.api.errors import ( StoreError, SynapseError, ) -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event -from synapse.events import EventBase +from synapse.events import EventBase, room_version_to_event_format from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.logging.context import ( @@ -703,8 +703,20 @@ class FederationHandler(BaseHandler): if not room: try: + prev_state_ids = await context.get_prev_state_ids() + create_event = await self.store.get_event( + prev_state_ids[(EventTypes.Create, "")] + ) + + room_version_id = create_event.content.get( + "room_version", RoomVersions.V1.identifier + ) + await self.store.store_room( - room_id=room_id, room_creator_user_id="", is_public=False + room_id=room_id, + room_creator_user_id="", + is_public=False, + room_version=KNOWN_ROOM_VERSIONS[room_version_id], ) except StoreError: logger.exception("Failed to store room.") @@ -1186,7 +1198,7 @@ class FederationHandler(BaseHandler): """ logger.debug("Joining %s to %s", joinee, room_id) - origin, event, event_format_version = yield self._make_and_verify_event( + origin, event, room_version = yield self._make_and_verify_event( target_hosts, room_id, joinee, @@ -1214,6 +1226,8 @@ class FederationHandler(BaseHandler): target_hosts.insert(0, origin) except ValueError: pass + + event_format_version = room_version_to_event_format(room_version.identifier) ret = yield self.federation_client.send_join( target_hosts, event, event_format_version ) @@ -1234,13 +1248,18 @@ class FederationHandler(BaseHandler): try: yield self.store.store_room( - room_id=room_id, room_creator_user_id="", is_public=False + room_id=room_id, + room_creator_user_id="", + is_public=False, + room_version=room_version, ) except Exception: # FIXME pass - yield self._persist_auth_tree(origin, auth_chain, state, event) + yield self._persist_auth_tree( + origin, auth_chain, state, event, room_version + ) # Check whether this room is the result of an upgrade of a room we already know # about. 
If so, migrate over user information @@ -1486,7 +1505,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content): - origin, event, event_format_version = yield self._make_and_verify_event( + origin, event, room_version = yield self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not @@ -1513,7 +1532,11 @@ class FederationHandler(BaseHandler): def _make_and_verify_event( self, target_hosts, room_id, user_id, membership, content={}, params=None ): - origin, event, format_ver = yield self.federation_client.make_membership_event( + ( + origin, + event, + room_version, + ) = yield self.federation_client.make_membership_event( target_hosts, room_id, user_id, membership, content, params=params ) @@ -1525,7 +1548,7 @@ class FederationHandler(BaseHandler): assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - return origin, event, format_ver + return origin, event, room_version @defer.inlineCallbacks @log_function @@ -1810,7 +1833,14 @@ class FederationHandler(BaseHandler): ) @defer.inlineCallbacks - def _persist_auth_tree(self, origin, auth_events, state, event): + def _persist_auth_tree( + self, + origin: str, + auth_events: List[EventBase], + state: List[EventBase], + event: EventBase, + room_version: RoomVersion, + ): """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event separately. Notifies about the persisted events @@ -1819,10 +1849,12 @@ class FederationHandler(BaseHandler): Will attempt to fetch missing auth events. Args: - origin (str): Where the events came from - auth_events (list) - state (list) - event (Event) + origin: Where the events came from + auth_events + state + event + room_version: The room version we expect this room to have, and + will raise if it doesn't match the version in the create event. Returns: Deferred @@ -1848,10 +1880,13 @@ class FederationHandler(BaseHandler): # invalid, and it would fail auth checks anyway. 
raise SynapseError(400, "No create event in state") - room_version = create_event.content.get( + room_version_id = create_event.content.get( "room_version", RoomVersions.V1.identifier ) + if room_version.identifier != room_version_id: + raise SynapseError(400, "Room version mismatch") + missing_auth_events = set() for e in itertools.chain(auth_events, state, [event]): for e_id in e.auth_event_ids(): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 9f50196ea7..a9490782b7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -29,7 +29,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.http.endpoint import parse_and_validate_server_name from synapse.storage.state import StateFilter from synapse.types import ( @@ -100,13 +100,15 @@ class RoomCreationHandler(BaseHandler): self.third_party_event_rules = hs.get_third_party_event_rules() @defer.inlineCallbacks - def upgrade_room(self, requester, old_room_id, new_version): + def upgrade_room( + self, requester: Requester, old_room_id: str, new_version: RoomVersion + ): """Replace a room with a new room with a different version Args: - requester (synapse.types.Requester): the user requesting the upgrade - old_room_id (unicode): the id of the room to be replaced - new_version (unicode): the new room version to use + requester: the user requesting the upgrade + old_room_id: the id of the room to be replaced + new_version: the new room version to use Returns: Deferred[unicode]: the new room id @@ -151,7 +153,7 @@ class RoomCreationHandler(BaseHandler): if r is None: raise NotFoundError("Unknown room id %s" % (old_room_id,)) new_room_id = yield self._generate_room_id( - creator_id=user_id, is_public=r["is_public"] + creator_id=user_id, is_public=r["is_public"], room_version=new_version, ) logger.info("Creating new room %s to replace %s", new_room_id, old_room_id) @@ -299,18 +301,22 @@ class RoomCreationHandler(BaseHandler): @defer.inlineCallbacks def clone_existing_room( - self, requester, old_room_id, new_room_id, new_room_version, tombstone_event_id + self, + requester: Requester, + old_room_id: str, + new_room_id: str, + new_room_version: RoomVersion, + tombstone_event_id: str, ): """Populate a new room based on an old room Args: - requester (synapse.types.Requester): the user requesting the upgrade - old_room_id (unicode): the id of the room to be replaced - new_room_id (unicode): the id to give the new room (should already have been + requester: the user requesting the upgrade + old_room_id : the id of the room to be replaced + new_room_id: the id to give the new room (should already have been created with _gemerate_room_id()) - new_room_version (unicode): the new room version to use - tombstone_event_id (unicode|str): the ID of the tombstone event in the old - room. + new_room_version: the new room version to use + tombstone_event_id: the ID of the tombstone event in the old room. 
 
         Returns:
             Deferred
         """
         user_id = requester.user.to_string()
 
         if not self.spam_checker.user_may_create_room(user_id):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         creation_content = {
-            "room_version": new_room_version,
+            "room_version": new_room_version.identifier,
             "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
         }
 
@@ -577,14 +583,15 @@ class RoomCreationHandler(BaseHandler):
         if ratelimit:
             yield self.ratelimit(requester)
 
-        room_version = config.get(
+        room_version_id = config.get(
             "room_version", self.config.default_room_version.identifier
         )
 
-        if not isinstance(room_version, string_types):
+        if not isinstance(room_version_id, string_types):
             raise SynapseError(400, "room_version must be a string", Codes.BAD_JSON)
 
-        if room_version not in KNOWN_ROOM_VERSIONS:
+        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+        if room_version is None:
             raise SynapseError(
                 400,
                 "Your homeserver does not support this room version",
@@ -631,7 +638,9 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
 
-        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
+        room_id = yield self._generate_room_id(
+            creator_id=user_id, is_public=is_public, room_version=room_version,
+        )
 
         directory_handler = self.hs.get_handlers().directory_handler
         if room_alias:
@@ -660,7 +669,7 @@ class RoomCreationHandler(BaseHandler):
         creation_content = config.get("creation_content", {})
 
         # override any attempt to set room versions via the creation_content
-        creation_content["room_version"] = room_version
+        creation_content["room_version"] = room_version.identifier
 
         yield self._send_events_for_new_room(
             requester,
@@ -849,7 +858,9 @@ class RoomCreationHandler(BaseHandler):
             yield send(etype=etype, state_key=state_key, content=content)
 
     @defer.inlineCallbacks
-    def _generate_room_id(self, creator_id, is_public):
+    def _generate_room_id(
+        self, creator_id: str, is_public: bool, room_version: RoomVersion,
+    ):
         # autogen room IDs and try to create it. We may clash, so just
         # try a few times till one goes through, giving up eventually.
         attempts = 0
@@ -863,6 +874,7 @@
                     room_id=gen_room_id,
                     room_creator_user_id=creator_id,
                     is_public=is_public,
+                    room_version=room_version,
                 )
                 return gen_room_id
             except StoreError:
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index ca97330797..f357015a70 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -64,7 +64,7 @@ class RoomUpgradeRestServlet(RestServlet):
         assert_params_in_dict(content, ("new_version",))
-        new_version = content["new_version"]
 
-        if new_version not in KNOWN_ROOM_VERSIONS:
+        new_version = KNOWN_ROOM_VERSIONS.get(content["new_version"])
+        if new_version is None:
             raise SynapseError(
                 400,
                 "Your homeserver does not support this room version",
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index d968803ad2..9a17e336ba 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -29,9 +29,10 @@ from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
+from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.search import SearchStore
-from synapse.storage.database import Database
+from synapse.storage.database import Database, LoggingTransaction
 from synapse.types import ThirdPartyInstanceID
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
 
@@ -734,6 +735,7 @@ class RoomWorkerStore(SQLBaseStore):
 
 class RoomBackgroundUpdateStore(SQLBaseStore):
     REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
+    ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
 
     def __init__(self, database: Database, db_conn, hs):
         super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs)
@@ -749,6 +751,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             self._remove_tombstoned_rooms_from_directory,
         )
 
+        self.db.updates.register_background_update_handler(
+            self.ADD_ROOMS_ROOM_VERSION_COLUMN,
+            self._background_add_rooms_room_version_column,
+        )
+
     @defer.inlineCallbacks
     def _background_insert_retention(self, progress, batch_size):
         """Retrieves a list of all rooms within a range and inserts an entry for each of
@@ -817,6 +824,73 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
         defer.returnValue(batch_size)
 
+    async def _background_add_rooms_room_version_column(
+        self, progress: dict, batch_size: int
+    ):
+        """Background update to go and add room version information to `rooms`
+        table from `current_state_events` table.
+        """
+
+        last_room_id = progress.get("room_id", "")
+
+        def _background_add_rooms_room_version_column_txn(txn: LoggingTransaction):
+            sql = """
+                SELECT room_id, json FROM current_state_events
+                INNER JOIN event_json USING (room_id, event_id)
+                WHERE room_id > ? AND type = 'm.room.create' AND state_key = ''
+                ORDER BY room_id
+                LIMIT ?
+ """ + + txn.execute(sql, (last_room_id, batch_size)) + + updates = [] + for room_id, event_json in txn: + event_dict = json.loads(event_json) + room_version_id = event_dict.get("content", {}).get( + "room_version", RoomVersions.V1.identifier + ) + + creator = event_dict.get("content").get("creator") + + updates.append((room_id, creator, room_version_id)) + + if not updates: + return True + + new_last_room_id = "" + for room_id, creator, room_version_id in updates: + # We upsert here just in case we don't already have a row, + # mainly for paranoia as much badness would happen if we don't + # insert the row and then try and get the room version for the + # room. + self.db.simple_upsert_txn( + txn, + table="rooms", + keyvalues={"room_id": room_id}, + values={"room_version": room_version_id}, + insertion_values={"is_public": False, "creator": creator}, + ) + new_last_room_id = room_id + + self.db.updates._background_update_progress_txn( + txn, self.ADD_ROOMS_ROOM_VERSION_COLUMN, {"room_id": new_last_room_id} + ) + + return False + + end = await self.db.runInteraction( + "_background_add_rooms_room_version_column", + _background_add_rooms_room_version_column_txn, + ) + + if end: + await self.db.updates._end_background_update( + self.ADD_ROOMS_ROOM_VERSION_COLUMN + ) + + return batch_size + async def _remove_tombstoned_rooms_from_directory( self, progress, batch_size ) -> int: @@ -881,14 +955,21 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): self.config = hs.config @defer.inlineCallbacks - def store_room(self, room_id, room_creator_user_id, is_public): + def store_room( + self, + room_id: str, + room_creator_user_id: str, + is_public: bool, + room_version: RoomVersion, + ): """Stores a room. Args: - room_id (str): The desired room ID, can be None. - room_creator_user_id (str): The user ID of the room creator. - is_public (bool): True to indicate that this room should appear in - public room lists. + room_id: The desired room ID, can be None. + room_creator_user_id: The user ID of the room creator. + is_public: True to indicate that this room should appear in + public room lists. + room_version: The version of the room Raises: StoreError if the room could not be stored. """ @@ -902,6 +983,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): "room_id": room_id, "creator": room_creator_user_id, "is_public": is_public, + "room_version": room_version.identifier, }, ) if is_public: diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql new file mode 100644 index 0000000000..352a66f5b0 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column.sql @@ -0,0 +1,24 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +-- We want to start storing the room version independently of +-- `current_state_events` so that we can delete stale entries from it without +-- losing the information. +ALTER TABLE rooms ADD COLUMN room_version TEXT; + + +INSERT into background_updates (update_name, progress_json) + VALUES ('add_rooms_room_version_column', '{}'); diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 33bebd1c48..bd7b0276f1 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -60,24 +60,34 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def __init__(self, database: Database, db_conn, hs): super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) - @defer.inlineCallbacks - def get_room_version(self, room_id): + @cached(max_entries=10000) + async def get_room_version(self, room_id: str) -> str: """Get the room_version of a given room - Args: - room_id (str) - - Returns: - Deferred[str] - Raises: - NotFoundError if the room is unknown + NotFoundError: if the room is unknown """ - # for now we do this by looking at the create event. We may want to cache this - # more intelligently in future. + + # First we try looking up room version from the database, but for old + # rooms we might not have added the room version to it yet so we fall + # back to previous behaviour and look in current state events. + + # We really should have an entry in the rooms table for every room we + # care about, but let's be a bit paranoid (at least while the background + # update is happening) to avoid breaking existing rooms. + version = await self.db.simple_select_one_onecol( + table="rooms", + keyvalues={"room_id": room_id}, + retcol="room_version", + desc="get_room_version", + allow_none=True, + ) + + if version is not None: + return version # Retrieve the room's create event - create_event = yield self.get_create_event_for_room(room_id) + create_event = await self.get_create_event_for_room(room_id) return create_event.content.get("room_version", "1") @defer.inlineCallbacks diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 3ddaa151fe..086adeb8fd 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -17,6 +17,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes +from synapse.api.room_versions import RoomVersions from synapse.types import RoomAlias, RoomID, UserID from tests import unittest @@ -40,6 +41,7 @@ class RoomStoreTestCase(unittest.TestCase): self.room.to_string(), room_creator_user_id=self.u_creator.to_string(), is_public=True, + room_version=RoomVersions.V1, ) @defer.inlineCallbacks @@ -68,7 +70,10 @@ class RoomEventsStoreTestCase(unittest.TestCase): self.room = RoomID.from_string("!abcde:test") yield self.store.store_room( - self.room.to_string(), room_creator_user_id="@creator:text", is_public=True + self.room.to_string(), + room_creator_user_id="@creator:text", + is_public=True, + room_version=RoomVersions.V1, ) @defer.inlineCallbacks diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index d6ecf102f8..04d58fbf24 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -45,7 +45,10 @@ class StateStoreTestCase(tests.unittest.TestCase): self.room = RoomID.from_string("!abc123:test") yield self.store.store_room( - self.room.to_string(), room_creator_user_id="@creator:text", is_public=True + self.room.to_string(), + room_creator_user_id="@creator:text", + 
is_public=True, + room_version=RoomVersions.V1, ) @defer.inlineCallbacks diff --git a/tests/utils.py b/tests/utils.py index e2e9cafd79..513f358f4f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -639,9 +639,17 @@ def create_room(hs, room_id, creator_id): """ persistence_store = hs.get_storage().persistence + store = hs.get_datastore() event_builder_factory = hs.get_event_builder_factory() event_creation_handler = hs.get_event_creation_handler() + yield store.store_room( + room_id=room_id, + room_creator_user_id=creator_id, + is_public=False, + room_version=RoomVersions.V1, + ) + builder = event_builder_factory.for_room_version( RoomVersions.V1, { From bdbeeb94ecc8e99f1401174df220a38e130db164 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Jan 2020 13:05:24 +0000 Subject: [PATCH 061/213] Fix setting `mau_limit_reserved_threepids` config (#6793) Calling the invalidation function during initialisation of the data stores introduces a circular dependency, causing Synapse to fail to start. --- changelog.d/6793.bugfix | 1 + synapse/storage/data_stores/main/monthly_active_users.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6793.bugfix diff --git a/changelog.d/6793.bugfix b/changelog.d/6793.bugfix new file mode 100644 index 0000000000..564d4596ea --- /dev/null +++ b/changelog.d/6793.bugfix @@ -0,0 +1 @@ +Fix bug where setting `mau_limit_reserved_threepids` config would cause Synapse to refuse to start. diff --git a/synapse/storage/data_stores/main/monthly_active_users.py b/synapse/storage/data_stores/main/monthly_active_users.py index 89a41542a3..1507a14e09 100644 --- a/synapse/storage/data_stores/main/monthly_active_users.py +++ b/synapse/storage/data_stores/main/monthly_active_users.py @@ -121,7 +121,13 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): if user_id: is_support = self.is_support_user_txn(txn, user_id) if not is_support: - self.upsert_monthly_active_user_txn(txn, user_id) + # We do this manually here to avoid hitting #6791 + self.db.simple_upsert_txn( + txn, + table="monthly_active_users", + keyvalues={"user_id": user_id}, + values={"timestamp": int(self._clock.time_msec())}, + ) else: logger.warning("mau limit reserved threepid %s not found in db" % tp) From 77d9357226687a177c865bcdeaa0e750612fc078 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Jan 2020 13:09:36 +0000 Subject: [PATCH 062/213] 1.9.1 --- CHANGES.md | 9 +++++++++ changelog.d/6793.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6793.bugfix diff --git a/CHANGES.md b/CHANGES.md index 8a068ae8c9..4c413b72ee 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.9.1 (2020-01-28) +========================== + +Bugfixes +-------- + +- Fix bug where setting `mau_limit_reserved_threepids` config would cause Synapse to refuse to start. ([\#6793](https://github.com/matrix-org/synapse/issues/6793)) + + Synapse 1.9.0 (2020-01-23) ========================== diff --git a/changelog.d/6793.bugfix b/changelog.d/6793.bugfix deleted file mode 100644 index 564d4596ea..0000000000 --- a/changelog.d/6793.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where setting `mau_limit_reserved_threepids` config would cause Synapse to refuse to start. 
diff --git a/debian/changelog b/debian/changelog
index 49f3187691..74eb29c5ee 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.9.1) stable; urgency=medium
+
+  * New synapse release 1.9.1.
+
+ -- Synapse Packaging team  Tue, 28 Jan 2020 13:09:23 +0000
+
 matrix-synapse-py3 (1.9.0) stable; urgency=medium
 
   * New synapse release 1.9.0.
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 6236e13aa3..a236888d3c 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.9.0"
+__version__ = "1.9.1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
From 02b44db922f01a35787d2535a834c9774b68020b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Jan 2020 13:44:21 +0000
Subject: [PATCH 063/213] Warn if postgres database has non-C locale. (#6734)

Using a non-C locale can cause issues when upgrading the OS.
---
 UPGRADE.rst                         |  9 +++++++
 changelog.d/6734.bugfix             |  1 +
 docs/postgres.md                    | 20 +++++++++++++-
 synapse/storage/engines/postgres.py | 42 +++++++++++++++++++++++++++++
 synapse/storage/engines/sqlite.py   |  5 ++++
 synapse/storage/prepare_database.py |  5 ++++
 6 files changed, 81 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6734.bugfix
diff --git a/UPGRADE.rst b/UPGRADE.rst
index a0202932b1..470246f128 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -76,6 +76,15 @@ for example:
 
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to ****
+===============================
+
+Synapse will now log a warning on start up if used with a PostgreSQL database
+that has a non-recommended locale set.
+
+See [docs/postgres.md](docs/postgres.md) for details.
+
+
 Upgrading to v1.8.0
 ===================
 
diff --git a/changelog.d/6734.bugfix b/changelog.d/6734.bugfix
new file mode 100644
index 0000000000..79c6bab4d1
--- /dev/null
+++ b/changelog.d/6734.bugfix
@@ -0,0 +1 @@
+Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS).
diff --git a/docs/postgres.md b/docs/postgres.md
index 7cb1ad18d4..e0793ecee8 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -32,7 +32,7 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
     su - postgres
     # Or, if your system uses sudo to get administrative rights
     sudo -u postgres bash
-    
+
 Then, create a user ``synapse_user`` with:
 
     createuser --pwprompt synapse_user
@@ -63,6 +63,24 @@ You may need to enable password authentication so `synapse_user` can
 connect to the database. See
 .
 
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to take a dump and recreate the database with
+the correct `COLLATE` and `CTYPE` parameters (as per
+[docs/postgres.md](docs/postgres.md)). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database,
+however extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
+
+
 ## Tuning Postgres
 
 The default settings should be fine for most deployments. For larger
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index c84cb452b0..a077345960 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -13,8 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
+
 from ._base import IncorrectDatabaseSetup
 
+logger = logging.getLogger(__name__)
+
 
 class PostgresEngine(object):
     single_threaded = False
@@ -52,6 +56,44 @@ class PostgresEngine(object):
                 "See docs/postgres.rst for more information." % (rows[0][0],)
             )
 
+        txn.execute(
+            "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
+        )
+        collation, ctype = txn.fetchone()
+        if collation != "C":
+            logger.warning(
+                "Database has incorrect collation of %r. Should be 'C'", collation
+            )
+
+        if ctype != "C":
+            logger.warning(
+                "Database has incorrect ctype of %r. Should be 'C'", ctype
+            )
+
+    def check_new_database(self, txn):
+        """Gets called when setting up a brand new database. This allows us to
+        apply stricter checks on new databases versus existing databases.
+        """
+
+        txn.execute(
+            "SELECT datcollate, datctype FROM pg_database WHERE datname = current_database()"
+        )
+        collation, ctype = txn.fetchone()
+
+        errors = []
+
+        if collation != "C":
+            errors.append(" - 'COLLATE' is set to %r. Should be 'C'" % (collation,))
+
+        if ctype != "C":
+            errors.append(" - 'CTYPE' is set to %r. Should be 'C'" % (ctype,))
+
+        if errors:
+            raise IncorrectDatabaseSetup(
+                "Database is incorrectly configured:\n\n%s\n\n"
+                "See docs/postgres.md for more information." % ("\n".join(errors))
+            )
+
     def convert_param_style(self, sql):
         return sql.replace("?", "%s")
 
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index cbf52f5191..641e490697 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -59,6 +59,11 @@ class Sqlite3Engine(object):
         if version < (3, 11, 0):
             raise RuntimeError("Synapse requires sqlite 3.11 or above.")
 
+    def check_new_database(self, txn):
+        """Gets called when setting up a brand new database. This allows us to
+        apply stricter checks on new databases versus existing databases.
+        """
+
     def convert_param_style(self, sql):
         return sql
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index e86984cd50..c285ef52a0 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -136,6 +136,11 @@ def _setup_new_database(cur, database_engine, data_stores):
         data_stores (list[str]): The names of the data stores to instantiate
             on the given database.
     """
+
+    # We're about to set up a brand new database so we check that it's
+    # configured to our liking.
+    database_engine.check_new_database(cur)
+
     current_dir = os.path.join(dir_path, "schema", "full_schemas")
     directory_entries = os.listdir(current_dir)
 
From a8ce7aeb433e08f46306797a1252668c178a7825 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 28 Jan 2020 14:18:29 +0000
Subject: [PATCH 064/213] Pass room version object into event_auth.check and
 check_redaction (#6788)

These are easier to work with than the strings and we normally have one
around.

This fixes `FederationHandler._persist_auth_tree` which was passing a
RoomVersion object into event_auth.check instead of a string.
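
As an illustration of the new calling convention (a condensed sketch, not
part of the diff below; the call sites in `synapse/api/auth.py` and
`synapse/handlers/message.py` follow this shape):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS

    # We still fetch the version identifier (a string such as "5") from the
    # store, but convert it to a RoomVersion object before doing auth checks.
    room_version = yield self.store.get_room_version(event.room_id)
    room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

    # event_auth.check and check_redaction now take the object, not the string.
    event_auth.check(room_version_obj, event, auth_events=auth_events)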
--- changelog.d/6788.misc | 1 + synapse/api/auth.py | 7 +++++-- synapse/event_auth.py | 34 +++++++++++++++++++++------------- synapse/handlers/federation.py | 18 +++++++++++------- synapse/handlers/message.py | 10 +++++++--- synapse/state/v1.py | 4 ++-- synapse/state/v2.py | 4 +++- tests/test_event_auth.py | 11 ++++------- 8 files changed, 54 insertions(+), 35 deletions(-) create mode 100644 changelog.d/6788.misc diff --git a/changelog.d/6788.misc b/changelog.d/6788.misc new file mode 100644 index 0000000000..5537355bea --- /dev/null +++ b/changelog.d/6788.misc @@ -0,0 +1 @@ +Record room versions in the `rooms` table. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 2cbfab2569..8b1277ad02 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -33,6 +33,7 @@ from synapse.api.errors import ( MissingClientTokenError, ResourceLimitError, ) +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.server import is_threepid_reserved from synapse.types import StateMap, UserID from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache @@ -77,15 +78,17 @@ class Auth(object): self._account_validity = hs.config.account_validity @defer.inlineCallbacks - def check_from_context(self, room_version, event, context, do_sig_check=True): + def check_from_context(self, room_version: str, event, context, do_sig_check=True): prev_state_ids = yield context.get_prev_state_ids() auth_events_ids = yield self.compute_auth_events( event, prev_state_ids, for_verification=True ) auth_events = yield self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in itervalues(auth_events)} + + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] event_auth.check( - room_version, event, auth_events=auth_events, do_sig_check=do_sig_check + room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check ) @defer.inlineCallbacks diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e3a1ba47a0..016d5678e5 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,17 +24,27 @@ from unpaddedbase64 import decode_base64 from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, EventSizeError, SynapseError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersion, +) from synapse.types import UserID, get_domain_from_id logger = logging.getLogger(__name__) -def check(room_version, event, auth_events, do_sig_check=True, do_size_check=True): +def check( + room_version_obj: RoomVersion, + event, + auth_events, + do_sig_check=True, + do_size_check=True, +): """ Checks if this event is correctly authed. Args: - room_version (str): the version of the room + room_version_obj: the version of the room event: the event being checked. auth_events (dict: event-key -> event): the existing room state. 
@@ -97,10 +108,11 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru 403, "Creation event's room_id domain does not match sender's" ) - room_version = event.content.get("room_version", "1") - if room_version not in KNOWN_ROOM_VERSIONS: + room_version_prop = event.content.get("room_version", "1") + if room_version_prop not in KNOWN_ROOM_VERSIONS: raise AuthError( - 403, "room appears to have unsupported version %s" % (room_version,) + 403, + "room appears to have unsupported version %s" % (room_version_prop,), ) # FIXME logger.debug("Allowing! %s", event) @@ -160,7 +172,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru _check_power_levels(event, auth_events) if event.type == EventTypes.Redaction: - check_redaction(room_version, event, auth_events) + check_redaction(room_version_obj, event, auth_events) logger.debug("Allowing! %s", event) @@ -386,7 +398,7 @@ def _can_send_event(event, auth_events): return True -def check_redaction(room_version, event, auth_events): +def check_redaction(room_version_obj: RoomVersion, event, auth_events): """Check whether the event sender is allowed to redact the target event. Returns: @@ -406,11 +418,7 @@ def check_redaction(room_version, event, auth_events): if user_level >= redact_level: return False - v = KNOWN_ROOM_VERSIONS.get(room_version) - if not v: - raise RuntimeError("Unrecognized room version %r" % (room_version,)) - - if v.event_format == EventFormatVersions.V1: + if room_version_obj.event_format == EventFormatVersions.V1: redacter_domain = get_domain_from_id(event.event_id) redactee_domain = get_domain_from_id(event.redacts) if redacter_domain == redactee_domain: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index f824ee79a0..180f165a7a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -47,7 +47,7 @@ from synapse.api.errors import ( from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import auth_types_for_event -from synapse.events import EventBase, room_version_to_event_format +from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator from synapse.logging.context import ( @@ -1198,7 +1198,7 @@ class FederationHandler(BaseHandler): """ logger.debug("Joining %s to %s", joinee, room_id) - origin, event, room_version = yield self._make_and_verify_event( + origin, event, room_version_obj = yield self._make_and_verify_event( target_hosts, room_id, joinee, @@ -1227,7 +1227,7 @@ class FederationHandler(BaseHandler): except ValueError: pass - event_format_version = room_version_to_event_format(room_version.identifier) + event_format_version = room_version_obj.event_format ret = yield self.federation_client.send_join( target_hosts, event, event_format_version ) @@ -1251,14 +1251,14 @@ class FederationHandler(BaseHandler): room_id=room_id, room_creator_user_id="", is_public=False, - room_version=room_version, + room_version=room_version_obj, ) except Exception: # FIXME pass yield self._persist_auth_tree( - origin, auth_chain, state, event, room_version + origin, auth_chain, state, event, room_version_obj ) # Check whether this room is the result of an upgrade of a room we already know @@ -2022,6 +2022,7 @@ class FederationHandler(BaseHandler): if do_soft_fail_check: room_version = yield 
self.store.get_room_version(event.room_id) + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] # Calculate the "current state". if state is not None: @@ -2071,7 +2072,9 @@ class FederationHandler(BaseHandler): } try: - event_auth.check(room_version, event, auth_events=current_auth_events) + event_auth.check( + room_version_obj, event, auth_events=current_auth_events + ) except AuthError as e: logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True @@ -2155,6 +2158,7 @@ class FederationHandler(BaseHandler): defer.Deferred[EventContext]: updated context object """ room_version = yield self.store.get_room_version(event.room_id) + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: context = yield self._update_auth_events_and_context_for_auth( @@ -2172,7 +2176,7 @@ class FederationHandler(BaseHandler): ) try: - event_auth.check(room_version, event, auth_events=auth_events) + event_auth.check(room_version_obj, event, auth_events=auth_events) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8ea3aca2f4..9a0f661b9b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -40,7 +40,7 @@ from synapse.api.errors import ( NotFoundError, SynapseError, ) -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.api.urls import ConsentURIBuilder from synapse.events.validator import EventValidator from synapse.logging.context import run_in_background @@ -962,9 +962,13 @@ class EventCreationHandler(object): ) auth_events = yield self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events.values()} - room_version = yield self.store.get_room_version(event.room_id) - if event_auth.check_redaction(room_version, event, auth_events=auth_events): + room_version = yield self.store.get_room_version(event.room_id) + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + + if event_auth.check_redaction( + room_version_obj, event, auth_events=auth_events + ): # this user doesn't have 'redact' rights, so we need to do some more # checks on the original event. Let's start by checking the original # event exists. 
diff --git a/synapse/state/v1.py b/synapse/state/v1.py index d6c34ce3b7..24b7c0faef 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -281,7 +281,7 @@ def _resolve_auth_events(events, auth_events): try: # The signatures have already been checked at this point event_auth.check( - RoomVersions.V1.identifier, + RoomVersions.V1, event, auth_events, do_sig_check=False, @@ -299,7 +299,7 @@ def _resolve_normal_events(events, auth_events): try: # The signatures have already been checked at this point event_auth.check( - RoomVersions.V1.identifier, + RoomVersions.V1, event, auth_events, do_sig_check=False, diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 6216fdd204..531018c6a5 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -26,6 +26,7 @@ import synapse.state from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.types import StateMap @@ -402,6 +403,7 @@ def _iterative_auth_checks( Deferred[StateMap[str]]: Returns the final updated state """ resolved_state = base_state.copy() + room_version_obj = KNOWN_ROOM_VERSIONS[room_version] for event_id in event_ids: event = event_map[event_id] @@ -430,7 +432,7 @@ def _iterative_auth_checks( try: event_auth.check( - room_version, + room_version_obj, event, auth_events, do_sig_check=False, diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 8b2741d277..ca20b085a2 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -37,7 +37,7 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send state event_auth.check( - RoomVersions.V1.identifier, + RoomVersions.V1, _random_state_event(creator), auth_events, do_sig_check=False, @@ -47,7 +47,7 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check, - RoomVersions.V1.identifier, + RoomVersions.V1, _random_state_event(joiner), auth_events, do_sig_check=False, @@ -76,7 +76,7 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check, - RoomVersions.V1.identifier, + RoomVersions.V1, _random_state_event(pleb), auth_events, do_sig_check=False, @@ -84,10 +84,7 @@ class EventAuthTestCase(unittest.TestCase): # king should be able to send state event_auth.check( - RoomVersions.V1.identifier, - _random_state_event(king), - auth_events, - do_sig_check=False, + RoomVersions.V1, _random_state_event(king), auth_events, do_sig_check=False, ) From 49d3bca37b91fa092e13fd28c42dcf970fb86bb7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 27 Jan 2020 16:14:54 +0000 Subject: [PATCH 065/213] Implement updated auth rules from MSC2260 --- synapse/api/room_versions.py | 16 ++++++++++++++++ synapse/event_auth.py | 24 +++++++++++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index c6f50fd7b9..cf7ee60d3a 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -57,6 +57,9 @@ class RoomVersion(object): state_res = attr.ib() # int; one of the StateResolutionVersions enforce_key_validity = attr.ib() # bool + # bool: before MSC2260, anyone was allowed to send an aliases event + special_case_aliases_auth = attr.ib(type=bool, default=False) + class RoomVersions(object): V1 = RoomVersion( @@ -65,6 +68,7 @@ class RoomVersions(object): EventFormatVersions.V1, StateResolutionVersions.V1, 
enforce_key_validity=False, + special_case_aliases_auth=True, ) V2 = RoomVersion( "2", @@ -72,6 +76,7 @@ class RoomVersions(object): EventFormatVersions.V1, StateResolutionVersions.V2, enforce_key_validity=False, + special_case_aliases_auth=True, ) V3 = RoomVersion( "3", @@ -79,6 +84,7 @@ class RoomVersions(object): EventFormatVersions.V2, StateResolutionVersions.V2, enforce_key_validity=False, + special_case_aliases_auth=True, ) V4 = RoomVersion( "4", @@ -86,6 +92,7 @@ class RoomVersions(object): EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=False, + special_case_aliases_auth=True, ) V5 = RoomVersion( "5", @@ -93,6 +100,14 @@ class RoomVersions(object): EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=True, + special_case_aliases_auth=True, + ) + MSC2260_DEV = RoomVersion( + "org.matrix.msc2260", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, ) @@ -104,5 +119,6 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V3, RoomVersions.V4, RoomVersions.V5, + RoomVersions.MSC2260_DEV, ) } # type: Dict[str, RoomVersion] diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 016d5678e5..3240e8a7b2 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -100,7 +100,12 @@ def check( if not event.signatures.get(event_id_domain): raise AuthError(403, "Event not signed by sending server") + # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules + # + # 1. If type is m.room.create: if event.type == EventTypes.Create: + # 1b. If the domain of the room_id does not match the domain of the sender, + # reject. sender_domain = get_domain_from_id(event.sender) room_id_domain = get_domain_from_id(event.room_id) if room_id_domain != sender_domain: @@ -108,40 +113,49 @@ def check( 403, "Creation event's room_id domain does not match sender's" ) + # 1c. If content.room_version is present and is not a recognised version, reject room_version_prop = event.content.get("room_version", "1") if room_version_prop not in KNOWN_ROOM_VERSIONS: raise AuthError( 403, "room appears to have unsupported version %s" % (room_version_prop,), ) - # FIXME + logger.debug("Allowing! %s", event) return + # 3. If event does not have a m.room.create in its auth_events, reject. creation_event = auth_events.get((EventTypes.Create, ""), None) - if not creation_event: raise AuthError(403, "No create event in auth events") + # additional check for m.federate creating_domain = get_domain_from_id(event.room_id) originating_domain = get_domain_from_id(event.sender) if creating_domain != originating_domain: if not _can_federate(event, auth_events): raise AuthError(403, "This room has been marked as unfederatable.") - # FIXME: Temp hack + # 4. If type is m.room.aliases if event.type == EventTypes.Aliases: + # 4a. If event has no state_key, reject if not event.is_state(): raise AuthError(403, "Alias event must be a state event") if not event.state_key: raise AuthError(403, "Alias event must have non-empty state_key") + + # 4b. If sender's domain doesn't matches [sic] state_key, reject sender_domain = get_domain_from_id(event.sender) if event.state_key != sender_domain: raise AuthError( 403, "Alias event's state_key does not match sender's domain" ) - logger.debug("Allowing! %s", event) - return + + # 4c. Otherwise, allow. + # This is removed by https://github.com/matrix-org/matrix-doc/pull/2260 + if room_version.special_case_aliases_auth: + logger.debug("Allowing! 
%s", event) + return if logger.isEnabledFor(logging.DEBUG): logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()]) From 99e205fc214a65d307d4f5484321bcbb32a60b5f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 27 Jan 2020 16:16:16 +0000 Subject: [PATCH 066/213] changelog --- changelog.d/6787.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6787.misc diff --git a/changelog.d/6787.misc b/changelog.d/6787.misc new file mode 100644 index 0000000000..82fe636173 --- /dev/null +++ b/changelog.d/6787.misc @@ -0,0 +1 @@ +Implement updated auth rules from MSC2260. From fbe0a82c0d603b12d8c1d9a2a1121dafb5616213 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Jan 2020 09:43:57 +0000 Subject: [PATCH 067/213] update changelog --- changelog.d/6787.feature | 1 + changelog.d/6787.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/6787.feature delete mode 100644 changelog.d/6787.misc diff --git a/changelog.d/6787.feature b/changelog.d/6787.feature new file mode 100644 index 0000000000..df9e4b77ab --- /dev/null +++ b/changelog.d/6787.feature @@ -0,0 +1 @@ +Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). diff --git a/changelog.d/6787.misc b/changelog.d/6787.misc deleted file mode 100644 index 82fe636173..0000000000 --- a/changelog.d/6787.misc +++ /dev/null @@ -1 +0,0 @@ -Implement updated auth rules from MSC2260. From e17a11066192354f6c6144135a14e7abe524f44c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Jan 2020 14:43:21 +0000 Subject: [PATCH 068/213] Detect unknown remote devices and mark cache as stale (#6776) We just mark the fact that the cache may be stale in the database for now. --- changelog.d/6776.misc | 1 + synapse/handlers/devicemessage.py | 57 ++++++++++++++++++- synapse/handlers/federation.py | 20 +++++++ synapse/replication/slave/storage/devices.py | 2 +- synapse/storage/data_stores/main/devices.py | 29 ++++++++-- .../57/device_list_remote_cache_stale.sql | 25 ++++++++ 6 files changed, 126 insertions(+), 8 deletions(-) create mode 100644 changelog.d/6776.misc create mode 100644 synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql diff --git a/changelog.d/6776.misc b/changelog.d/6776.misc new file mode 100644 index 0000000000..4f9a4ac7a5 --- /dev/null +++ b/changelog.d/6776.misc @@ -0,0 +1 @@ +Detect unknown remote devices and mark cache as stale. diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 73b9e120f5..5c5fe77be2 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -14,6 +14,7 @@ # limitations under the License. 
 import logging
+from typing import Any, Dict
 
 from canonicaljson import json
 
@@ -65,6 +66,9 @@ class DeviceMessageHandler(object):
                 logger.warning("Request for keys for non-local user %s", user_id)
                 raise SynapseError(400, "Not a user here")
 
+            if not by_device:
+                continue
+
             messages_by_device = {
                 device_id: {
                     "content": message_content,
@@ -73,8 +77,11 @@ class DeviceMessageHandler(object):
                 }
                 for device_id, message_content in by_device.items()
             }
-            if messages_by_device:
-                local_messages[user_id] = messages_by_device
+            local_messages[user_id] = messages_by_device
+
+            yield self._check_for_unknown_devices(
+                message_type, sender_user_id, by_device
+            )
 
         stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
             origin, message_id, local_messages
@@ -84,6 +91,52 @@ class DeviceMessageHandler(object):
             "to_device_key", stream_id, users=local_messages.keys()
         )
 
+    @defer.inlineCallbacks
+    def _check_for_unknown_devices(
+        self,
+        message_type: str,
+        sender_user_id: str,
+        by_device: Dict[str, Dict[str, Any]],
+    ):
+        """Checks inbound device messages for unknown remote devices, and if
+        any are found, marks the remote cache for the user as stale.
+        """
+
+        if message_type != "m.room_key_request":
+            return
+
+        # Get the sending device IDs
+        requesting_device_ids = set()
+        for message_content in by_device.values():
+            device_id = message_content.get("requesting_device_id")
+            requesting_device_ids.add(device_id)
+
+        # Check if we are tracking the devices of the remote user.
+        room_ids = yield self.store.get_rooms_for_user(sender_user_id)
+        if not room_ids:
+            logger.info(
+                "Received device message from remote device we don't"
+                " share a room with: %s %s",
+                sender_user_id,
+                requesting_device_ids,
+            )
+            return
+
+        # If we are tracking, check that we know about the sending
+        # devices.
+        cached_devices = yield self.store.get_cached_devices_for_user(sender_user_id)
+
+        unknown_devices = requesting_device_ids - set(cached_devices)
+        if unknown_devices:
+            logger.info(
+                "Received device message from remote device not in our cache: %s %s",
+                sender_user_id,
+                unknown_devices,
+            )
+            yield self.store.mark_remote_user_device_cache_as_stale(sender_user_id)
+
+            # TODO: Poke something to start trying to refetch user's
+            # keys.
+
     @defer.inlineCallbacks
     def send_device_message(self, sender_user_id, message_type, messages):
         set_tag("number_of_messages", len(messages))
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 180f165a7a..a67020a259 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -742,6 +742,26 @@ class FederationHandler(BaseHandler):
                 user = UserID.from_string(event.state_key)
                 await self.user_joined_room(user, room_id)
 
+        # For encrypted messages we check that we know about the sending device,
+        # if we don't then we mark the device cache for that user as stale.
+        if event.type == EventTypes.Encrypted:
+            device_id = event.content.get("device_id")
+            if device_id is not None:
+                cached_devices = await self.store.get_cached_devices_for_user(
+                    event.sender
+                )
+                if device_id not in cached_devices:
+                    logger.info(
+                        "Received event from remote device not in our cache: %s %s",
+                        event.sender,
+                        device_id,
+                    )
+                    await self.store.mark_remote_user_device_cache_as_stale(
+                        event.sender
+                    )
+
+                    # TODO: Poke something to start trying to refetch user's
+                    # keys.
+
     @log_function
     async def backfill(self, dest, room_id, limit, extremities):
         """ Trigger a backfill request to `dest` for the given `room_id`
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index dc625e0d7a..1c77687eea 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -72,6 +72,6 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
                 destination, token
             )
 
-        self._get_cached_devices_for_user.invalidate((user_id,))
+        self.get_cached_devices_for_user.invalidate((user_id,))
         self._get_cached_user_device.invalidate_many((user_id,))
         self.get_device_list_last_stream_id_for_remote.invalidate((user_id,))
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index f0a7962dd0..30bf66b2b6 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -457,7 +457,7 @@ class DeviceWorkerStore(SQLBaseStore):
                 device = yield self._get_cached_user_device(user_id, device_id)
                 results.setdefault(user_id, {})[device_id] = device
             else:
-                results[user_id] = yield self._get_cached_devices_for_user(user_id)
+                results[user_id] = yield self.get_cached_devices_for_user(user_id)
 
         set_tag("in_cache", results)
         set_tag("not_in_cache", user_ids_not_in_cache)
@@ -475,12 +475,12 @@ class DeviceWorkerStore(SQLBaseStore):
         return db_to_json(content)
 
     @cachedInlineCallbacks()
-    def _get_cached_devices_for_user(self, user_id):
+    def get_cached_devices_for_user(self, user_id):
         devices = yield self.db.simple_select_list(
             table="device_lists_remote_cache",
             keyvalues={"user_id": user_id},
             retcols=("device_id", "content"),
-            desc="_get_cached_devices_for_user",
+            desc="get_cached_devices_for_user",
         )
         return {
             device["device_id"]: db_to_json(device["content"]) for device in devices
@@ -641,6 +641,18 @@ class DeviceWorkerStore(SQLBaseStore):
 
         return results
 
+    def mark_remote_user_device_cache_as_stale(self, user_id: str):
+        """Records that the server has reason to believe the cache of the devices
+        for the remote user is out of date.
+        """
+        return self.db.simple_upsert(
+            table="device_lists_remote_resync",
+            keyvalues={"user_id": user_id},
+            values={},
+            insertion_values={"added_ts": self._clock.time_msec()},
+            desc="mark_remote_user_device_cache_as_stale",
+        )
+
 
 class DeviceBackgroundUpdateStore(SQLBaseStore):
     def __init__(self, database: Database, db_conn, hs):
@@ -887,7 +899,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
 
         txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
-        txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
+        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
         txn.call_after(
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
         )
@@ -902,6 +914,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             lock=False,
         )
 
+        # If we're replacing the remote user's device list cache presumably
+        # we've done a full resync, so we remove the entry that says we need
+        # to resync.
+        self.db.simple_delete_txn(
+            txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id},
+        )
+
     def update_remote_device_list_cache(self, user_id, devices, stream_id):
         """Replace the entire cache of the remote user's devices.
@@ -942,7 +961,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             ],
         )
 
-        txn.call_after(self._get_cached_devices_for_user.invalidate, (user_id,))
+        txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
         txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
         txn.call_after(
             self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
diff --git a/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql
new file mode 100644
index 0000000000..c3b6de2099
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/device_list_remote_cache_stale.sql
@@ -0,0 +1,25 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Records whether the server thinks that a remote user's cached device lists
+-- may be out of date (e.g. if we have received a to-device message from a
+-- device we don't know about).
+CREATE TABLE IF NOT EXISTS device_lists_remote_resync (
+    user_id TEXT NOT NULL,
+    added_ts BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX device_lists_remote_resync_idx ON device_lists_remote_resync (user_id);
+CREATE INDEX device_lists_remote_resync_ts_idx ON device_lists_remote_resync (added_ts);
From a1f307f7d1ca6d4f83f9f43272a4b152cfdee299 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 28 Jan 2020 14:55:22 +0000
Subject: [PATCH 069/213] fix bad variable ref

---
 synapse/event_auth.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 3240e8a7b2..472f165044 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -153,7 +153,7 @@ def check(
 
         # 4c. Otherwise, allow.
         # This is removed by https://github.com/matrix-org/matrix-doc/pull/2260
-        if room_version.special_case_aliases_auth:
+        if room_version_obj.special_case_aliases_auth:
             logger.debug("Allowing! %s", event)
             return
 
From fcfb591b312d6ec124c67aef2136a2d5948cadbe Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Jan 2020 18:59:48 +0000
Subject: [PATCH 070/213] Fix outbound federation request metrics (#6795)

---
 changelog.d/6795.bugfix                | 1 +
 synapse/http/matrixfederationclient.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/6795.bugfix
diff --git a/changelog.d/6795.bugfix b/changelog.d/6795.bugfix
new file mode 100644
index 0000000000..d1585653b1
--- /dev/null
+++ b/changelog.d/6795.bugfix
@@ -0,0 +1 @@
+Fix outbound federation request metrics.
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 16765d54e0..6f1bb04d8b 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -408,6 +408,8 @@ class MatrixFederationHttpClient(object):
                     _sec_timeout,
                 )
 
+                outgoing_requests_counter.labels(method_bytes).inc()
+
                 try:
                     with Measure(self.clock, "outbound_request"):
                         # we don't want all the fancy cookie and redirect handling
@@ -440,6 +442,8 @@ class MatrixFederationHttpClient(object):
                     response.phrase.decode("ascii", errors="replace"),
                 )
 
+                incoming_responses_counter.labels(method_bytes, response.code).inc()
+
                 set_tag(tags.HTTP_STATUS_CODE, response.code)
 
                 if 200 <= response.code < 300:
From 2cad8baa7030a86efc103599d79412741654dc15 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jan 2020 09:56:41 +0000
Subject: [PATCH 071/213] Fix bug when querying remote user keys that require a
 resync. (#6796)

We ended up only returning a single device, rather than all of them.

---
 changelog.d/6796.bugfix      | 1 +
 synapse/handlers/e2e_keys.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6796.bugfix
diff --git a/changelog.d/6796.bugfix b/changelog.d/6796.bugfix
new file mode 100644
index 0000000000..206a157311
--- /dev/null
+++ b/changelog.d/6796.bugfix
@@ -0,0 +1 @@
+Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device.
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 2d889364d4..95a9d71f41 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -208,8 +208,9 @@ class E2eKeysHandler(object):
                 )
                 user_devices = user_devices["devices"]
 
+                user_results = results.setdefault(user_id, {})
                 for device in user_devices:
-                    results[user_id] = {device["device_id"]: device["keys"]}
+                    user_results[device["device_id"]] = device["keys"]
                 user_ids_updated.append(user_id)
             except Exception as e:
                 failures[destination] = _exception_to_failure(e)
From 611215a49cedf8d5f63c53168173763731d02260 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jan 2020 11:01:32 +0000
Subject: [PATCH 072/213] Delete current state when server leaves a room (#6792)

Otherwise it's just stale data, which may get deleted later anyway so
can't be relied on. It's also a bit of a shotgun if we're trying to get
the current state of a room we're not in.

---
 changelog.d/6792.misc                      |   1 +
 synapse/storage/data_stores/main/events.py | 183 +++++++++++++--------
 synapse/storage/persist_events.py          |  89 +++++++++-
 3 files changed, 198 insertions(+), 75 deletions(-)
 create mode 100644 changelog.d/6792.misc
diff --git a/changelog.d/6792.misc b/changelog.d/6792.misc
new file mode 100644
index 0000000000..fa31d509b3
--- /dev/null
+++ b/changelog.d/6792.misc
@@ -0,0 +1 @@
+Delete current state from the database when server leaves a room.
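
In outline, the new persistence path works like this (a condensed sketch of
the diff that follows, not runnable on its own):

    # After working out the state delta for the batch of events...
    delta = await self._calculate_state_delta(room_id, current_state)

    # ...check whether any local user is still joined once these events land.
    is_still_joined = await self._is_server_still_joined(
        room_id, ev_ctx_rm, delta, current_state
    )
    if not is_still_joined:
        latest_event_ids = []           # no forward extremities to keep for us
        current_state = {}              # nothing to prefill into the caches
        delta.no_longer_in_room = True  # tells the events store to drop the
                                        # room's current_state_events rows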
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index ce553566a5..c9d0d68c3a 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -32,6 +32,7 @@ from twisted.internet import defer import synapse.metrics from synapse.api.constants import EventContentFields, EventTypes from synapse.api.errors import SynapseError +from synapse.api.room_versions import RoomVersions from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event_dict @@ -468,84 +469,93 @@ class EventsStore( to_delete = delta_state.to_delete to_insert = delta_state.to_insert - # First we add entries to the current_state_delta_stream. We - # do this before updating the current_state_events table so - # that we can use it to calculate the `prev_event_id`. (This - # allows us to not have to pull out the existing state - # unnecessarily). - # - # The stream_id for the update is chosen to be the minimum of the stream_ids - # for the batch of the events that we are persisting; that means we do not - # end up in a situation where workers see events before the - # current_state_delta updates. - # - sql = """ - INSERT INTO current_state_delta_stream - (stream_id, room_id, type, state_key, event_id, prev_event_id) - SELECT ?, ?, ?, ?, ?, ( - SELECT event_id FROM current_state_events - WHERE room_id = ? AND type = ? AND state_key = ? + if delta_state.no_longer_in_room: + # Server is no longer in the room so we delete the room from + # current_state_events, being careful we've already updated the + # rooms.room_version column (which gets populated in a + # background task). + self._upsert_room_version_txn(txn, room_id) + + # Before deleting we populate the current_state_delta_stream + # so that async background tasks get told what happened. + sql = """ + INSERT INTO current_state_delta_stream + (stream_id, room_id, type, state_key, event_id, prev_event_id) + SELECT ?, room_id, type, state_key, null, event_id + FROM current_state_events + WHERE room_id = ? + """ + txn.execute(sql, (stream_id, room_id)) + + self.db.simple_delete_txn( + txn, table="current_state_events", keyvalues={"room_id": room_id}, ) - """ - txn.executemany( - sql, - ( - ( - stream_id, - room_id, - etype, - state_key, - None, - room_id, - etype, - state_key, + else: + # We're still in the room, so we update the current state as normal. + + # First we add entries to the current_state_delta_stream. We + # do this before updating the current_state_events table so + # that we can use it to calculate the `prev_event_id`. (This + # allows us to not have to pull out the existing state + # unnecessarily). + # + # The stream_id for the update is chosen to be the minimum of the stream_ids + # for the batch of the events that we are persisting; that means we do not + # end up in a situation where workers see events before the + # current_state_delta updates. + # + sql = """ + INSERT INTO current_state_delta_stream + (stream_id, room_id, type, state_key, event_id, prev_event_id) + SELECT ?, ?, ?, ?, ?, ( + SELECT event_id FROM current_state_events + WHERE room_id = ? AND type = ? AND state_key = ? 
) - for etype, state_key in to_delete - # We sanity check that we're deleting rather than updating - if (etype, state_key) not in to_insert - ), - ) - txn.executemany( - sql, - ( + """ + txn.executemany( + sql, ( - stream_id, - room_id, - etype, - state_key, - ev_id, - room_id, - etype, - state_key, - ) - for (etype, state_key), ev_id in iteritems(to_insert) - ), - ) + ( + stream_id, + room_id, + etype, + state_key, + to_insert.get((etype, state_key)), + room_id, + etype, + state_key, + ) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) + # Now we actually update the current_state_events table - # Now we actually update the current_state_events table + txn.executemany( + "DELETE FROM current_state_events" + " WHERE room_id = ? AND type = ? AND state_key = ?", + ( + (room_id, etype, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) - txn.executemany( - "DELETE FROM current_state_events" - " WHERE room_id = ? AND type = ? AND state_key = ?", - ( - (room_id, etype, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), - ) + # We include the membership in the current state table, hence we do + # a lookup when we insert. This assumes that all events have already + # been inserted into room_memberships. + txn.executemany( + """INSERT INTO current_state_events + (room_id, type, state_key, event_id, membership) + VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + """, + [ + (room_id, key[0], key[1], ev_id, ev_id) + for key, ev_id in iteritems(to_insert) + ], + ) - # We include the membership in the current state table, hence we do - # a lookup when we insert. This assumes that all events have already - # been inserted into room_memberships. - txn.executemany( - """INSERT INTO current_state_events - (room_id, type, state_key, event_id, membership) - VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) - """, - [ - (room_id, key[0], key[1], ev_id, ev_id) - for key, ev_id in iteritems(to_insert) - ], - ) + # We now update `local_current_membership`. We do this regardless + # of whether we're still in the room or not to handle the case where + # e.g. we just got banned (where we need to record that fact here). # Note: Do we really want to delete rows here (that we do not # subsequently reinsert below)? While technically correct it means @@ -601,6 +611,35 @@ class EventsStore( self._invalidate_state_caches_and_stream(txn, room_id, members_changed) + def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str): + """Update the room version in the database based off current state + events. + + This is used when we're about to delete current state and we want to + ensure that the `rooms.room_version` column is up to date. + """ + + sql = """ + SELECT json FROM event_json + INNER JOIN current_state_events USING (room_id, event_id) + WHERE room_id = ? AND type = ? AND state_key = ? 
+        """
+        txn.execute(sql, (room_id, EventTypes.Create, ""))
+        row = txn.fetchone()
+        if row:
+            event_json = json.loads(row[0])
+            content = event_json.get("content", {})
+            creator = content.get("creator")
+            room_version_id = content.get("room_version", RoomVersions.V1.identifier)
+
+            self.db.simple_upsert_txn(
+                txn,
+                table="rooms",
+                keyvalues={"room_id": room_id},
+                values={"room_version": room_version_id},
+                insertion_values={"is_public": False, "creator": creator},
+            )
+
     def _update_forward_extremities_txn(
         self, txn, new_forward_extremities, max_stream_order
     ):
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index 368c457321..d060c8b992 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import itertools
 import logging
 from collections import deque, namedtuple
 from typing import Iterable, List, Optional, Tuple
@@ -27,7 +28,7 @@ from prometheus_client import Counter, Histogram
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
 from synapse.events import FrozenEvent
 from synapse.events.snapshot import EventContext
 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
@@ -72,17 +73,20 @@ stale_forward_extremities_counter = Histogram(
 )
 
 
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True)
 class DeltaState:
     """Deltas to use to update the `current_state_events` table.
 
     Attributes:
         to_delete: List of type/state_keys to delete from current state
         to_insert: Map of state to upsert into current state
+        no_longer_in_room: The server is no longer in the room, so the room
+            should e.g. be removed from the `current_state_events` table.
     """
 
     to_delete = attr.ib(type=List[Tuple[str, str]])
     to_insert = attr.ib(type=StateMap[str])
+    no_longer_in_room = attr.ib(type=bool, default=False)
 
 
 class _EventPeristenceQueue(object):
@@ -396,11 +400,12 @@ class EventsPersistenceStorage(object):
                             # If either are not None then there has been a change,
                             # and we need to work out the delta (or use that
                             # given)
+                            delta = None
                             if delta_ids is not None:
                                 # If there is a delta we know that we've
                                 # only added or replaced state, never
                                 # removed keys entirely.
-                                state_delta_for_room[room_id] = DeltaState([], delta_ids)
+                                delta = DeltaState([], delta_ids)
                             elif current_state is not None:
                                 with Measure(
                                     self._clock, "persist_events.calculate_state_delta"
                                 ):
                                     delta = await self._calculate_state_delta(
                                         room_id, current_state
                                     )
+
+                            if delta:
+                                # If we have a change of state then let's check
+                                # whether we're actually still a member of the room,
+                                # or if our last user left. If we're no longer in
+                                # the room then we delete the current state and
+                                # extremities.
+                                is_still_joined = await self._is_server_still_joined(
+                                    room_id, ev_ctx_rm, delta, current_state
+                                )
+                                if not is_still_joined:
+                                    logger.info("Server no longer in room %s", room_id)
+                                    latest_event_ids = []
+                                    current_state = {}
+                                    delta.no_longer_in_room = True
+
                                 state_delta_for_room[room_id] = delta
 
                             # If we have the current_state then lets prefill
@@ -660,3 +681,65 @@ class EventsPersistenceStorage(object):
         }
 
         return DeltaState(to_delete=to_delete, to_insert=to_insert)
+
+    async def _is_server_still_joined(
+        self,
+        room_id: str,
+        ev_ctx_rm: List[Tuple[FrozenEvent, EventContext]],
+        delta: DeltaState,
+        current_state: Optional[StateMap[str]],
+    ) -> bool:
+        """Check if the server will still be joined after the given events have
+        been persisted.
+
+        Args:
+            room_id
+            ev_ctx_rm
+            delta: The delta of current state between what is in the database
+                and what the new current state will be.
+            current_state: The new current state if it has already been
+                calculated, otherwise None.
+        """
+
+        if not any(
+            self.is_mine_id(state_key)
+            for typ, state_key in itertools.chain(delta.to_delete, delta.to_insert)
+            if typ == EventTypes.Member
+        ):
+            # There have been no changes to membership of our users, so nothing
+            # has changed and we assume we're still in the room.
+            return True
+
+        # Check if any of the given events are a local join that appear in the
+        # current state
+        for (typ, state_key), event_id in delta.to_insert.items():
+            if typ != EventTypes.Member or not self.is_mine_id(state_key):
+                continue
+
+            for event, _ in ev_ctx_rm:
+                if event_id == event.event_id:
+                    if event.membership == Membership.JOIN:
+                        return True
+
+        # There's been a change of membership but we don't have a local join
+        # event in the new events, so we need to check the full state.
+        if current_state is None:
+            current_state = await self.main_store.get_current_state_ids(room_id)
+            current_state = dict(current_state)
+            for key in delta.to_delete:
+                current_state.pop(key, None)
+
+            current_state.update(delta.to_insert)
+
+        event_ids = [
+            event_id
+            for (typ, state_key,), event_id in current_state.items()
+            if typ == EventTypes.Member and self.is_mine_id(state_key)
+        ]
+
+        rows = await self.main_store.get_membership_from_event_ids(event_ids)
+        is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
+        if is_still_joined:
+            return True
+        else:
+            return False
From bdbeeb94ecc8e99f1401174df220a38e130db164 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jan 2020 11:23:01 +0000
Subject: [PATCH 073/213] Fix race in federation sender that delayed device
 updates. (#6799)

We were sending device updates down both the federation stream and
device streams. This meant there was a race if the federation sender
worker processed the federation stream first: when the sender checked
whether there were new device updates, the slaved ID generator hadn't
yet been updated with the new stream IDs and so returned nothing.

This situation is correctly handled by events/receipts/etc by not
sending updates down the federation stream and instead having the
federation sender worker listen on the other streams and poke the
transaction queues as appropriate.
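Concretely, the federation sender worker now reacts to the device streams
itself; the pattern is sketched below (the handler name here is illustrative,
see the diff for the real change; the stream classes and row attributes are
the ones used in it):

    from synapse.replication.tcp.streams._base import (
        DeviceListsStream,
        ToDeviceStream,
    )

    def _on_replication_rows(self, stream_name, rows):  # illustrative name
        if stream_name == DeviceListsStream.NAME:
            # Each row names the destination whose device cache changed.
            for host in set(row.destination for row in rows):
                self.federation_sender.send_device_messages(host)
        elif stream_name == ToDeviceStream.NAME:
            # Entities starting with '@' are local users, not remote hosts.
            hosts = set(
                row.entity for row in rows if not row.entity.startswith("@")
            )
            for host in hosts:
                self.federation_sender.send_device_messages(host)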
--- changelog.d/6799.bugfix | 1 + synapse/app/federation_sender.py | 20 +++++++++++++++++++- synapse/federation/send_queue.py | 32 +++----------------------------- 3 files changed, 23 insertions(+), 30 deletions(-) create mode 100644 changelog.d/6799.bugfix diff --git a/changelog.d/6799.bugfix b/changelog.d/6799.bugfix new file mode 100644 index 0000000000..322a2758af --- /dev/null +++ b/changelog.d/6799.bugfix @@ -0,0 +1 @@ +Fix race in federation sender worker that delayed sending of device updates. diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 38d11fdd0f..63a91f1177 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -38,7 +38,11 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.replication.tcp.streams._base import ReceiptsStream +from synapse.replication.tcp.streams._base import ( + DeviceListsStream, + ReceiptsStream, + ToDeviceStream, +) from synapse.server import HomeServer from synapse.storage.database import Database from synapse.types import ReadReceipt @@ -256,6 +260,20 @@ class FederationSenderHandler(object): "process_receipts_for_federation", self._on_new_receipts, rows ) + # ... as well as device updates and messages + elif stream_name == DeviceListsStream.NAME: + hosts = set(row.destination for row in rows) + for host in hosts: + self.federation_sender.send_device_messages(host) + + elif stream_name == ToDeviceStream.NAME: + # The to_device stream includes stuff to be pushed to both local + # clients and remote servers, so we ignore entities that start with + # '@' (since they'll be local users rather than destinations). + hosts = set(row.entity for row in rows if not row.entity.startswith("@")) + for host in hosts: + self.federation_sender.send_device_messages(host) + @defer.inlineCallbacks def _on_new_receipts(self, rows): """ diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 174f6e42be..0bb82a6bb3 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -69,8 +69,6 @@ class FederationRemoteSendQueue(object): self.edus = SortedDict() # stream position -> Edu - self.device_messages = SortedDict() # stream position -> destination - self.pos = 1 self.pos_time = SortedDict() @@ -92,7 +90,6 @@ class FederationRemoteSendQueue(object): "keyed_edu", "keyed_edu_changed", "edus", - "device_messages", "pos_time", "presence_destinations", ]: @@ -171,12 +168,6 @@ class FederationRemoteSendQueue(object): for key in keys[:i]: del self.edus[key] - # Delete things out of device map - keys = self.device_messages.keys() - i = self.device_messages.bisect_left(position_to_delete) - for key in keys[:i]: - del self.device_messages[key] - def notify_new_events(self, current_id): """As per FederationSender""" # We don't need to replicate this as it gets sent down a different @@ -249,9 +240,8 @@ class FederationRemoteSendQueue(object): def send_device_messages(self, destination): """As per FederationSender""" - pos = self._next_pos() - self.device_messages[pos] = destination - self.notifier.on_new_replication_data() + # We don't need to replicate this as it gets sent down a different + # stream. 
def get_current_token(self): return self.pos - 1 @@ -339,14 +329,6 @@ class FederationRemoteSendQueue(object): for (pos, edu) in edus: rows.append((pos, EduRow(edu))) - # Fetch changed device messages - i = self.device_messages.bisect_right(from_token) - j = self.device_messages.bisect_right(to_token) + 1 - device_messages = {v: k for k, v in self.device_messages.items()[i:j]} - - for (destination, pos) in iteritems(device_messages): - rows.append((pos, DeviceRow(destination=destination))) - # Sort rows based on pos rows.sort() @@ -504,7 +486,6 @@ ParsedFederationStreamData = namedtuple( "presence_destinations", # list of tuples of UserPresenceState and destinations "keyed_edus", # dict of destination -> { key -> Edu } "edus", # dict of destination -> [Edu] - "device_destinations", # set of destinations ), ) @@ -523,11 +504,7 @@ def process_rows_for_federation(transaction_queue, rows): # them into the appropriate collection and then send them off. buff = ParsedFederationStreamData( - presence=[], - presence_destinations=[], - keyed_edus={}, - edus={}, - device_destinations=set(), + presence=[], presence_destinations=[], keyed_edus={}, edus={}, ) # Parse the rows in the stream and add to the buffer @@ -555,6 +532,3 @@ def process_rows_for_federation(transaction_queue, rows): for destination, edu_list in iteritems(buff.edus): for edu in edu_list: transaction_queue.send_edu(edu, None) - - for destination in buff.device_destinations: - transaction_queue.send_device_messages(destination) From ee42a5513e020424daa736962fee7fb69bd2373a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Jan 2020 11:02:55 +0000 Subject: [PATCH 074/213] Factor out a `copy_power_levels_contents` method I'm going to need another copy (hah!) of this. --- synapse/events/utils.py | 37 ++++++++++++++++++++++++++++++- synapse/handlers/room.py | 23 ++++++++++--------- tests/events/test_utils.py | 45 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 90 insertions(+), 15 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 07d1c5bcf0..be57c6d9be 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -12,8 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import collections import re +from typing import Mapping, Union from six import string_types @@ -422,3 +423,37 @@ class EventClientSerializer(object): return yieldable_gather_results( self.serialize_event, events, time_now=time_now, **kwargs ) + + +def copy_power_levels_contents( + old_power_levels: Mapping[str, Union[int, Mapping[str, int]]] +): + """Copy the content of a power_levels event, unfreezing frozendicts along the way + + Raises: + TypeError if the input does not look like a valid power levels event content + """ + if not isinstance(old_power_levels, collections.Mapping): + raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,)) + + power_levels = {} + for k, v in old_power_levels.items(): + + if isinstance(v, int): + power_levels[k] = v + continue + + if isinstance(v, collections.Mapping): + power_levels[k] = h = {} + for k1, v1 in v.items(): + # we should only have one level of nesting + if not isinstance(v1, int): + raise TypeError( + "Invalid power_levels value for %s.%s: %r" % (k, k1, v) + ) + h[k1] = v1 + continue + + raise TypeError("Invalid power_levels value for %s: %r" % (k, v)) + + return power_levels diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a9490782b7..532ee22fa4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -30,6 +30,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.events.utils import copy_power_levels_contents from synapse.http.endpoint import parse_and_validate_server_name from synapse.storage.state import StateFilter from synapse.types import ( @@ -367,6 +368,15 @@ class RoomCreationHandler(BaseHandler): if old_event: initial_state[k] = old_event.content + # deep-copy the power-levels event before we start modifying it + # note that if frozen_dicts are enabled, `power_levels` will be a frozen + # dict so we can't just copy.deepcopy it. + initial_state[ + (EventTypes.PowerLevels, "") + ] = power_levels = copy_power_levels_contents( + initial_state[(EventTypes.PowerLevels, "")] + ) + # Resolve the minimum power level required to send any state event # We will give the upgrading user this power level temporarily (if necessary) such that # they are able to copy all of the state events over, then revert them back to their @@ -375,8 +385,6 @@ class RoomCreationHandler(BaseHandler): # Copy over user power levels now as this will not be possible with >100PL users once # the room has been created - power_levels = initial_state[(EventTypes.PowerLevels, "")] - # Calculate the minimum power level needed to clone the room event_power_levels = power_levels.get("events", {}) state_default = power_levels.get("state_default", 0) @@ -386,16 +394,7 @@ class RoomCreationHandler(BaseHandler): # Raise the requester's power level in the new room if necessary current_power_level = power_levels["users"][user_id] if current_power_level < needed_power_level: - # make sure we copy the event content rather than overwriting it. - # note that if frozen_dicts are enabled, `power_levels` will be a frozen - # dict so we can't just copy.deepcopy it. 
- - new_power_levels = {k: v for k, v in power_levels.items() if k != "users"} - new_power_levels["users"] = { - k: v for k, v in power_levels.get("users", {}).items() if k != user_id - } - new_power_levels["users"][user_id] = needed_power_level - initial_state[(EventTypes.PowerLevels, "")] = new_power_levels + power_levels["users"][user_id] = needed_power_level yield self._send_events_for_new_room( requester, diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 9e3d4d0f47..2b13980dfd 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -15,9 +15,14 @@ from synapse.events import FrozenEvent -from synapse.events.utils import prune_event, serialize_event +from synapse.events.utils import ( + copy_power_levels_contents, + prune_event, + serialize_event, +) +from synapse.util.frozenutils import freeze -from .. import unittest +from tests import unittest def MockEvent(**kwargs): @@ -241,3 +246,39 @@ class SerializeEventTestCase(unittest.TestCase): self.serialize( MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4] ) + + +class CopyPowerLevelsContentTestCase(unittest.TestCase): + def setUp(self) -> None: + self.test_content = { + "ban": 50, + "events": {"m.room.name": 100, "m.room.power_levels": 100}, + "events_default": 0, + "invite": 50, + "kick": 50, + "notifications": {"room": 20}, + "redact": 50, + "state_default": 50, + "users": {"@example:localhost": 100}, + "users_default": 0, + } + + def _test(self, input): + a = copy_power_levels_contents(input) + + self.assertEqual(a["ban"], 50) + self.assertEqual(a["events"]["m.room.name"], 100) + + # make sure that changing the copy changes the copy and not the orig + a["ban"] = 10 + a["events"]["m.room.power_levels"] = 20 + + self.assertEqual(input["ban"], 50) + self.assertEqual(input["events"]["m.room.power_levels"], 100) + + def test_unfrozen(self): + self._test(self.test_content) + + def test_frozen(self): + input = freeze(self.test_content) + self._test(input) From b36095ae5cd88d95802ecf01f8b0f541593ea773 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 28 Jan 2020 11:08:38 +0000 Subject: [PATCH 075/213] Set the PL for aliases events to 0. --- synapse/events/utils.py | 2 +- synapse/handlers/room.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index be57c6d9be..f70f5032fb 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -449,7 +449,7 @@ def copy_power_levels_contents( # we should only have one level of nesting if not isinstance(v1, int): raise TypeError( - "Invalid power_levels value for %s.%s: %r" % (k, k1, v) + "Invalid power_levels value for %s.%s: %r" % (k, k1, v1) ) h[k1] = v1 continue diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 532ee22fa4..a95b45d791 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -287,7 +287,16 @@ class RoomCreationHandler(BaseHandler): except AuthError as e: logger.warning("Unable to update PLs in old room: %s", e) - logger.info("Setting correct PLs in new room to %s", old_room_pl_state.content) + new_pl_content = copy_power_levels_contents(old_room_pl_state.content) + + # pre-msc2260 rooms may not have the right setting for aliases. If no other + # value is set, set it now. 
+            events_default = new_pl_content.get("events_default", 0)
+            new_pl_content.setdefault("events", {}).setdefault(
+                EventTypes.Aliases, events_default
+            )
+
+            logger.info("Setting correct PLs in new room to %s", new_pl_content)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 requester,
                 {
@@ -295,7 +304,7 @@ class RoomCreationHandler(BaseHandler):
                     "state_key": "",
                     "room_id": new_room_id,
                     "sender": requester.user.to_string(),
-                    "content": old_room_pl_state.content,
+                    "content": new_pl_content,
                 },
                 ratelimit=False,
             )
@@ -812,6 +821,10 @@ class RoomCreationHandler(BaseHandler):
                     EventTypes.RoomHistoryVisibility: 100,
                     EventTypes.CanonicalAlias: 50,
                     EventTypes.RoomAvatar: 50,
+                    # MSC2260: Allow everybody to send alias events by default
+                    # This will be redundant on pre-MSC2260 rooms, since the
+                    # aliases event is special-cased.
+                    EventTypes.Aliases: 0,
                 },
                 "events_default": 0,
                 "state_default": 50,

From dcd85b976dc93851d7f5246fc0684d5c5f7d7b5b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 28 Jan 2020 17:46:34 +0000
Subject: [PATCH 076/213] Make /directory/room/ handle restrictive power levels

Fixes a bug where the alias would be added, but `PUT /directory/room/` would
return a 403.

---
 synapse/handlers/directory.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index a07d2f1a17..8c5980cb0c 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -151,7 +151,12 @@ class DirectoryHandler(BaseHandler):
         yield self._create_association(room_alias, room_id, servers, creator=user_id)

         if send_event:
-            yield self.send_room_alias_update_event(requester, room_id)
+            try:
+                yield self.send_room_alias_update_event(requester, room_id)
+            except AuthError as e:
+                # sending the aliases event may fail due to the user not having
+                # permission in the room; this is permitted.
+                logger.info("Skipping updating aliases event due to auth error %s", e)

     @defer.inlineCallbacks
     def delete_association(self, requester, room_alias, send_event=True):

From 750d4d7599d1985bc262853494b21e9fee34c637 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 28 Jan 2020 11:09:14 +0000
Subject: [PATCH 077/213] changelog

---
 changelog.d/6790.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6790.feature

diff --git a/changelog.d/6790.feature b/changelog.d/6790.feature
new file mode 100644
index 0000000000..df9e4b77ab
--- /dev/null
+++ b/changelog.d/6790.feature
@@ -0,0 +1 @@
+Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).

From a855b7c3a82458602bd62ed00bffed269f2acfec Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jan 2020 12:06:31 +0000
Subject: [PATCH 078/213] Remove unused DeviceRow class (#6800)

---
 changelog.d/6800.bugfix          |  1 +
 synapse/federation/send_queue.py | 21 +--------------------
 2 files changed, 2 insertions(+), 20 deletions(-)
 create mode 100644 changelog.d/6800.bugfix

diff --git a/changelog.d/6800.bugfix b/changelog.d/6800.bugfix
new file mode 100644
index 0000000000..322a2758af
--- /dev/null
+++ b/changelog.d/6800.bugfix
@@ -0,0 +1 @@
+Fix race in federation sender worker that delayed sending of device updates.
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 0bb82a6bb3..001bb304ae 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -454,28 +454,9 @@ class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))):  # Edu
         buff.edus.setdefault(self.edu.destination, []).append(self.edu)


-class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", ("destination",))):  # str
-    """Streams the fact that either a) there is pending to device messages for
-    users on the remote, or b) a local users device has changed and needs to
-    be sent to the remote.
-    """
-
-    TypeId = "d"
-
-    @staticmethod
-    def from_data(data):
-        return DeviceRow(destination=data["destination"])
-
-    def to_data(self):
-        return {"destination": self.destination}
-
-    def add_to_buffer(self, buff):
-        buff.device_destinations.add(self.destination)
-
-
 TypeToRow = {
     Row.TypeId: Row
-    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow, DeviceRow)
+    for Row in (PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow,)
 }

From 5a246611e3cbf27cf1dd7e4453adc8040cddd6a2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 30 Jan 2020 11:25:59 +0000
Subject: [PATCH 079/213] Type definitions for use in refactoring for redaction
 changes (#6803)

* Bump signedjson to 1.1

... so that we can use the type definitions

* Fix breakage caused by upgrade to signedjson 1.1

Thanks, @illicitonion...

---
 changelog.d/6803.misc          |  1 +
 synapse/events/__init__.py     |  5 +++--
 synapse/python_dependencies.py |  4 +++-
 synapse/types.py               |  7 ++++++-
 tests/storage/test_keys.py     | 15 +++++++++++----
 5 files changed, 24 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/6803.misc

diff --git a/changelog.d/6803.misc b/changelog.d/6803.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6803.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 72c09327f4..f813fa2fe7 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -23,6 +23,7 @@ from unpaddedbase64 import encode_base64

 from synapse.api.errors import UnsupportedRoomVersionError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.types import JsonDict
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze

@@ -197,7 +198,7 @@ class EventBase(object):
     def is_state(self):
         return hasattr(self, "state_key") and self.state_key is not None

-    def get_dict(self):
+    def get_dict(self) -> JsonDict:
         d = dict(self._event_dict)
         d.update({"signatures": self.signatures, "unsigned": dict(self.unsigned)})

@@ -209,7 +210,7 @@ class EventBase(object):
     def get_internal_metadata_dict(self):
         return self.internal_metadata.get_dict()

-    def get_pdu_json(self, time_now=None):
+    def get_pdu_json(self, time_now=None) -> JsonDict:
         pdu_json = self.get_dict()

         if time_now is not None and "age_ts" in pdu_json["unsigned"]:
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 5871feaafd..8de8cb2c12 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -1,6 +1,7 @@
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018 New Vector Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -43,7 +44,8 @@ REQUIREMENTS = [
     "frozendict>=1",
     "unpaddedbase64>=1.1.0",
     "canonicaljson>=1.1.3",
-    "signedjson>=1.0.0",
+    # we use the type definitions added in signedjson 1.1.
+    "signedjson>=1.1.0",
     "pynacl>=1.2.1",
     "idna>=2.5",
     # validating SSL certs for IP addresses requires service_identity 18.1.
diff --git a/synapse/types.py b/synapse/types.py
index 65e4d8c181..f3cd465735 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -17,7 +17,7 @@ import re
 import string
 import sys
 from collections import namedtuple
-from typing import Dict, Tuple, TypeVar
+from typing import Any, Dict, Tuple, TypeVar

 import attr
 from signedjson.key import decode_verify_key_bytes
@@ -43,6 +43,11 @@ T = TypeVar("T")
 StateMap = Dict[Tuple[str, str], T]


+# the type of a JSON-serialisable dict. This could be made stronger, but it will
+# do for now.
+JsonDict = Dict[str, Any]
+
+
 class Requester(
     namedtuple(
         "Requester", ["user", "access_token_id", "is_guest", "device_id", "app_service"]
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index e07ff01201..95f309fbbc 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -14,6 +14,7 @@
 # limitations under the License.

 import signedjson.key
+import unpaddedbase64

 from twisted.internet.defer import Deferred

@@ -21,11 +22,17 @@ from synapse.storage.keys import FetchKeyResult

 import tests.unittest

-KEY_1 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
+
+def decode_verify_key_base64(key_id: str, key_base64: str):
+    key_bytes = unpaddedbase64.decode_base64(key_base64)
+    return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)
+
+
+KEY_1 = decode_verify_key_base64(
+    "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
 )
-KEY_2 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+KEY_2 = decode_verify_key_base64(
+    "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
 )

From c80a9fe13dc098037a37bb0920a00b2c8cb53174 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 Jan 2020 15:06:58 +0000
Subject: [PATCH 080/213] When a client asks for remote keys, check if we
 should resync. (#6797)

If we detect that the remote users' keys may have changed then we should
attempt to resync against the remote server rather than using the
(potentially) stale local cache.

---
 changelog.d/6797.misc                       |  1 +
 synapse/storage/data_stores/main/devices.py | 32 +++++++++++++++++++--
 2 files changed, 30 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6797.misc

diff --git a/changelog.d/6797.misc b/changelog.d/6797.misc
new file mode 100644
index 0000000000..e9127bac51
--- /dev/null
+++ b/changelog.d/6797.misc
@@ -0,0 +1 @@
+When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale.
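Roughly, the lookup now treats users flagged for resync as cache misses. A sketch of the flow under that reading (`split_cached_users` is a hypothetical wrapper; the real logic is in the store method modified in the diff that follows):

    # Sketch: anything marked as potentially stale is queried remotely.
    async def split_cached_users(store, user_ids):
        user_map = await store.get_device_list_last_stream_id_for_remotes(user_ids)
        stale = await store.get_user_ids_requiring_device_list_resync(user_ids)
        cached = set(u for u, stream_id in user_map.items() if stream_id) - stale
        return cached, set(user_ids) - cached  # (use cache, hit the remote)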
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index 30bf66b2b6..a34415ff14 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -32,7 +32,7 @@ from synapse.logging.opentracing import ( from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import Database -from synapse.types import get_verify_key_from_cross_signing_key +from synapse.types import Collection, get_verify_key_from_cross_signing_key from synapse.util.caches.descriptors import ( Cache, cached, @@ -443,8 +443,15 @@ class DeviceWorkerStore(SQLBaseStore): """ user_ids = set(user_id for user_id, _ in query_list) user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids)) - user_ids_in_cache = set( - user_id for user_id, stream_id in user_map.items() if stream_id + + # We go and check if any of the users need to have their device lists + # resynced. If they do then we remove them from the cached list. + users_needing_resync = yield self.get_user_ids_requiring_device_list_resync( + user_ids + ) + user_ids_in_cache = ( + set(user_id for user_id, stream_id in user_map.items() if stream_id) + - users_needing_resync ) user_ids_not_in_cache = user_ids - user_ids_in_cache @@ -641,6 +648,25 @@ class DeviceWorkerStore(SQLBaseStore): return results + @defer.inlineCallbacks + def get_user_ids_requiring_device_list_resync(self, user_ids: Collection[str]): + """Given a list of remote users return the list of users that we + should resync the device lists for. + + Returns: + Deferred[Set[str]] + """ + + rows = yield self.db.simple_select_many_batch( + table="device_lists_remote_resync", + column="user_id", + iterable=user_ids, + retcols=("user_id",), + desc="get_user_ids_requiring_device_list_resync", + ) + + return {row["user_id"] for row in rows} + def mark_remote_user_device_cache_as_stale(self, user_id: str): """Records that the server has reason to believe the cache of the devices for the remote users is out of date. From a5bab2d058747eb7165b20808b34c970e34a4b11 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Jan 2020 16:10:30 +0000 Subject: [PATCH 081/213] When server leaves room check for stale device lists. (#6801) When a server leaves a room it may stop sharing a room with remote users, and thus not get any updates to their device lists. So we need to check for this case and delete those device lists from the cache. We don't need to do this if we stop sharing a room because the remote user leaves the room, because we track that case via looking at membership changes. --- changelog.d/6801.bugfix | 1 + .../storage/data_stores/main/roommember.py | 37 +++++++++++++- synapse/storage/persist_events.py | 51 +++++++++++++++++-- 3 files changed, 83 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6801.bugfix diff --git a/changelog.d/6801.bugfix b/changelog.d/6801.bugfix new file mode 100644 index 0000000000..f401fa5d69 --- /dev/null +++ b/changelog.d/6801.bugfix @@ -0,0 +1 @@ +Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. 
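The clean-up flow this patch adds, sketched end to end (simplified from `_handle_potentially_left_users` further down in the patch):

    # Sketch: once we have left a room, find which remote users we still
    # share any room with, and unsubscribe from the device lists of the rest.
    async def handle_potentially_left_users(store, user_ids):
        joined = await store.get_users_server_still_shares_room_with(user_ids)
        for user_id in user_ids - joined:
            await store.mark_remote_user_device_list_as_unsubscribed(user_id)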
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index 9acef7c950..042289f0e0 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -15,7 +15,7 @@
 # limitations under the License.

 import logging
-from typing import Iterable, List
+from typing import Iterable, List, Set

 from six import iteritems, itervalues

@@ -40,7 +40,7 @@ from synapse.storage.roommember import (
     ProfileInfo,
     RoomsForUser,
 )
-from synapse.types import get_domain_from_id
+from synapse.types import Collection, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
@@ -439,6 +439,39 @@ class RoomMemberWorkerStore(EventsWorkerStore):

         return results

+    async def get_users_server_still_shares_room_with(
+        self, user_ids: Collection[str]
+    ) -> Set[str]:
+        """Given a list of users return the set that the server still shares a
+        room with.
+        """
+
+        if not user_ids:
+            return set()
+
+        def _get_users_server_still_shares_room_with_txn(txn):
+            sql = """
+                SELECT state_key FROM current_state_events
+                WHERE
+                    type = 'm.room.member'
+                    AND membership = 'join'
+                    AND %s
+                GROUP BY state_key
+            """
+
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "state_key", user_ids
+            )
+
+            txn.execute(sql % (clause,), args)
+
+            return set(row[0] for row in txn)
+
+        return await self.db.runInteraction(
+            "get_users_server_still_shares_room_with",
+            _get_users_server_still_shares_room_with_txn,
+        )
+
     @defer.inlineCallbacks
     def get_rooms_for_user(self, user_id, on_invalidate=None):
         """Returns a set of room_ids the user is currently joined to.
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index d060c8b992..86166fd4c1 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -18,7 +18,7 @@
 import itertools
 import logging
 from collections import deque, namedtuple
-from typing import Iterable, List, Optional, Tuple
+from typing import Iterable, List, Optional, Set, Tuple

 from six import iteritems
 from six.moves import range

@@ -318,6 +318,11 @@ class EventsPersistenceStorage(object):
             # room
             state_delta_for_room = {}

+            # Set of remote users which were in rooms the server has left. We
+            # should check if we still share any rooms and if not we mark their
+            # device lists as stale.
+            potentially_left_users = set()  # type: Set[str]
+
             if not backfilled:
                 with Measure(self._clock, "_calculate_state_and_extrem"):
                     # Work out the new "current state" for each room.
@@ -421,7 +426,11 @@ class EventsPersistenceStorage(object):
                             # the room then we delete the current state and
                             # extremities.
                             is_still_joined = await self._is_server_still_joined(
-                                room_id, ev_ctx_rm, delta, current_state
+                                room_id,
+                                ev_ctx_rm,
+                                delta,
+                                current_state,
+                                potentially_left_users,
                             )
                             if not is_still_joined:
                                 logger.info("Server no longer in room %s", room_id)
@@ -444,6 +453,8 @@ class EventsPersistenceStorage(object):
                 backfilled=backfilled,
             )

+        await self._handle_potentially_left_users(potentially_left_users)
+
     async def _calculate_new_extremities(
         self,
         room_id: str,
@@ -688,6 +699,7 @@ class EventsPersistenceStorage(object):
         ev_ctx_rm: List[Tuple[FrozenEvent, EventContext]],
         delta: DeltaState,
         current_state: Optional[StateMap[str]],
+        potentially_left_users: Set[str],
     ) -> bool:
         """Check if the server will still be joined after the given events have
        been persisted.
@@ -699,6 +711,9 @@ class EventsPersistenceStorage(object):
                 and what the new current state will be.
             current_state: The new current state if it has already been calculated,
                 otherwise None.
+            potentially_left_users: If the server has left the room, then joined
+                remote users will be added to this set to indicate that the
+                server may no longer be sharing a room with them.
         """

         if not any(
@@ -741,5 +756,33 @@ class EventsPersistenceStorage(object):
         is_still_joined = any(row["membership"] == Membership.JOIN for row in rows)
         if is_still_joined:
             return True
-        else:
-            return False
+
+        # The server will leave the room, so we go and find out which remote
+        # users will still be joined when we leave.
+        remote_event_ids = [
+            event_id
+            for (typ, state_key,), event_id in current_state.items()
+            if typ == EventTypes.Member and not self.is_mine_id(state_key)
+        ]
+        rows = await self.main_store.get_membership_from_event_ids(remote_event_ids)
+        potentially_left_users.update(
+            row["user_id"] for row in rows if row["membership"] == Membership.JOIN
+        )
+
+        return False
+
+    async def _handle_potentially_left_users(self, user_ids: Set[str]):
+        """Given a set of remote users check if the server still shares a room with
+        them. If not then mark those users' device cache as stale.
+        """
+
+        if not user_ids:
+            return
+
+        joined_users = await self.main_store.get_users_server_still_shares_room_with(
+            user_ids
+        )
+        left_users = user_ids - joined_users
+
+        for user_id in left_users:
+            await self.main_store.mark_remote_user_device_list_as_unsubscribed(user_id)

From c3d4ad8afdbe181707451410100dec4817c2c01a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 Jan 2020 16:42:11 +0000
Subject: [PATCH 082/213] Fix sending server up commands from workers (#6811)

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>

---
 changelog.d/6811.bugfix                |  1 +
 synapse/federation/transport/client.py |  5 ++++-
 synapse/federation/transport/server.py | 26 +++++++++++++++-----------
 synapse/replication/tcp/client.py      |  4 ++++
 synapse/server.pyi                     | 12 +++++++++++-
 tox.ini                                |  1 +
 6 files changed, 36 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/6811.bugfix

diff --git a/changelog.d/6811.bugfix b/changelog.d/6811.bugfix
new file mode 100644
index 0000000000..361f2fc2e8
--- /dev/null
+++ b/changelog.d/6811.bugfix
@@ -0,0 +1 @@
+Fix waking up other workers when remote server is detected to have come back online.
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 198257414b..dc563538de 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -15,6 +15,7 @@
 # limitations under the License.
import logging +from typing import Any, Dict from six.moves import urllib @@ -352,7 +353,9 @@ class TransportLayerClient(object): else: path = _create_v1_path("/publicRooms") - args = {"include_all_networks": "true" if include_all_networks else "false"} + args = { + "include_all_networks": "true" if include_all_networks else "false" + } # type: Dict[str, Any] if third_party_instance_id: args["third_party_instance_id"] = (third_party_instance_id,) if limit: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d8cf9ed299..125eadd796 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -18,6 +18,7 @@ import functools import logging import re +from typing import Optional, Tuple, Type from twisted.internet.defer import maybeDeferred @@ -267,6 +268,8 @@ class BaseFederationServlet(object): returned. """ + PATH = "" # Overridden in subclasses, the regex to match against the path. + REQUIRE_AUTH = True PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version @@ -347,9 +350,6 @@ class BaseFederationServlet(object): return response - # Extra logic that functools.wraps() doesn't finish - new_func.__self__ = func.__self__ - return new_func def register(self, server): @@ -824,7 +824,7 @@ class PublicRoomList(BaseFederationServlet): if not self.allow_access: raise FederationDeniedError(origin) - limit = int(content.get("limit", 100)) + limit = int(content.get("limit", 100)) # type: Optional[int] since_token = content.get("since", None) search_filter = content.get("filter", None) @@ -971,7 +971,7 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet): if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - result = await self.groups_handler.update_room_in_group( + result = await self.handler.update_room_in_group( group_id, requester_user_id, room_id, config_key, content ) @@ -1422,11 +1422,13 @@ FEDERATION_SERVLET_CLASSES = ( On3pidBindServlet, FederationVersionServlet, RoomComplexityServlet, -) +) # type: Tuple[Type[BaseFederationServlet], ...] -OPENID_SERVLET_CLASSES = (OpenIdUserInfo,) +OPENID_SERVLET_CLASSES = ( + OpenIdUserInfo, +) # type: Tuple[Type[BaseFederationServlet], ...] -ROOM_LIST_CLASSES = (PublicRoomList,) +ROOM_LIST_CLASSES = (PublicRoomList,) # type: Tuple[Type[PublicRoomList], ...] GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsProfileServlet, @@ -1447,17 +1449,19 @@ GROUP_SERVER_SERVLET_CLASSES = ( FederationGroupsAddRoomsServlet, FederationGroupsAddRoomsConfigServlet, FederationGroupsSettingJoinPolicyServlet, -) +) # type: Tuple[Type[BaseFederationServlet], ...] GROUP_LOCAL_SERVLET_CLASSES = ( FederationGroupsLocalInviteServlet, FederationGroupsRemoveLocalUserServlet, FederationGroupsBulkPublicisedServlet, -) +) # type: Tuple[Type[BaseFederationServlet], ...] -GROUP_ATTESTATION_SERVLET_CLASSES = (FederationGroupsRenewAttestaionServlet,) +GROUP_ATTESTATION_SERVLET_CLASSES = ( + FederationGroupsRenewAttestaionServlet, +) # type: Tuple[Type[BaseFederationServlet], ...] 
DEFAULT_SERVLET_GROUPS = ( "federation", diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index fc06a7b053..02ab5b66ea 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -31,6 +31,7 @@ from .commands import ( Command, FederationAckCommand, InvalidateCacheCommand, + RemoteServerUpCommand, RemovePusherCommand, UserIpCommand, UserSyncCommand, @@ -210,6 +211,9 @@ class ReplicationClientHandler(AbstractReplicationClientHandler): cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen) self.send_command(cmd) + def send_remote_server_up(self, server: str): + self.send_command(RemoteServerUpCommand(server)) + def await_sync(self, data): """Returns a deferred that is resolved when we receive a SYNC command with given data. diff --git a/synapse/server.pyi b/synapse/server.pyi index 0731403047..90347ac23e 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -2,8 +2,8 @@ import twisted.internet import synapse.api.auth import synapse.config.homeserver +import synapse.crypto.keyring import synapse.federation.sender -import synapse.federation.transaction_queue import synapse.federation.transport.client import synapse.handlers import synapse.handlers.auth @@ -17,6 +17,7 @@ import synapse.handlers.room_member import synapse.handlers.set_password import synapse.http.client import synapse.notifier +import synapse.replication.tcp.client import synapse.rest.media.v1.media_repository import synapse.server_notices.server_notices_manager import synapse.server_notices.server_notices_sender @@ -27,6 +28,9 @@ class HomeServer(object): @property def config(self) -> synapse.config.homeserver.HomeServerConfig: pass + @property + def hostname(self) -> str: + pass def get_auth(self) -> synapse.api.auth.Auth: pass def get_auth_handler(self) -> synapse.handlers.auth.AuthHandler: @@ -97,3 +101,9 @@ class HomeServer(object): pass def get_reactor(self) -> twisted.internet.base.ReactorBase: pass + def get_keyring(self) -> synapse.crypto.keyring.Keyring: + pass + def get_tcp_replication( + self, + ) -> synapse.replication.tcp.client.ReplicationClientHandler: + pass diff --git a/tox.ini b/tox.ini index 1d946a02ba..88ef12bebd 100644 --- a/tox.ini +++ b/tox.ini @@ -179,6 +179,7 @@ extras = all commands = mypy \ synapse/api \ synapse/config/ \ + synapse/federation/transport \ synapse/handlers/ui_auth \ synapse/logging/ \ synapse/module_api \ From b660327056cdced860d532ab2404a26946da7ef5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Jan 2020 17:06:38 +0000 Subject: [PATCH 083/213] Resync remote device list when detected as stale. (#6786) --- changelog.d/6786.misc | 1 + synapse/handlers/devicemessage.py | 10 ++++++++-- synapse/handlers/federation.py | 18 ++++++++++++++++-- tests/handlers/test_typing.py | 6 +++--- 4 files changed, 28 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6786.misc diff --git a/changelog.d/6786.misc b/changelog.d/6786.misc new file mode 100644 index 0000000000..94c692e53a --- /dev/null +++ b/changelog.d/6786.misc @@ -0,0 +1 @@ +Attempt to resync remote users' devices when detected as stale. 
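Both call sites in this patch follow the same two-step shape; roughly (a sketch, with the handler plumbing omitted):

    # Sketch: record that the cache is stale, then resync in the background
    # so the incoming request is not blocked on the remote server.
    async def on_stale_device_list(store, device_list_updater, user_id):
        await store.mark_remote_user_device_cache_as_stale(user_id)
        run_in_background(device_list_updater.user_device_resync, user_id)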
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 5c5fe77be2..05c4b3eec0 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -21,6 +21,7 @@ from canonicaljson import json from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( get_active_span_text_map, log_kv, @@ -48,6 +49,8 @@ class DeviceMessageHandler(object): "m.direct_to_device", self.on_direct_to_device_edu ) + self._device_list_updater = hs.get_device_handler().device_list_updater + @defer.inlineCallbacks def on_direct_to_device_edu(self, origin, content): local_messages = {} @@ -134,8 +137,11 @@ class DeviceMessageHandler(object): unknown_devices, ) yield self.store.mark_remote_user_device_cache_as_stale(sender_user_id) - # TODO: Poke something to start trying to refetch user's - # keys. + + # Immediately attempt a resync in the background + run_in_background( + self._device_list_updater.user_device_resync, sender_user_id + ) @defer.inlineCallbacks def send_device_message(self, sender_user_id, message_type, messages): diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a67020a259..ca484e5458 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -57,6 +57,7 @@ from synapse.logging.context import ( run_in_background, ) from synapse.logging.utils import log_function +from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationFederationSendEventsRestServlet, @@ -156,6 +157,13 @@ class FederationHandler(BaseHandler): hs ) + if hs.config.worker_app: + self._user_device_resync = ReplicationUserDevicesResyncRestServlet.make_client( + hs + ) + else: + self._device_list_updater = hs.get_device_handler().device_list_updater + # When joining a room we need to queue any events for that room up self.room_queues = {} self._room_pdu_linearizer = Linearizer("fed_room_pdu") @@ -759,8 +767,14 @@ class FederationHandler(BaseHandler): await self.store.mark_remote_user_device_cache_as_stale( event.sender ) - # TODO: Poke something to start trying to refetch user's - # keys. 
+
+                # Immediately attempt a resync in the background
+                if self.config.worker_app:
+                    return run_in_background(self._user_device_resync, event.sender)
+                else:
+                    return run_in_background(
+                        self._device_list_updater.user_device_resync, event.sender
+                    )

     @log_function
     async def backfill(self, dest, room_id, limit, extremities):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 596ddc6970..68b9847bd2 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -81,6 +81,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
             ]
         )

+        # the tests assume that we are starting at unix time 1000
+        reactor.pump((1000,))
+
         hs = self.setup_test_homeserver(
             notifier=Mock(), http_client=mock_federation_client, keyring=mock_keyring
         )
@@ -90,9 +93,6 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
         return hs

     def prepare(self, reactor, clock, hs):
-        # the tests assume that we are starting at unix time 1000
-        reactor.pump((1000,))
-
         mock_notifier = hs.get_notifier()
         self.on_new_event = mock_notifier.on_new_event

From 57ad702af0511aff36ca69fb2e9fc3399cce3a8d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 Jan 2020 17:17:44 +0000
Subject: [PATCH 084/213] Background update to clean out rooms from current
 state (#6802)

---
 changelog.d/6802.misc                              |   1 +
 .../57/delete_old_current_state_events.sql         |  19 +++
 synapse/storage/data_stores/main/state.py          | 108 +++++++++++++++++-
 3 files changed, 126 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6802.misc
 create mode 100644 synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql

diff --git a/changelog.d/6802.misc b/changelog.d/6802.misc
new file mode 100644
index 0000000000..a77ba1d7a5
--- /dev/null
+++ b/changelog.d/6802.misc
@@ -0,0 +1 @@
+Add background update to clean out left rooms from current state.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
new file mode 100644
index 0000000000..a133d87a19
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/delete_old_current_state_events.sql
@@ -0,0 +1,19 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add background update to go and delete current state events for rooms the
+-- server is no longer in.
+INSERT into background_updates (update_name, progress_json) + VALUES ('delete_old_current_state_events', '{}'); diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index bd7b0276f1..9b6f68e777 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -21,12 +21,13 @@ from six import iteritems from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership from synapse.api.errors import NotFoundError from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.events_worker import EventsWorkerStore +from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore from synapse.storage.database import Database from synapse.storage.state import StateFilter from synapse.util.caches import intern_string @@ -300,14 +301,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return set(row["state_group"] for row in rows) -class MainStateBackgroundUpdateStore(SQLBaseStore): +class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" + DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" def __init__(self, database: Database, db_conn, hs): super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs) + self.server_name = hs.hostname + self.db.updates.register_background_index_update( self.CURRENT_STATE_INDEX_UPDATE_NAME, index_name="current_state_events_member_index", @@ -321,6 +325,106 @@ class MainStateBackgroundUpdateStore(SQLBaseStore): table="event_to_state_groups", columns=["state_group"], ) + self.db.updates.register_background_update_handler( + self.DELETE_CURRENT_STATE_UPDATE_NAME, self._background_remove_left_rooms, + ) + + async def _background_remove_left_rooms(self, progress, batch_size): + """Background update to delete rows from `current_state_events` and + `event_forward_extremities` tables of rooms that the server is no + longer joined to. + """ + + last_room_id = progress.get("last_room_id", "") + + def _background_remove_left_rooms_txn(txn): + sql = """ + SELECT DISTINCT room_id FROM current_state_events + WHERE room_id > ? ORDER BY room_id LIMIT ? + """ + + txn.execute(sql, (last_room_id, batch_size)) + room_ids = list(row[0] for row in txn) + if not room_ids: + return True, set() + + sql = """ + SELECT room_id + FROM current_state_events + WHERE + room_id > ? AND room_id <= ? + AND type = 'm.room.member' + AND membership = 'join' + AND state_key LIKE ? + GROUP BY room_id + """ + + txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name)) + + joined_room_ids = set(row[0] for row in txn) + + left_rooms = set(room_ids) - joined_room_ids + + # First we get all users that we still think were joined to the + # room. This is so that we can mark those device lists as + # potentially stale, since there may have been a period where the + # server didn't share a room with the remote user and therefore may + # have missed any device updates. 
+            rows = self.db.simple_select_many_txn(
+                txn,
+                table="current_state_events",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={"type": EventTypes.Member, "membership": Membership.JOIN},
+                retcols=("state_key",),
+            )
+
+            potentially_left_users = set(row["state_key"] for row in rows)
+
+            # Now let's actually delete the rooms from the DB.
+            self.db.simple_delete_many_txn(
+                txn,
+                table="current_state_events",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={},
+            )
+
+            self.db.simple_delete_many_txn(
+                txn,
+                table="event_forward_extremities",
+                column="room_id",
+                iterable=left_rooms,
+                keyvalues={},
+            )
+
+            self.db.updates._background_update_progress_txn(
+                txn,
+                self.DELETE_CURRENT_STATE_UPDATE_NAME,
+                {"last_room_id": room_ids[-1]},
+            )
+
+            return False, potentially_left_users
+
+        finished, potentially_left_users = await self.db.runInteraction(
+            "_background_remove_left_rooms", _background_remove_left_rooms_txn
+        )
+
+        if finished:
+            await self.db.updates._end_background_update(
+                self.DELETE_CURRENT_STATE_UPDATE_NAME
+            )
+
+        # Now go and check if we still share a room with the remote users in
+        # the deleted rooms. If not mark their device lists as stale.
+        joined_users = await self.get_users_server_still_shares_room_with(
+            potentially_left_users
+        )
+
+        for user_id in potentially_left_users - joined_users:
+            await self.mark_remote_user_device_list_as_unsubscribed(user_id)
+
+        return batch_size


 class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):

From 184303b8650a90256f84bc9801b749a5b81b6d4b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 30 Jan 2020 17:20:55 +0000
Subject: [PATCH 085/213] MSC2260: Block direct sends of m.room.aliases events
 (#6794)

as per MSC2260

---
 changelog.d/6794.feature               |  1 +
 synapse/rest/client/v1/room.py         | 12 ++++++++
 tests/rest/admin/test_admin.py         |  7 -----
 tests/rest/client/v1/test_directory.py | 41 ++++++++++----------------
 4 files changed, 28 insertions(+), 33 deletions(-)
 create mode 100644 changelog.d/6794.feature

diff --git a/changelog.d/6794.feature b/changelog.d/6794.feature
new file mode 100644
index 0000000000..df9e4b77ab
--- /dev/null
+++ b/changelog.d/6794.feature
@@ -0,0 +1 @@
+Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260).
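The substance of the change is a guard at the top of the `/rooms/{roomId}/state/...` and `/rooms/{roomId}/send/...` handlers; in essence (a sketch of the shared shape, not the exact code):

    # Sketch: direct m.room.aliases sends are rejected; aliases are managed
    # through the /_matrix/client/r0/directory/room/{roomAlias} endpoint.
    if event_type == EventTypes.Aliases:
        raise SynapseError(400, "Cannot send m.room.aliases events via this endpoint")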
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 5aef8238b8..6f31584c51 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -184,6 +184,12 @@ class RoomStateEventRestServlet(TransactionRestServlet): content = parse_json_object_from_request(request) + if event_type == EventTypes.Aliases: + # MSC2260 + raise SynapseError( + 400, "Cannot send m.room.aliases events via /rooms/{room_id}/state" + ) + event_dict = { "type": event_type, "content": content, @@ -231,6 +237,12 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=True) content = parse_json_object_from_request(request) + if event_type == EventTypes.Aliases: + # MSC2260 + raise SynapseError( + 400, "Cannot send m.room.aliases events via /rooms/{room_id}/send" + ) + event_dict = { "type": event_type, "content": content, diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 0342aed416..e5984aaad8 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -868,13 +868,6 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Set this new alias as the canonical alias for this room - self.helper.send_state( - room_id, - "m.room.aliases", - {"aliases": [test_alias]}, - tok=self.admin_user_tok, - state_key="test", - ) self.helper.send_state( room_id, "m.room.canonical_alias", diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 633b7dbda0..914cf54927 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -51,26 +51,30 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.user = self.register_user("user", "test") self.user_tok = self.login("user", "test") - def test_state_event_not_in_room(self): - self.ensure_user_left_room() - self.set_alias_via_state_event(403) + def test_cannot_set_alias_via_state_event(self): + self.ensure_user_joined_room() + url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % ( + self.room_id, + self.hs.hostname, + ) + + data = {"aliases": [self.random_alias(5)]} + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.user_tok + ) + self.render(request) + self.assertEqual(channel.code, 400, channel.result) def test_directory_endpoint_not_in_room(self): self.ensure_user_left_room() self.set_alias_via_directory(403) - def test_state_event_in_room_too_long(self): - self.ensure_user_joined_room() - self.set_alias_via_state_event(400, alias_length=256) - def test_directory_in_room_too_long(self): self.ensure_user_joined_room() self.set_alias_via_directory(400, alias_length=256) - def test_state_event_in_room(self): - self.ensure_user_joined_room() - self.set_alias_via_state_event(200) - def test_directory_in_room(self): self.ensure_user_joined_room() self.set_alias_via_directory(200) @@ -102,21 +106,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(channel.code, 200, channel.result) - def set_alias_via_state_event(self, expected_code, alias_length=5): - url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % ( - self.room_id, - self.hs.hostname, - ) - - data = {"aliases": [self.random_alias(alias_length)]} - request_data = json.dumps(data) - - request, channel = self.make_request( - "PUT", url, request_data, access_token=self.user_tok - ) 
- self.render(request) - self.assertEqual(channel.code, expected_code, channel.result) - def set_alias_via_directory(self, expected_code, alias_length=5): url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length) data = {"room_id": self.room_id} From e0992fcc5be9e850a5007d1d09fea79bea949cf6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Jan 2020 17:55:34 +0000 Subject: [PATCH 086/213] Log when we delete room in bg update (#6816) --- changelog.d/6816.misc | 1 + synapse/storage/data_stores/main/state.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6816.misc diff --git a/changelog.d/6816.misc b/changelog.d/6816.misc new file mode 100644 index 0000000000..a77ba1d7a5 --- /dev/null +++ b/changelog.d/6816.misc @@ -0,0 +1 @@ +Add background update to clean out left rooms from current state. diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 9b6f68e777..4167f83c9b 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -365,6 +365,8 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): left_rooms = set(room_ids) - joined_room_ids + logger.info("Deleting current state left rooms: %r", left_rooms) + # First we get all users that we still think were joined to the # room. This is so that we can mark those device lists as # potentially stale, since there may have been a period where the From 46a446828d1b4b1ca2d9b0dcae97323a1bbc0c0b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 30 Jan 2020 22:13:02 +0000 Subject: [PATCH 087/213] pass room version into FederationHandler.on_invite_request (#6805) --- changelog.d/6805.misc | 1 + synapse/federation/federation_server.py | 2 +- synapse/handlers/federation.py | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6805.misc diff --git a/changelog.d/6805.misc b/changelog.d/6805.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6805.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8eddb3bf2c..9562faa3ee 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -410,7 +410,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) pdu = await self._check_sigs_and_hash(room_version, pdu) - ret_pdu = await self.handler.on_invite_request(origin, pdu) + ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version) time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ca484e5458..01372f6d47 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1482,13 +1482,13 @@ class FederationHandler(BaseHandler): return {"state": list(state.values()), "auth_chain": auth_chain} @defer.inlineCallbacks - def on_invite_request(self, origin, pdu): + def on_invite_request( + self, origin: str, event: EventBase, room_version: RoomVersion + ): """ We've got an invite event. Process and persist it. Sign it. Respond with the now signed event. 
""" - event = pdu - if event.state_key is None: raise SynapseError(400, "The invite event did not have a state key") From ef6bdafb29abe14cb40f6b83a46e70d82cd3e041 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 Jan 2020 17:55:48 +0000 Subject: [PATCH 088/213] Store the room version in EventBuilder --- synapse/events/builder.py | 12 +++++++----- tests/handlers/test_presence.py | 4 ++-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 3997751337..291fb38a26 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -23,6 +23,7 @@ from synapse.api.room_versions import ( KNOWN_EVENT_FORMAT_VERSIONS, KNOWN_ROOM_VERSIONS, EventFormatVersions, + RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.types import EventID @@ -40,7 +41,7 @@ class EventBuilder(object): content/unsigned/internal_metadata fields are still mutable) Attributes: - format_version (int): Event format version + room_version: Version of the target room room_id (str) type (str) sender (str) @@ -63,7 +64,7 @@ class EventBuilder(object): _hostname = attr.ib() _signing_key = attr.ib() - format_version = attr.ib() + room_version = attr.ib(type=RoomVersion) room_id = attr.ib() type = attr.ib() @@ -108,7 +109,8 @@ class EventBuilder(object): ) auth_ids = yield self._auth.compute_auth_events(self, state_ids) - if self.format_version == EventFormatVersions.V1: + format_version = self.room_version.event_format + if format_version == EventFormatVersions.V1: auth_events = yield self._store.add_event_hashes(auth_ids) prev_events = yield self._store.add_event_hashes(prev_event_ids) else: @@ -148,7 +150,7 @@ class EventBuilder(object): clock=self._clock, hostname=self._hostname, signing_key=self._signing_key, - format_version=self.format_version, + format_version=format_version, event_dict=event_dict, internal_metadata_dict=self.internal_metadata.get_dict(), ) @@ -201,7 +203,7 @@ class EventBuilderFactory(object): clock=self.clock, hostname=self.hostname, signing_key=self.signing_key, - format_version=room_version.event_format, + room_version=room_version, type=key_values["type"], state_key=key_values.get("state_key"), room_id=key_values["room_id"], diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index d4293b4312..69914428e2 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -19,7 +19,7 @@ from mock import Mock, call from signedjson.key import generate_signing_key from synapse.api.constants import EventTypes, Membership, PresenceState -from synapse.events import room_version_to_event_format +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder from synapse.handlers.presence import ( EXTERNAL_PROCESS_EXPIRY, @@ -597,7 +597,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): clock=self.clock, hostname=hostname, signing_key=self.random_signing_key, - format_version=room_version_to_event_format(room_version), + room_version=KNOWN_ROOM_VERSIONS[room_version], room_id=room_id, type=EventTypes.Member, sender=user_id, From 54f3f369bd94ce22b3e052d4b795ad5d0c4618bc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 Jan 2020 17:58:01 +0000 Subject: [PATCH 089/213] Pass room_version into create_local_event_from_event_dict --- synapse/events/builder.py | 40 +++++++++++-------------- synapse/federation/federation_client.py | 4 +-- 2 files changed, 19 insertions(+), 25 
deletions(-) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 291fb38a26..a26f4c9044 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -12,8 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional import attr +from nacl.signing import SigningKey from twisted.internet import defer @@ -26,11 +28,15 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.types import EventID +from synapse.events import ( + EventBase, + _EventInternalMetadata, + event_type_from_format_version, +) +from synapse.types import EventID, JsonDict +from synapse.util import Clock from synapse.util.stringutils import random_string -from . import _EventInternalMetadata, event_type_from_format_version - @attr.s(slots=True, cmp=False, frozen=True) class EventBuilder(object): @@ -150,7 +156,7 @@ class EventBuilder(object): clock=self._clock, hostname=self._hostname, signing_key=self._signing_key, - format_version=format_version, + room_version=self.room_version, event_dict=event_dict, internal_metadata_dict=self.internal_metadata.get_dict(), ) @@ -216,29 +222,19 @@ class EventBuilderFactory(object): def create_local_event_from_event_dict( - clock, - hostname, - signing_key, - format_version, - event_dict, - internal_metadata_dict=None, -): + clock: Clock, + hostname: str, + signing_key: SigningKey, + room_version: RoomVersion, + event_dict: JsonDict, + internal_metadata_dict: Optional[JsonDict] = None, +) -> EventBase: """Takes a fully formed event dict, ensuring that fields like `origin` and `origin_server_ts` have correct values for a locally produced event, then signs and hashes it. 
- - Args: - clock (Clock) - hostname (str) - signing_key - format_version (int) - event_dict (dict) - internal_metadata_dict (dict|None) - - Returns: - FrozenEvent """ + format_version = room_version.event_format if format_version not in KNOWN_EVENT_FORMAT_VERSIONS: raise Exception("No event format defined for version %r" % (format_version,)) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d57e8ca7a2..9be4b69cad 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -470,8 +470,6 @@ class FederationClient(FederationBase): if not room_version: raise UnsupportedRoomVersionError() - event_format = room_version_to_event_format(room_version_id) - pdu_dict = ret.get("event", None) if not isinstance(pdu_dict, dict): raise InvalidResponseError("Bad 'event' field in response") @@ -490,7 +488,7 @@ class FederationClient(FederationBase): self._clock, self.hostname, self.signing_key, - format_version=event_format, + room_version=room_version, event_dict=pdu_dict, ) From 2a81393a4b905c8bd4c31da04a8b4407462948b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 Jan 2020 17:40:33 +0000 Subject: [PATCH 090/213] Pass room_version into add_hashes_and_signatures --- synapse/crypto/event_signing.py | 20 +++++++++++++------- synapse/events/builder.py | 2 +- tests/crypto/test_event_signing.py | 9 +++++++-- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index e65bd61d97..1f2bccf700 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -20,10 +20,13 @@ import logging from canonicaljson import encode_canonical_json from signedjson.sign import sign_json +from signedjson.types import SigningKey from unpaddedbase64 import decode_base64, encode_base64 from synapse.api.errors import Codes, SynapseError +from synapse.api.room_versions import RoomVersion from synapse.events.utils import prune_event, prune_event_dict +from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -137,20 +140,23 @@ def compute_event_signature(event_dict, signature_name, signing_key): def add_hashes_and_signatures( - event_dict, signature_name, signing_key, hash_algorithm=hashlib.sha256 + room_version: RoomVersion, + event_dict: JsonDict, + signature_name: str, + signing_key: SigningKey, ): """Add content hash and sign the event Args: - event_dict (dict): The event to add hashes to and sign - signature_name (str): The name of the entity signing the event + room_version: the version of the room this event is in + + event_dict: The event to add hashes to and sign + signature_name: The name of the entity signing the event (typically the server's hostname). - signing_key (syutil.crypto.SigningKey): The key to sign with - hash_algorithm: A hasher from `hashlib`, e.g. 
hashlib.sha256, to use - to hash the event + signing_key: The key to sign with """ - name, digest = compute_content_hash(event_dict, hash_algorithm=hash_algorithm) + name, digest = compute_content_hash(event_dict, hash_algorithm=hashlib.sha256) event_dict.setdefault("hashes", {})[name] = encode_base64(digest) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index a26f4c9044..8d63ad6dc3 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -255,7 +255,7 @@ def create_local_event_from_event_dict( event_dict.setdefault("signatures", {}) - add_hashes_and_signatures(event_dict, hostname, signing_key) + add_hashes_and_signatures(room_version, event_dict, hostname, signing_key) return event_type_from_format_version(format_version)( event_dict, internal_metadata_dict=internal_metadata_dict ) diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 126e176004..6143a50ab2 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -17,6 +17,7 @@ import nacl.signing from unpaddedbase64 import decode_base64 +from synapse.api.room_versions import RoomVersions from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import FrozenEvent @@ -49,7 +50,9 @@ class EventSigningTestCase(unittest.TestCase): "unsigned": {"age_ts": 1000000}, } - add_hashes_and_signatures(event_dict, HOSTNAME, self.signing_key) + add_hashes_and_signatures( + RoomVersions.V1, event_dict, HOSTNAME, self.signing_key + ) event = FrozenEvent(event_dict) @@ -81,7 +84,9 @@ class EventSigningTestCase(unittest.TestCase): "unsigned": {"age_ts": 1000000}, } - add_hashes_and_signatures(event_dict, HOSTNAME, self.signing_key) + add_hashes_and_signatures( + RoomVersions.V1, event_dict, HOSTNAME, self.signing_key + ) event = FrozenEvent(event_dict) From 540c5e168b3f7f22d7af905d6d01dcf2a615dff3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 Jan 2020 18:19:06 +0000 Subject: [PATCH 091/213] changelog --- changelog.d/6806.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6806.misc diff --git a/changelog.d/6806.misc b/changelog.d/6806.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6806.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. From 7d846e870422c65f3fb436e5b0e543dae17719fc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Jan 2020 09:49:13 +0000 Subject: [PATCH 092/213] Fix bug with getting missing auth event during join 500'ed (#6810) --- changelog.d/6810.misc | 1 + synapse/handlers/federation.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6810.misc diff --git a/changelog.d/6810.misc b/changelog.d/6810.misc new file mode 100644 index 0000000000..5537355bea --- /dev/null +++ b/changelog.d/6810.misc @@ -0,0 +1 @@ +Record room versions in the `rooms` table. 
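The fix that follows resolves a type mismatch: by this point in the join-handling code, `room_version` holds a RoomVersion object, while `FederationClient.get_pdu` expects the plain version string, so the call now passes `room_version.identifier`. A minimal sketch of the distinction between the two representations, using `synapse.api.room_versions` (the concrete version chosen is illustrative):

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions

    # A RoomVersion bundles the wire identifier with behaviour flags such as
    # the event format; only the identifier string goes over federation.
    room_version = RoomVersions.V5
    assert room_version.identifier == "5"

    # KNOWN_ROOM_VERSIONS maps identifier strings back to RoomVersion objects,
    # which is how code that receives the bare string recovers the full object.
    assert KNOWN_ROOM_VERSIONS[room_version.identifier] is room_version
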
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 01372f6d47..1f92640f86 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1929,7 +1929,11 @@ class FederationHandler(BaseHandler): for e_id in missing_auth_events: m_ev = yield self.federation_client.get_pdu( - [origin], e_id, room_version=room_version, outlier=True, timeout=10000 + [origin], + e_id, + room_version=room_version.identifier, + outlier=True, + timeout=10000, ) if m_ev and m_ev.event_id == e_id: event_map[e_id] = m_ev From d7bf793cc1e3f5268285286341835ac54753eff6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 10:06:21 +0000 Subject: [PATCH 093/213] s/get_room_version/get_room_version_id/ ... to make way for a forthcoming get_room_version which returns a RoomVersion object. --- synapse/federation/federation_client.py | 10 +++++----- synapse/federation/federation_server.py | 16 ++++++++-------- synapse/handlers/federation.py | 18 +++++++++--------- synapse/handlers/message.py | 8 +++++--- synapse/handlers/pagination.py | 2 +- synapse/handlers/room.py | 2 +- synapse/state/__init__.py | 2 +- synapse/storage/data_stores/main/state.py | 2 +- synapse/storage/persist_events.py | 2 +- tests/handlers/test_presence.py | 2 +- tests/test_state.py | 2 +- tests/unittest.py | 4 +++- 12 files changed, 37 insertions(+), 33 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d57e8ca7a2..4ac3d81cba 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -198,7 +198,7 @@ class FederationClient(FederationBase): logger.debug("backfill transaction_data=%r", transaction_data) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdus = [ @@ -336,7 +336,7 @@ class FederationClient(FederationBase): def get_event_auth(self, destination, room_id, event_id): res = yield self.transport_layer.get_event_auth(destination, room_id, event_id) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) auth_chain = [ @@ -649,7 +649,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_invite(self, destination, room_id, event_id, pdu): - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) content = yield self._do_send_invite(destination, pdu, room_version) @@ -657,7 +657,7 @@ class FederationClient(FederationBase): logger.debug("Got response to send_invite: %s", pdu_dict) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdu = event_from_pdu_json(pdu_dict, format_ver) @@ -859,7 +859,7 @@ class FederationClient(FederationBase): timeout=timeout, ) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) events = [ diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 9562faa3ee..a4c97ed458 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -234,7 +234,7 @@ class FederationServer(FederationBase): 
continue try: - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) except NotFoundError: logger.info("Ignoring PDU for unknown room_id: %s", room_id) continue @@ -334,7 +334,7 @@ class FederationServer(FederationBase): ) ) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) resp["room_version"] = room_version return 200, resp @@ -385,7 +385,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) if room_version not in supported_versions: logger.warning( "Room version %s not in %s", room_version, supported_versions @@ -417,7 +417,7 @@ class FederationServer(FederationBase): async def on_send_join_request(self, origin, content, room_id): logger.debug("on_send_join_request: content: %s", content) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdu = event_from_pdu_json(content, format_ver) @@ -440,7 +440,7 @@ class FederationServer(FederationBase): await self.check_server_matches_acl(origin_host, room_id) pdu = await self.handler.on_make_leave_request(origin, room_id, user_id) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @@ -448,7 +448,7 @@ class FederationServer(FederationBase): async def on_send_leave_request(self, origin, content, room_id): logger.debug("on_send_leave_request: content: %s", content) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdu = event_from_pdu_json(content, format_ver) @@ -495,7 +495,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) auth_chain = [ @@ -664,7 +664,7 @@ class FederationServer(FederationBase): logger.info("Accepting join PDU %s from %s", pdu.event_id, origin) # We've already checked that we know the room version by this point - room_version = await self.store.get_room_version(pdu.room_id) + room_version = await self.store.get_room_version_id(pdu.room_id) # Check signature. try: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 01372f6d47..30c720f093 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -388,7 +388,7 @@ class FederationHandler(BaseHandler): for x in remote_state: event_map[x.event_id] = x - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) state_map = await resolve_events_with_store( room_id, room_version, @@ -1110,7 +1110,7 @@ class FederationHandler(BaseHandler): Logs a warning if we can't find the given event. 
""" - room_version = await self.store.get_room_version(room_id) + room_version = await self.store.get_room_version_id(room_id) event_infos = [] @@ -1373,7 +1373,7 @@ class FederationHandler(BaseHandler): event_content = {"membership": Membership.JOIN} - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, @@ -1607,7 +1607,7 @@ class FederationHandler(BaseHandler): ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, { @@ -2055,7 +2055,7 @@ class FederationHandler(BaseHandler): do_soft_fail_check = False if do_soft_fail_check: - room_version = yield self.store.get_room_version(event.room_id) + room_version = yield self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] # Calculate the "current state". @@ -2191,7 +2191,7 @@ class FederationHandler(BaseHandler): Returns: defer.Deferred[EventContext]: updated context object """ - room_version = yield self.store.get_room_version(event.room_id) + room_version = yield self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: @@ -2363,7 +2363,7 @@ class FederationHandler(BaseHandler): remote_auth_events.update({(d.type, d.state_key): d for d in different_events}) remote_state = remote_auth_events.values() - room_version = yield self.store.get_room_version(event.room_id) + room_version = yield self.store.get_room_version_id(event.room_id) new_state = yield self.state_handler.resolve_events( room_version, (local_state, remote_state), event ) @@ -2587,7 +2587,7 @@ class FederationHandler(BaseHandler): } if (yield self.auth.check_host_in_room(room_id, self.hs.hostname)): - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new(room_version, event_dict) EventValidator().validate_builder(builder) @@ -2650,7 +2650,7 @@ class FederationHandler(BaseHandler): Returns: Deferred: resolves (to None) """ - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) # NB: event_dict has a particular specced format we might need to fudge # if we change event formats too much. 
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9a0f661b9b..bdf16c84d3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -459,7 +459,9 @@ class EventCreationHandler(object): room_version = event_dict["content"]["room_version"] else: try: - room_version = yield self.store.get_room_version(event_dict["room_id"]) + room_version = yield self.store.get_room_version_id( + event_dict["room_id"] + ) except NotFoundError: raise AuthError(403, "Unknown room") @@ -788,7 +790,7 @@ class EventCreationHandler(object): ): room_version = event.content.get("room_version", RoomVersions.V1.identifier) else: - room_version = yield self.store.get_room_version(event.room_id) + room_version = yield self.store.get_room_version_id(event.room_id) event_allowed = yield self.third_party_event_rules.check_event_allowed( event, context @@ -963,7 +965,7 @@ class EventCreationHandler(object): auth_events = yield self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events.values()} - room_version = yield self.store.get_room_version(event.room_id) + room_version = yield self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] if event_auth.check_redaction( diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 71d76202c9..caf841a643 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -281,7 +281,7 @@ class PaginationHandler(object): """Purge the given room from the database""" with (await self.pagination_lock.write(room_id)): # check we know about the room - await self.store.get_room_version(room_id) + await self.store.get_room_version_id(room_id) # first check that we have no users in this room joined = await defer.maybeDeferred( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a95b45d791..1382399557 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -178,7 +178,7 @@ class RoomCreationHandler(BaseHandler): }, token_id=requester.access_token_id, ) - old_room_version = yield self.store.get_room_version(old_room_id) + old_room_version = yield self.store.get_room_version_id(old_room_id) yield self.auth.check_from_context( old_room_version, tombstone_event, tombstone_context ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index cacd0c0c2b..fdd6bef6b4 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -394,7 +394,7 @@ class StateHandler(object): delta_ids=delta_ids, ) - room_version = yield self.store.get_room_version(room_id) + room_version = yield self.store.get_room_version_id(room_id) result = yield self._state_resolution_handler.resolve_state_groups( room_id, diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 4167f83c9b..6700942523 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -62,7 +62,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) @cached(max_entries=10000) - async def get_room_version(self, room_id: str) -> str: + async def get_room_version_id(self, room_id: str) -> str: """Get the room_version of a given room Raises: diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 86166fd4c1..af3fd67ab9 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -661,7 +661,7 @@ class 
EventsPersistenceStorage(object): break if not room_version: - room_version = await self.main_store.get_room_version(room_id) + room_version = await self.main_store.get_room_version_id(room_id) logger.debug("calling resolve_state_groups from preserve_events") res = await self._state_resolution_handler.resolve_state_groups( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index d4293b4312..e92e090c3c 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -588,7 +588,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): hostname = get_domain_from_id(user_id) - room_version = self.get_success(self.store.get_room_version(room_id)) + room_version = self.get_success(self.store.get_room_version_id(room_id)) builder = EventBuilder( state=self.state, diff --git a/tests/test_state.py b/tests/test_state.py index e0aae06be4..1e4449fa1c 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -119,7 +119,7 @@ class StateGroupStore(object): def register_event_id_state_group(self, event_id, state_group): self._event_to_state_group[event_id] = state_group - def get_room_version(self, room_id): + def get_room_version_id(self, room_id): return RoomVersions.V1.identifier diff --git a/tests/unittest.py b/tests/unittest.py index b56e249386..98bf27d39c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -589,7 +589,9 @@ class HomeserverTestCase(TestCase): event_builder_factory = self.hs.get_event_builder_factory() event_creation_handler = self.hs.get_event_creation_handler() - room_version = self.get_success(self.hs.get_datastore().get_room_version(room)) + room_version = self.get_success( + self.hs.get_datastore().get_room_version_id(room) + ) builder = event_builder_factory.for_room_version( KNOWN_ROOM_VERSIONS[room_version], From 08f41a6f05f304f6a14ab0339cf7225ec3d9851b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 10:28:15 +0000 Subject: [PATCH 094/213] Add `get_room_version` method So that we can start factoring out some of this boilerplatey boilerplate. --- synapse/api/errors.py | 6 ++---- synapse/storage/data_stores/main/state.py | 25 ++++++++++++++++++++++- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 1c9456e583..0c20601600 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -402,11 +402,9 @@ class UnsupportedRoomVersionError(SynapseError): """The client's request to create a room used a room version that the server does not support.""" - def __init__(self): + def __init__(self, msg="Homeserver does not support this room version"): super(UnsupportedRoomVersionError, self).__init__( - code=400, - msg="Homeserver does not support this room version", - errcode=Codes.UNSUPPORTED_ROOM_VERSION, + code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION, ) diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 6700942523..3d34103e67 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,7 +23,8 @@ from six import iteritems from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import NotFoundError +from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.storage._base import SQLBaseStore @@ -61,6 +63,27 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def __init__(self, database: Database, db_conn, hs): super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) + async def get_room_version(self, room_id: str) -> RoomVersion: + """Get the room_version of a given room + + Raises: + NotFoundError: if the room is unknown + + UnsupportedRoomVersionError: if the room uses an unknown room version. + Typically this happens if support for the room's version has been + removed from Synapse. + """ + room_version_id = await self.get_room_version_id(room_id) + v = KNOWN_ROOM_VERSIONS.get(room_version_id) + + if not v: + raise UnsupportedRoomVersionError( + "Room %s uses a room version %s which is no longer supported" + % (room_id, room_version_id) + ) + + return v + @cached(max_entries=10000) async def get_room_version_id(self, room_id: str) -> str: """Get the room_version of a given room From f6fa2c0b31e2b3695a91f34a06974f428bd5d45c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 10:30:29 +0000 Subject: [PATCH 095/213] newsfile --- changelog.d/6820.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6820.misc diff --git a/changelog.d/6820.misc b/changelog.d/6820.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6820.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. From 7f93eb190301024d373a573fc75a58f592469e9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 31 Jan 2020 13:47:43 +0000 Subject: [PATCH 096/213] pass room_version into compute_event_signature (#6807) --- changelog.d/6807.misc | 1 + synapse/crypto/event_signing.py | 28 ++++++++++++++++++++-------- synapse/handlers/federation.py | 5 ++++- 3 files changed, 25 insertions(+), 9 deletions(-) create mode 100644 changelog.d/6807.misc diff --git a/changelog.d/6807.misc b/changelog.d/6807.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6807.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 1f2bccf700..5f733c1cf5 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- - +# # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,6 +18,7 @@ import collections.abc import hashlib import logging +from typing import Dict from canonicaljson import encode_canonical_json from signedjson.sign import sign_json @@ -115,18 +117,28 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256): return hashed.name, hashed.digest() -def compute_event_signature(event_dict, signature_name, signing_key): +def compute_event_signature( + room_version: RoomVersion, + event_dict: JsonDict, + signature_name: str, + signing_key: SigningKey, +) -> Dict[str, Dict[str, str]]: """Compute the signature of the event for the given name and key. Args: - event_dict (dict): The event as a dict - signature_name (str): The name of the entity signing the event + room_version: the version of the room that this event is in. + (the room version determines the redaction algorithm and hence the + json to be signed) + + event_dict: The event as a dict + + signature_name: The name of the entity signing the event (typically the server's hostname). - signing_key (syutil.crypto.SigningKey): The key to sign with + + signing_key: The key to sign with Returns: - dict[str, dict[str, str]]: Returns a dictionary in the same format of - an event's signatures field. + a dictionary in the same format of an event's signatures field. """ redact_json = prune_event_dict(event_dict) redact_json.pop("age_ts", None) @@ -161,5 +173,5 @@ def add_hashes_and_signatures( event_dict.setdefault("hashes", {})[name] = encode_base64(digest) event_dict["signatures"] = compute_event_signature( - event_dict, signature_name=signature_name, signing_key=signing_key + room_version, event_dict, signature_name=signature_name, signing_key=signing_key ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0f10c3e9b1..c86d3177e9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1528,7 +1528,10 @@ class FederationHandler(BaseHandler): event.signatures.update( compute_event_signature( - event.get_pdu_json(), self.hs.hostname, self.hs.config.signing_key[0] + room_version, + event.get_pdu_json(), + self.hs.hostname, + self.hs.config.signing_key[0], ) ) From 83b0ea047b355ade44985af123f4807faa7892ab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Jan 2020 14:04:15 +0000 Subject: [PATCH 097/213] Fix deleting of stale marker for device lists (#6819) We were in fact only deleting stale marker when we got an incremental update, rather than when we did a full resync. --- changelog.d/6819.misc | 1 + synapse/storage/data_stores/main/devices.py | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6819.misc diff --git a/changelog.d/6819.misc b/changelog.d/6819.misc new file mode 100644 index 0000000000..4f9a4ac7a5 --- /dev/null +++ b/changelog.d/6819.misc @@ -0,0 +1 @@ +Detect unknown remote devices and mark cache as stale. 
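The hunks below move the deletion of the `device_lists_remote_resync` marker out of the incremental-update transaction and into the transaction that replaces the entire cached device list, so the "needs resync" flag is only cleared once a full resync has actually landed. A minimal sketch of the intended ordering (the transaction helper name follows the store code; surrounding cache-population steps are elided):

    def _update_remote_device_list_cache_txn(self, txn, user_id, devices, stream_id):
        # ... delete and repopulate the cached device list for user_id ...

        # Only now is the cache authoritative again, so it is safe to drop
        # the marker saying this user's device list needs a resync.
        self.db.simple_delete_txn(
            txn,
            table="device_lists_remote_resync",
            keyvalues={"user_id": user_id},
        )
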
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index a34415ff14..ea0503476f 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -940,13 +940,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): lock=False, ) - # If we're replacing the remote user's device list cache presumably - # we've done a full resync, so we remove the entry that says we need - # to resync - self.db.simple_delete_txn( - txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id}, - ) - def update_remote_device_list_cache(self, user_id, devices, stream_id): """Replace the entire cache of the remote user's devices. @@ -1003,6 +996,13 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): lock=False, ) + # If we're replacing the remote user's device list cache presumably + # we've done a full resync, so we remove the entry that says we need + # to resync + self.db.simple_delete_txn( + txn, table="device_lists_remote_resync", keyvalues={"user_id": user_id}, + ) + @defer.inlineCallbacks def add_device_change_to_streams(self, user_id, device_ids, hosts): """Persist that a user's devices have been updated, and which hosts From ac0d45b78b647f6744b5850da88a4ca8c76666b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Jan 2020 15:35:37 +0000 Subject: [PATCH 098/213] 1.10.0rc1 --- CHANGES.md | 44 ++++++++++++++++++++++++++++++++++++++++ UPGRADE.rst | 4 ++-- changelog.d/6729.misc | 1 - changelog.d/6734.bugfix | 1 - changelog.d/6748.misc | 1 - changelog.d/6751.misc | 1 - changelog.d/6757.misc | 1 - changelog.d/6761.bugfix | 1 - changelog.d/6767.bugfix | 1 - changelog.d/6771.bugfix | 1 - changelog.d/6775.doc | 1 - changelog.d/6776.misc | 1 - changelog.d/6786.misc | 1 - changelog.d/6787.feature | 1 - changelog.d/6788.misc | 1 - changelog.d/6790.feature | 1 - changelog.d/6792.misc | 1 - changelog.d/6794.feature | 1 - changelog.d/6795.bugfix | 1 - changelog.d/6796.bugfix | 1 - changelog.d/6797.misc | 1 - changelog.d/6799.bugfix | 1 - changelog.d/6800.bugfix | 1 - changelog.d/6801.bugfix | 1 - changelog.d/6802.misc | 1 - changelog.d/6803.misc | 1 - changelog.d/6805.misc | 1 - changelog.d/6806.misc | 1 - changelog.d/6807.misc | 1 - changelog.d/6810.misc | 1 - changelog.d/6811.bugfix | 1 - changelog.d/6816.misc | 1 - changelog.d/6819.misc | 1 - changelog.d/6820.misc | 1 - synapse/__init__.py | 2 +- 35 files changed, 47 insertions(+), 35 deletions(-) delete mode 100644 changelog.d/6729.misc delete mode 100644 changelog.d/6734.bugfix delete mode 100644 changelog.d/6748.misc delete mode 100644 changelog.d/6751.misc delete mode 100644 changelog.d/6757.misc delete mode 100644 changelog.d/6761.bugfix delete mode 100644 changelog.d/6767.bugfix delete mode 100644 changelog.d/6771.bugfix delete mode 100644 changelog.d/6775.doc delete mode 100644 changelog.d/6776.misc delete mode 100644 changelog.d/6786.misc delete mode 100644 changelog.d/6787.feature delete mode 100644 changelog.d/6788.misc delete mode 100644 changelog.d/6790.feature delete mode 100644 changelog.d/6792.misc delete mode 100644 changelog.d/6794.feature delete mode 100644 changelog.d/6795.bugfix delete mode 100644 changelog.d/6796.bugfix delete mode 100644 changelog.d/6797.misc delete mode 100644 changelog.d/6799.bugfix delete mode 100644 changelog.d/6800.bugfix delete mode 100644 changelog.d/6801.bugfix delete mode 100644 changelog.d/6802.misc delete mode 100644 changelog.d/6803.misc delete mode 100644 
changelog.d/6805.misc delete mode 100644 changelog.d/6806.misc delete mode 100644 changelog.d/6807.misc delete mode 100644 changelog.d/6810.misc delete mode 100644 changelog.d/6811.bugfix delete mode 100644 changelog.d/6816.misc delete mode 100644 changelog.d/6819.misc delete mode 100644 changelog.d/6820.misc diff --git a/CHANGES.md b/CHANGES.md index 4c413b72ee..6686cafa5b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,47 @@ +Synapse 1.10.0rc1 (2020-01-31) +============================== + +Features +-------- + +- Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794)) + + +Bugfixes +-------- + +- Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS). ([\#6734](https://github.com/matrix-org/synapse/issues/6734)) +- Minor fixes to `PUT /_synapse/admin/v2/users` admin api. ([\#6761](https://github.com/matrix-org/synapse/issues/6761)) +- Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. ([\#6767](https://github.com/matrix-org/synapse/issues/6767)) +- Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). ([\#6771](https://github.com/matrix-org/synapse/issues/6771)) +- Fix outbound federation request metrics. ([\#6795](https://github.com/matrix-org/synapse/issues/6795)) +- Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device. ([\#6796](https://github.com/matrix-org/synapse/issues/6796)) +- Fix race in federation sender worker that delayed sending of device updates. ([\#6799](https://github.com/matrix-org/synapse/issues/6799), [\#6800](https://github.com/matrix-org/synapse/issues/6800)) +- Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. ([\#6801](https://github.com/matrix-org/synapse/issues/6801)) +- Fix waking up other workers when remote server is detected to have come back online. ([\#6811](https://github.com/matrix-org/synapse/issues/6811)) + + +Improved Documentation +---------------------- + +- Clarify documentation related to `user_dir` and `federation_reader` workers. ([\#6775](https://github.com/matrix-org/synapse/issues/6775)) + + +Internal Changes +---------------- + +- Record room versions in the `rooms` table. ([\#6729](https://github.com/matrix-org/synapse/issues/6729), [\#6788](https://github.com/matrix-org/synapse/issues/6788), [\#6810](https://github.com/matrix-org/synapse/issues/6810)) +- Propagate cache invalidates from workers to other workers. ([\#6748](https://github.com/matrix-org/synapse/issues/6748)) +- Remove some unnecessary admin handler abstraction methods. ([\#6751](https://github.com/matrix-org/synapse/issues/6751)) +- Add some debugging for media storage providers. ([\#6757](https://github.com/matrix-org/synapse/issues/6757)) +- Detect unknown remote devices and mark cache as stale. ([\#6776](https://github.com/matrix-org/synapse/issues/6776), [\#6819](https://github.com/matrix-org/synapse/issues/6819)) +- Attempt to resync remote users' devices when detected as stale. 
([\#6786](https://github.com/matrix-org/synapse/issues/6786)) +- Delete current state from the database when server leaves a room. ([\#6792](https://github.com/matrix-org/synapse/issues/6792)) +- When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale. ([\#6797](https://github.com/matrix-org/synapse/issues/6797)) +- Add background update to clean out left rooms from current state. ([\#6802](https://github.com/matrix-org/synapse/issues/6802), [\#6816](https://github.com/matrix-org/synapse/issues/6816)) +- Refactoring work in preparation for changing the event redaction algorithm. ([\#6803](https://github.com/matrix-org/synapse/issues/6803), [\#6805](https://github.com/matrix-org/synapse/issues/6805), [\#6806](https://github.com/matrix-org/synapse/issues/6806), [\#6807](https://github.com/matrix-org/synapse/issues/6807), [\#6820](https://github.com/matrix-org/synapse/issues/6820)) + + Synapse 1.9.1 (2020-01-28) ========================== diff --git a/UPGRADE.rst b/UPGRADE.rst index 470246f128..1c5db1c4a8 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -76,8 +76,8 @@ for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb -Upgrading to **** -=============================== +Upgrading to v1.10.0 +==================== Synapse will now log a warning on start up if used with a PostgreSQL database that has a non-recommended locale set. diff --git a/changelog.d/6729.misc b/changelog.d/6729.misc deleted file mode 100644 index 5537355bea..0000000000 --- a/changelog.d/6729.misc +++ /dev/null @@ -1 +0,0 @@ -Record room versions in the `rooms` table. diff --git a/changelog.d/6734.bugfix b/changelog.d/6734.bugfix deleted file mode 100644 index 79c6bab4d1..0000000000 --- a/changelog.d/6734.bugfix +++ /dev/null @@ -1 +0,0 @@ -Warn if postgres database has a non-C locale, as that can cause issues when upgrading locales (e.g. due to upgrading OS). diff --git a/changelog.d/6748.misc b/changelog.d/6748.misc deleted file mode 100644 index de320d4cd9..0000000000 --- a/changelog.d/6748.misc +++ /dev/null @@ -1 +0,0 @@ -Propagate cache invalidates from workers to other workers. diff --git a/changelog.d/6751.misc b/changelog.d/6751.misc deleted file mode 100644 index 7222520528..0000000000 --- a/changelog.d/6751.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some unnecessary admin handler abstraction methods. \ No newline at end of file diff --git a/changelog.d/6757.misc b/changelog.d/6757.misc deleted file mode 100644 index a50c5e974a..0000000000 --- a/changelog.d/6757.misc +++ /dev/null @@ -1 +0,0 @@ -Add some debugging for media storage providers. diff --git a/changelog.d/6761.bugfix b/changelog.d/6761.bugfix deleted file mode 100644 index 1c664c02df..0000000000 --- a/changelog.d/6761.bugfix +++ /dev/null @@ -1 +0,0 @@ -Minor fixes to `PUT /_synapse/admin/v2/users` admin api. diff --git a/changelog.d/6767.bugfix b/changelog.d/6767.bugfix deleted file mode 100644 index 63c7c63315..0000000000 --- a/changelog.d/6767.bugfix +++ /dev/null @@ -1 +0,0 @@ -Validate `client_secret` parameter using the regex provided by the Client-Server API, temporarily allowing `:` characters for older clients. The `:` character will be removed in a future release. diff --git a/changelog.d/6771.bugfix b/changelog.d/6771.bugfix deleted file mode 100644 index 623ba24acb..0000000000 --- a/changelog.d/6771.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix persisting redaction events that have been redacted (or otherwise don't have a redacts key). 
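As the upgrade notes above say, this release warns at startup when the PostgreSQL database uses a non-recommended locale. A hedged sketch of checking this yourself with psycopg2 (connection parameters are hypothetical; the query inspects the same collation and ctype columns the warning is based on):

    import psycopg2

    # Adjust dbname/user/host for your deployment.
    conn = psycopg2.connect(dbname="synapse", user="synapse_user")
    with conn.cursor() as cur:
        cur.execute(
            "SELECT datcollate, datctype FROM pg_database"
            " WHERE datname = current_database()"
        )
        collate, ctype = cur.fetchone()

    # A 'C' collation and ctype is the recommended configuration.
    print(collate, ctype)
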
diff --git a/changelog.d/6775.doc b/changelog.d/6775.doc deleted file mode 100644 index c6078ef82d..0000000000 --- a/changelog.d/6775.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify documentation related to `user_dir` and `federation_reader` workers. diff --git a/changelog.d/6776.misc b/changelog.d/6776.misc deleted file mode 100644 index 4f9a4ac7a5..0000000000 --- a/changelog.d/6776.misc +++ /dev/null @@ -1 +0,0 @@ -Detect unknown remote devices and mark cache as stale. diff --git a/changelog.d/6786.misc b/changelog.d/6786.misc deleted file mode 100644 index 94c692e53a..0000000000 --- a/changelog.d/6786.misc +++ /dev/null @@ -1 +0,0 @@ -Attempt to resync remote users' devices when detected as stale. diff --git a/changelog.d/6787.feature b/changelog.d/6787.feature deleted file mode 100644 index df9e4b77ab..0000000000 --- a/changelog.d/6787.feature +++ /dev/null @@ -1 +0,0 @@ -Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). diff --git a/changelog.d/6788.misc b/changelog.d/6788.misc deleted file mode 100644 index 5537355bea..0000000000 --- a/changelog.d/6788.misc +++ /dev/null @@ -1 +0,0 @@ -Record room versions in the `rooms` table. diff --git a/changelog.d/6790.feature b/changelog.d/6790.feature deleted file mode 100644 index df9e4b77ab..0000000000 --- a/changelog.d/6790.feature +++ /dev/null @@ -1 +0,0 @@ -Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). diff --git a/changelog.d/6792.misc b/changelog.d/6792.misc deleted file mode 100644 index fa31d509b3..0000000000 --- a/changelog.d/6792.misc +++ /dev/null @@ -1 +0,0 @@ -Delete current state from the database when server leaves a room. diff --git a/changelog.d/6794.feature b/changelog.d/6794.feature deleted file mode 100644 index df9e4b77ab..0000000000 --- a/changelog.d/6794.feature +++ /dev/null @@ -1 +0,0 @@ -Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). diff --git a/changelog.d/6795.bugfix b/changelog.d/6795.bugfix deleted file mode 100644 index d1585653b1..0000000000 --- a/changelog.d/6795.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix outbound federation request metrics. diff --git a/changelog.d/6796.bugfix b/changelog.d/6796.bugfix deleted file mode 100644 index 206a157311..0000000000 --- a/changelog.d/6796.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where querying a remote user's device keys that weren't cached resulted in only returning a single device. diff --git a/changelog.d/6797.misc b/changelog.d/6797.misc deleted file mode 100644 index e9127bac51..0000000000 --- a/changelog.d/6797.misc +++ /dev/null @@ -1 +0,0 @@ -When a client asks for a remote user's device keys check if the local cache for that user has been marked as potentially stale. diff --git a/changelog.d/6799.bugfix b/changelog.d/6799.bugfix deleted file mode 100644 index 322a2758af..0000000000 --- a/changelog.d/6799.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race in federation sender worker that delayed sending of device updates. diff --git a/changelog.d/6800.bugfix b/changelog.d/6800.bugfix deleted file mode 100644 index 322a2758af..0000000000 --- a/changelog.d/6800.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race in federation sender worker that delayed sending of device updates. 
diff --git a/changelog.d/6801.bugfix b/changelog.d/6801.bugfix deleted file mode 100644 index f401fa5d69..0000000000 --- a/changelog.d/6801.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where Synapse didn't invalidate cache of remote users' devices when Synapse left a room. diff --git a/changelog.d/6802.misc b/changelog.d/6802.misc deleted file mode 100644 index a77ba1d7a5..0000000000 --- a/changelog.d/6802.misc +++ /dev/null @@ -1 +0,0 @@ -Add background update to clean out left rooms from current state. diff --git a/changelog.d/6803.misc b/changelog.d/6803.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6803.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6805.misc b/changelog.d/6805.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6805.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6806.misc b/changelog.d/6806.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6806.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6807.misc b/changelog.d/6807.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6807.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. diff --git a/changelog.d/6810.misc b/changelog.d/6810.misc deleted file mode 100644 index 5537355bea..0000000000 --- a/changelog.d/6810.misc +++ /dev/null @@ -1 +0,0 @@ -Record room versions in the `rooms` table. diff --git a/changelog.d/6811.bugfix b/changelog.d/6811.bugfix deleted file mode 100644 index 361f2fc2e8..0000000000 --- a/changelog.d/6811.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix waking up other workers when remote server is detected to have come back online. diff --git a/changelog.d/6816.misc b/changelog.d/6816.misc deleted file mode 100644 index a77ba1d7a5..0000000000 --- a/changelog.d/6816.misc +++ /dev/null @@ -1 +0,0 @@ -Add background update to clean out left rooms from current state. diff --git a/changelog.d/6819.misc b/changelog.d/6819.misc deleted file mode 100644 index 4f9a4ac7a5..0000000000 --- a/changelog.d/6819.misc +++ /dev/null @@ -1 +0,0 @@ -Detect unknown remote devices and mark cache as stale. diff --git a/changelog.d/6820.misc b/changelog.d/6820.misc deleted file mode 100644 index 08aa80bcd9..0000000000 --- a/changelog.d/6820.misc +++ /dev/null @@ -1 +0,0 @@ -Refactoring work in preparation for changing the event redaction algorithm. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index a236888d3c..bd942d3e1c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.9.1" +__version__ = "1.10.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 0f8ffa38b5b5137c74c6bf088f5f654553170e11 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Jan 2020 15:38:16 +0000 Subject: [PATCH 099/213] Fix link in upgrade.rst --- UPGRADE.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index 1c5db1c4a8..3cad8c2837 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -82,7 +82,7 @@ Upgrading to v1.10.0 Synapse will now log a warning on start up if used with a PostgreSQL database that has a non-recommended locale set. -See [docs/postgres.md](docs/postgres.md) for details. +See `docs/postgres.md `_ for details. Upgrading to v1.8.0 From 68ef7ebbef3f6ccc697d68f11b74a3f65613379f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Jan 2020 15:45:08 +0000 Subject: [PATCH 100/213] Update changelog --- CHANGES.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6686cafa5b..ab6fce3e7d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,13 @@ Synapse 1.10.0rc1 (2020-01-31) ============================== +**WARNING**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. + + Features -------- -- Implement updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794)) +- Add experimental support for updated authorization rules for aliases events, from [MSC2260](https://github.com/matrix-org/matrix-doc/pull/2260). ([\#6787](https://github.com/matrix-org/synapse/issues/6787), [\#6790](https://github.com/matrix-org/synapse/issues/6790), [\#6794](https://github.com/matrix-org/synapse/issues/6794)) Bugfixes From b0d112e78b96168852260b3986d348ae3a98292f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 3 Feb 2020 13:15:24 +0000 Subject: [PATCH 101/213] Fix `room_version` in `on_invite_request` flow (#6827) I messed this up a bit in #6805, but fortunately we weren't actually doing anything with the room_version so it didn't matter that it was a str not a RoomVersion. --- changelog.d/6827.misc | 1 + synapse/federation/federation_server.py | 13 ++++++++----- synapse/federation/transport/server.py | 4 ++-- 3 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6827.misc diff --git a/changelog.d/6827.misc b/changelog.d/6827.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6827.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. 
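The hunks below have the federation server resolve the wire-level `room_version_id` string into a RoomVersion object at the API boundary, rejecting unknown versions up front instead of passing a mistyped value deeper into the invite flow. The resolution step, condensed from the diff:

    room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
    if not room_version:
        raise SynapseError(
            400,
            "Homeserver does not support this room version",
            Codes.UNSUPPORTED_ROOM_VERSION,
        )

    # Downstream code can then use the structured attributes directly:
    format_ver = room_version.event_format
    pdu = event_from_pdu_json(content, format_ver)
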
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a4c97ed458..d92d5e8064 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -54,7 +54,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, ) -from synapse.types import get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util import glob_to_regex, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache @@ -396,20 +396,23 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - async def on_invite_request(self, origin, content, room_version): - if room_version not in KNOWN_ROOM_VERSIONS: + async def on_invite_request( + self, origin: str, content: JsonDict, room_version_id: str + ): + room_version = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not room_version: raise SynapseError( 400, "Homeserver does not support this room version", Codes.UNSUPPORTED_ROOM_VERSION, ) - format_ver = room_version_to_event_format(room_version) + format_ver = room_version.event_format pdu = event_from_pdu_json(content, format_ver) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version) time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 125eadd796..ae48ba8157 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -579,7 +579,7 @@ class FederationV1InviteServlet(BaseFederationServlet): # state resolution algorithm, and we don't use that for processing # invites content = await self.handler.on_invite_request( - origin, content, room_version=RoomVersions.V1.identifier + origin, content, room_version_id=RoomVersions.V1.identifier ) # V1 federation API is defined to return a content of `[200, {...}]` @@ -606,7 +606,7 @@ class FederationV2InviteServlet(BaseFederationServlet): event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state content = await self.handler.on_invite_request( - origin, event, room_version=room_version + origin, event, room_version_id=room_version ) return 200, content From 370080531ef7ae3f075ff8f577f42c2b6e25295c Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 3 Feb 2020 13:18:42 +0000 Subject: [PATCH 102/213] Allow URL-encoded user IDs on user admin api paths (#6825) --- changelog.d/6825.bugfix | 1 + synapse/rest/admin/users.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6825.bugfix diff --git a/changelog.d/6825.bugfix b/changelog.d/6825.bugfix new file mode 100644 index 0000000000..d3cacd6d9a --- /dev/null +++ b/changelog.d/6825.bugfix @@ -0,0 +1 @@ +Allow URL-encoded User IDs on `/_synapse/admin/v2/users/[/admin]` endpoints. Thanks to @NHAS for reporting. 
\ No newline at end of file diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 3455741195..f1c4434f5c 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -105,7 +105,7 @@ class UsersRestServletV2(RestServlet): class UserRestServletV2(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P@[^/]+)$"),) + PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P[^/]+)$"),) """Get request to list user details. This needs user to have administrator access in Synapse. @@ -568,7 +568,7 @@ class UserAdminServlet(RestServlet): {} """ - PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P@[^/]*)/admin$"),) + PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P[^/]*)/admin$"),) def __init__(self, hs): self.hs = hs From b3e44f0bdf4caff4107826c63b9d281c4d9ff509 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:30:23 +0000 Subject: [PATCH 103/213] make FederationHandler.on_query_auth async --- synapse/handlers/federation.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c86d3177e9..61df39d4bf 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2120,15 +2120,14 @@ class FederationHandler(BaseHandler): logger.warning("Soft-failing %r because %s", event, e) event.internal_metadata.soft_failed = True - @defer.inlineCallbacks - def on_query_auth( + async def on_query_auth( self, origin, event_id, room_id, remote_auth_chain, rejects, missing ): - in_room = yield self.auth.check_host_in_room(room_id, origin) + in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") - event = yield self.store.get_event( + event = await self.store.get_event( event_id, allow_none=False, check_room_id=room_id ) @@ -2136,19 +2135,19 @@ class FederationHandler(BaseHandler): # don't want to fall into the trap of `missing` being wrong. for e in remote_auth_chain: try: - yield self._handle_new_event(origin, e) + await self._handle_new_event(origin, e) except AuthError: pass # Now get the current auth_chain for the event. - local_auth_chain = yield self.store.get_auth_chain( + local_auth_chain = await self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) # TODO: Check if we would now reject event_id. If so we need to tell # everyone. - ret = yield self.construct_auth_difference(local_auth_chain, remote_auth_chain) + ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain) logger.debug("on_query_auth returning: %s", ret) From 7571bf86f0399b2376427f3a6d91b8850e45b8f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:32:48 +0000 Subject: [PATCH 104/213] make FederationHandler.on_send_join_request async --- synapse/handlers/federation.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 61df39d4bf..7d6db77ae5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1411,9 +1411,7 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - @log_function - def on_send_join_request(self, origin, pdu): + async def on_send_join_request(self, origin, pdu): """ We have received a join event for a room. Fully process it and respond with the current state and auth chains. 
""" @@ -1450,9 +1448,9 @@ class FederationHandler(BaseHandler): # would introduce the danger of backwards-compatibility problems. event.internal_metadata.send_on_behalf_of = origin - context = yield self._handle_new_event(origin, event) + context = await self._handle_new_event(origin, event) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1470,14 +1468,14 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.Member: if event.content["membership"] == Membership.JOIN: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, event.room_id) + await self.user_joined_room(user, event.room_id) - prev_state_ids = yield context.get_prev_state_ids() + prev_state_ids = await context.get_prev_state_ids() state_ids = list(prev_state_ids.values()) - auth_chain = yield self.store.get_auth_chain(state_ids) + auth_chain = await self.store.get_auth_chain(state_ids) - state = yield self.store.get_events(list(prev_state_ids.values())) + state = await self.store.get_events(list(prev_state_ids.values())) return {"state": list(state.values()), "auth_chain": auth_chain} From af8ba6b52502dda2c0b50a023f4fd0ef63b67237 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:33:42 +0000 Subject: [PATCH 105/213] make FederationHandler.on_invite_request async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7d6db77ae5..b924c72c77 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1479,8 +1479,7 @@ class FederationHandler(BaseHandler): return {"state": list(state.values()), "auth_chain": auth_chain} - @defer.inlineCallbacks - def on_invite_request( + async def on_invite_request( self, origin: str, event: EventBase, room_version: RoomVersion ): """ We've got an invite event. Process and persist it. Sign it. 
@@ -1490,7 +1489,7 @@ class FederationHandler(BaseHandler): if event.state_key is None: raise SynapseError(400, "The invite event did not have a state key") - is_blocked = yield self.store.is_room_blocked(event.room_id) + is_blocked = await self.store.is_room_blocked(event.room_id) if is_blocked: raise SynapseError(403, "This room has been blocked on this server") @@ -1533,8 +1532,8 @@ class FederationHandler(BaseHandler): ) ) - context = yield self.state_handler.compute_event_context(event) - yield self.persist_events_and_notify([(event, context)]) + context = await self.state_handler.compute_event_context(event) + await self.persist_events_and_notify([(event, context)]) return event From 98681f90cbe70c317556ec1596df256f49d4d38a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:35:30 +0000 Subject: [PATCH 106/213] make FederationHandler.on_make_join_request async --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b924c72c77..e75ebb168e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1348,20 +1348,17 @@ class FederationHandler(BaseHandler): "Error handling queued PDU %s from %s: %s", p.event_id, origin, e ) - @defer.inlineCallbacks - @log_function - def on_make_join_request(self, origin, room_id, user_id): + async def on_make_join_request( + self, origin: str, room_id: str, user_id: str + ) -> EventBase: """ We've received a /make_join/ request, so we create a partial join event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: - origin (str): The (verified) server name of the requesting server. - room_id (str): Room to create join event in - user_id (str): The user to create the join for - - Returns: - Deferred[FrozenEvent] + origin: The (verified) server name of the requesting server. + room_id: Room to create join event in + user_id: The user to create the join for """ if get_domain_from_id(user_id) != origin: logger.info( @@ -1373,7 +1370,7 @@ class FederationHandler(BaseHandler): event_content = {"membership": Membership.JOIN} - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, @@ -1387,14 +1384,14 @@ class FederationHandler(BaseHandler): ) try: - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) except AuthError as e: logger.warning("Failed to create join to %s because %s", room_id, e) raise e - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1405,7 +1402,7 @@ class FederationHandler(BaseHandler): # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_join_request` - yield self.auth.check_from_context( + await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) From d184cbc0319cbff17d04028c265cd774356f6e54 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:39:24 +0000 Subject: [PATCH 107/213] make FederationHandler.on_send_leave_request async --- synapse/handlers/federation.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e75ebb168e..cdde10c8b9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1641,9 +1641,7 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - @log_function - def on_send_leave_request(self, origin, pdu): + async def on_send_leave_request(self, origin, pdu): """ We have received a leave event for a room. Fully process it.""" event = pdu @@ -1663,9 +1661,9 @@ class FederationHandler(BaseHandler): event.internal_metadata.outlier = False - context = yield self._handle_new_event(origin, event) + context = await self._handle_new_event(origin, event) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: From 6e89ec5e32907a5e6b036526e3eb6cb4e76d843f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:40:41 +0000 Subject: [PATCH 108/213] make FederationHandler.on_make_leave_request async --- synapse/handlers/federation.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cdde10c8b9..3813680c34 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1581,20 +1581,17 @@ class FederationHandler(BaseHandler): assert event.room_id == room_id return origin, event, room_version - @defer.inlineCallbacks - @log_function - def on_make_leave_request(self, origin, room_id, user_id): + async def on_make_leave_request( + self, origin: str, room_id: str, user_id: str + ) -> EventBase: """ We've received a /make_leave/ request, so we create a partial leave event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. Args: - origin (str): The (verified) server name of the requesting server. - room_id (str): Room to create leave event in - user_id (str): The user to create the leave for - - Returns: - Deferred[FrozenEvent] + origin: The (verified) server name of the requesting server. 
+ room_id: Room to create leave event in + user_id: The user to create the leave for """ if get_domain_from_id(user_id) != origin: logger.info( @@ -1604,7 +1601,7 @@ class FederationHandler(BaseHandler): ) raise SynapseError(403, "User not from origin", Codes.FORBIDDEN) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) builder = self.event_builder_factory.new( room_version, { @@ -1616,11 +1613,11 @@ class FederationHandler(BaseHandler): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -1632,7 +1629,7 @@ class FederationHandler(BaseHandler): try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - yield self.auth.check_from_context( + await self.auth.check_from_context( room_version, event, context, do_sig_check=False ) except AuthError as e: From c556ed9e15640da5b8aff15c4547609202eab6f1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:43:51 +0000 Subject: [PATCH 109/213] make FederationHandler._handle_new_events async --- synapse/handlers/federation.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3813680c34..9c16f54304 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1815,13 +1815,12 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _handle_new_events( + async def _handle_new_events( self, origin: str, event_infos: Iterable[_NewEventInfo], backfilled: bool = False, - ): + ) -> None: """Creates the appropriate contexts and persists events. The events should not depend on one another, e.g. this should be used to persist a bunch of outliers, but not a chunk of individual events that depend @@ -1830,11 +1829,10 @@ class FederationHandler(BaseHandler): Notifies about the events where appropriate. 
""" - @defer.inlineCallbacks - def prep(ev_info: _NewEventInfo): + async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = yield self._prep_event( + res = await self._prep_event( origin, event, state=ev_info.state, @@ -1843,14 +1841,14 @@ class FederationHandler(BaseHandler): ) return res - contexts = yield make_deferred_yieldable( + contexts = await make_deferred_yieldable( defer.gatherResults( [run_in_background(prep, ev_info) for ev_info in event_infos], consumeErrors=True, ) ) - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [ (ev_info.event, context) for ev_info, context in zip(event_infos, contexts) From 1cdc253e0a965515823bc34c6223dbeda0c55669 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:48:33 +0000 Subject: [PATCH 110/213] make FederationHandler._handle_new_event async --- synapse/handlers/federation.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9c16f54304..81eb7eecbd 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1782,11 +1782,10 @@ class FederationHandler(BaseHandler): def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) - @defer.inlineCallbacks - def _handle_new_event( + async def _handle_new_event( self, origin, event, state=None, auth_events=None, backfilled=False ): - context = yield self._prep_event( + context = await self._prep_event( origin, event, state=state, auth_events=auth_events, backfilled=backfilled ) @@ -1799,11 +1798,11 @@ class FederationHandler(BaseHandler): and not backfilled and not context.rejected ): - yield self.action_generator.handle_push_actions_for_event( + await self.action_generator.handle_push_actions_for_event( event, context ) - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [(event, context)], backfilled=backfilled ) success = True @@ -2296,7 +2295,9 @@ class FederationHandler(BaseHandler): logger.debug( "do_auth %s missing_auth: %s", event.event_id, e.event_id ) - yield self._handle_new_event(origin, e, auth_events=auth) + yield defer.ensureDeferred( + self._handle_new_event(origin, e, auth_events=auth) + ) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e From 8033b257a7f4f8e21d5b52228234f429f1552dd2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:49:32 +0000 Subject: [PATCH 111/213] make FederationHandler._prep_event async --- synapse/handlers/federation.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 81eb7eecbd..8cb3a505f8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1966,54 +1966,41 @@ class FederationHandler(BaseHandler): yield self.persist_events_and_notify([(event, new_event_context)]) - @defer.inlineCallbacks - def _prep_event( + async def _prep_event( self, origin: str, event: EventBase, state: Optional[Iterable[EventBase]], auth_events: Optional[StateMap[EventBase]], backfilled: bool, - ): - """ - - Args: - origin: - event: - state: - auth_events: - backfilled: - - Returns: - Deferred, which resolves to synapse.events.snapshot.EventContext - """ - context = yield self.state_handler.compute_event_context(event, old_state=state) + ) -> EventContext: + context = await 
self.state_handler.compute_event_context(event, old_state=state) if not auth_events: - prev_state_ids = yield context.get_prev_state_ids() - auth_events_ids = yield self.auth.compute_auth_events( + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = await self.auth.compute_auth_events( event, prev_state_ids, for_verification=True ) - auth_events = yield self.store.get_events(auth_events_ids) + auth_events = await self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events.values()} # This is a hack to fix some old rooms where the initial join event # didn't reference the create event in its auth events. if event.type == EventTypes.Member and not event.auth_event_ids(): if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = yield self.store.get_event( + c = await self.store.get_event( event.prev_event_ids()[0], allow_none=True ) if c and c.type == EventTypes.Create: auth_events[(c.type, c.state_key)] = c - context = yield self.do_auth(origin, event, context, auth_events=auth_events) + context = await self.do_auth(origin, event, context, auth_events=auth_events) if not context.rejected: - yield self._check_for_soft_fail(event, state, backfilled) + await self._check_for_soft_fail(event, state, backfilled) if event.type == EventTypes.GuestAccess and not context.rejected: - yield self.maybe_kick_guest_users(event) + await self.maybe_kick_guest_users(event) return context From bc9b75c6f08db3672d6620a91e91be2e0188c1a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:51:24 +0000 Subject: [PATCH 112/213] make FederationHandler.do_auth async --- synapse/handlers/federation.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8cb3a505f8..780029547d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2149,16 +2149,20 @@ class FederationHandler(BaseHandler): return missing_events - @defer.inlineCallbacks - @log_function - def do_auth(self, origin, event, context, auth_events): + async def do_auth( + self, + origin: str, + event: EventBase, + context: EventContext, + auth_events: StateMap[EventBase], + ) -> EventContext: """ Args: - origin (str): - event (synapse.events.EventBase): - context (synapse.events.snapshot.EventContext): - auth_events (dict[(str, str)->synapse.events.EventBase]): + origin: + event: + context: + auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room @@ -2168,13 +2172,13 @@ class FederationHandler(BaseHandler): Also NB that this function adds entries to it. 
Returns: - defer.Deferred[EventContext]: updated context object + updated context object """ - room_version = yield self.store.get_room_version_id(event.room_id) + room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: - context = yield self._update_auth_events_and_context_for_auth( + context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events ) except Exception: From a25ddf26a35f2bd25b5284736b39de70a37f8570 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:53:30 +0000 Subject: [PATCH 113/213] make FederationHandler._update_auth_events_and_context_for_auth async --- synapse/handlers/federation.py | 41 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 780029547d..8dd2e81fd4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2200,10 +2200,13 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _update_auth_events_and_context_for_auth( - self, origin, event, context, auth_events - ): + async def _update_auth_events_and_context_for_auth( + self, + origin: str, + event: EventBase, + context: EventContext, + auth_events: StateMap[EventBase], + ) -> EventContext: """Helper for do_auth. See there for docs. Checks whether a given event has the expected auth events. If it @@ -2211,16 +2214,16 @@ class FederationHandler(BaseHandler): we can come to a consensus (e.g. if one server missed some valid state). - This attempts to resovle any potential divergence of state between + This attempts to resolve any potential divergence of state between servers, but is not essential and so failures should not block further processing of the event. Args: - origin (str): - event (synapse.events.EventBase): - context (synapse.events.snapshot.EventContext): + origin: + event: + context: - auth_events (dict[(str, str)->synapse.events.EventBase]): + auth_events: Map from (event_type, state_key) to event Normally, our calculated auth_events based on the state of the room @@ -2231,7 +2234,7 @@ class FederationHandler(BaseHandler): Also NB that this function adds entries to it. Returns: - defer.Deferred[EventContext]: updated context + updated context """ event_auth_events = set(event.auth_event_ids()) @@ -2245,7 +2248,7 @@ class FederationHandler(BaseHandler): # # we start by checking if they are in the store, and then try calling /event_auth/. 
if missing_auth: - have_events = yield self.store.have_seen_events(missing_auth) + have_events = await self.store.have_seen_events(missing_auth) logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) @@ -2254,7 +2257,7 @@ class FederationHandler(BaseHandler): logger.info("auth_events contains unknown events: %s", missing_auth) try: try: - remote_auth_chain = yield self.federation_client.get_event_auth( + remote_auth_chain = await self.federation_client.get_event_auth( origin, event.room_id, event.event_id ) except RequestSendFailed as e: @@ -2263,7 +2266,7 @@ class FederationHandler(BaseHandler): logger.info("Failed to get event auth from remote: %s", e) return context - seen_remotes = yield self.store.have_seen_events( + seen_remotes = await self.store.have_seen_events( [e.event_id for e in remote_auth_chain] ) @@ -2286,9 +2289,7 @@ class FederationHandler(BaseHandler): logger.debug( "do_auth %s missing_auth: %s", event.event_id, e.event_id ) - yield defer.ensureDeferred( - self._handle_new_event(origin, e, auth_events=auth) - ) + await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e @@ -2322,7 +2323,7 @@ class FederationHandler(BaseHandler): # XXX: currently this checks for redactions but I'm not convinced that is # necessary? - different_events = yield self.store.get_events_as_list(different_auth) + different_events = await self.store.get_events_as_list(different_auth) for d in different_events: if d.room_id != event.room_id: @@ -2348,8 +2349,8 @@ class FederationHandler(BaseHandler): remote_auth_events.update({(d.type, d.state_key): d for d in different_events}) remote_state = remote_auth_events.values() - room_version = yield self.store.get_room_version_id(event.room_id) - new_state = yield self.state_handler.resolve_events( + room_version = await self.store.get_room_version_id(event.room_id) + new_state = await self.state_handler.resolve_events( room_version, (local_state, remote_state), event ) @@ -2364,7 +2365,7 @@ class FederationHandler(BaseHandler): auth_events.update(new_state) - context = yield self._update_context_for_auth_events( + context = await self._update_context_for_auth_events( event, context, auth_events ) From 0d5f2f4bb0a1153d7d8b7d66d5c06268b2f2ea18 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 15:55:35 +0000 Subject: [PATCH 114/213] make FederationHandler._update_context_for_auth_events async --- synapse/handlers/federation.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8dd2e81fd4..ea49f928e5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2371,21 +2371,21 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _update_context_for_auth_events(self, event, context, auth_events): + async def _update_context_for_auth_events( + self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase] + ) -> EventContext: """Update the state_ids in an event context after auth event resolution, storing the changes as a new state group. Args: - event (Event): The event we're handling the context for + event: The event we're handling the context for - context (synapse.events.snapshot.EventContext): initial event context + context: initial event context - auth_events (dict[(str, str)->EventBase]): Events to update in the event - context. 
+ auth_events: Events to update in the event context. Returns: - Deferred[EventContext]: new event context + new event context """ # exclude the state key of the new event from the current_state in the context. if event.is_state(): @@ -2396,19 +2396,19 @@ class FederationHandler(BaseHandler): k: a.event_id for k, a in iteritems(auth_events) if k != event_key } - current_state_ids = yield context.get_current_state_ids() + current_state_ids = await context.get_current_state_ids() current_state_ids = dict(current_state_ids) current_state_ids.update(state_updates) - prev_state_ids = yield context.get_prev_state_ids() + prev_state_ids = await context.get_prev_state_ids() prev_state_ids = dict(prev_state_ids) prev_state_ids.update({k: a.event_id for k, a in iteritems(auth_events)}) # create a new state group as a delta from the existing one. prev_group = context.state_group - state_group = yield self.state_store.store_state_group( + state_group = await self.state_store.store_state_group( event.event_id, event.room_id, prev_group=prev_group, From 957129f4a7febacba64bc68fbcd7375db5156186 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:00:46 +0000 Subject: [PATCH 115/213] make FederationHandler.construct_auth_difference async --- synapse/handlers/federation.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea49f928e5..1d9084b326 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2425,8 +2425,9 @@ class FederationHandler(BaseHandler): delta_ids=state_updates, ) - @defer.inlineCallbacks - def construct_auth_difference(self, local_auth, remote_auth): + async def construct_auth_difference( + self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase] + ) -> Dict: """ Given a local and remote auth chain, find the differences. This assumes that we have already processed all events in remote_auth @@ -2535,7 +2536,7 @@ class FederationHandler(BaseHandler): reason_map = {} for e in base_remote_rejected: - reason = yield self.store.get_rejection_reason(e.event_id) + reason = await self.store.get_rejection_reason(e.event_id) if reason is None: # TODO: e is not in the current state, so we should # construct some proof of that. 
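The commits above all follow the same mechanical recipe: drop `@defer.inlineCallbacks` (and `@log_function` where present), mark the method `async`, and turn each `yield` into an `await`. Callers that are still inlineCallbacks generators bridge to the converted coroutines with `defer.ensureDeferred`, as PATCH 110 does while `_update_auth_events_and_context_for_auth` still has to call the now-async `_handle_new_event`. A minimal, self-contained sketch of that bridge (illustrative names only, not code from these patches):

from twisted.internet import defer


async def new_style():
    # Converted code awaits Deferreds directly: Twisted Deferreds are
    # awaitable, so `x = yield d` becomes `x = await d`.
    return await defer.succeed("done")  # stand-in for a storage call


@defer.inlineCallbacks
def legacy_caller():
    # An inlineCallbacks generator cannot usefully yield a raw coroutine,
    # so it wraps the coroutine in a Deferred first.
    result = yield defer.ensureDeferred(new_style())
    return result


legacy_caller().addCallback(print)  # prints "done"

Once the caller itself is converted, the wrapper disappears and the call becomes a plain `await`, which is exactly the cleanup PATCH 113 performs.
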
From 863087d18643d224f3b8eb4a511429cc8669a135 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:02:50 +0000 Subject: [PATCH 116/213] make FederationHandler.on_exchange_third_party_invite_request async --- synapse/handlers/federation.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1d9084b326..1eafc1bf1a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -65,7 +65,7 @@ from synapse.replication.http.federation import ( from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour -from synapse.types import StateMap, UserID, get_domain_from_id +from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination @@ -2621,33 +2621,31 @@ class FederationHandler(BaseHandler): destinations, room_id, event_dict ) - @defer.inlineCallbacks - @log_function - def on_exchange_third_party_invite_request(self, room_id, event_dict): + async def on_exchange_third_party_invite_request( + self, room_id: str, event_dict: JsonDict + ) -> None: """Handle an exchange_third_party_invite request from a remote server The remote server will call this when it wants to turn a 3pid invite into a normal m.room.member invite. Args: - room_id (str): The ID of the room. + room_id: The ID of the room. event_dict (dict[str, Any]): Dictionary containing the event body. - Returns: - Deferred: resolves (to None) """ - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) # NB: event_dict has a particular specced format we might need to fudge # if we change event formats too much. builder = self.event_builder_factory.new(room_version, event_dict) - event, context = yield self.event_creation_handler.create_new_client_event( + event, context = await self.event_creation_handler.create_new_client_event( builder=builder ) - event_allowed = yield self.third_party_event_rules.check_event_allowed( + event_allowed = await self.third_party_event_rules.check_event_allowed( event, context ) if not event_allowed: @@ -2658,16 +2656,16 @@ class FederationHandler(BaseHandler): 403, "This event is not allowed in this context", Codes.FORBIDDEN ) - event, context = yield self.add_display_name_to_third_party_invite( + event, context = await self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) try: - yield self.auth.check_from_context(room_version, event, context) + await self.auth.check_from_context(room_version, event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e - yield self._check_signature(event, context) + await self._check_signature(event, context) # We need to tell the transaction queue to send this out, even # though the sender isn't a local user. 
@@ -2675,7 +2673,7 @@ class FederationHandler(BaseHandler): # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() - yield member_handler.send_membership_event(None, event, context) + await member_handler.send_membership_event(None, event, context) @defer.inlineCallbacks def add_display_name_to_third_party_invite( From 94f7b4cd54612f9f1a67b3090f7b249ac20a0c76 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:06:46 +0000 Subject: [PATCH 117/213] make FederationHandler.on_event_auth async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1eafc1bf1a..4e9f240e14 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1199,13 +1199,12 @@ class FederationHandler(BaseHandler): return pdu - @defer.inlineCallbacks - def on_event_auth(self, event_id): - event = yield self.store.get_event(event_id) - auth = yield self.store.get_auth_chain( + async def on_event_auth(self, event_id: str) -> List[EventBase]: + event = await self.store.get_event(event_id) + auth = await self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) - return [e for e in auth] + return list(auth) @log_function @defer.inlineCallbacks From ebd6a15af3994d803b8deb2172dbd0deaeb59915 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:13:13 +0000 Subject: [PATCH 118/213] make FederationHandler.do_invite_join async --- synapse/handlers/federation.py | 30 ++++++++++++++---------------- synapse/handlers/room_member.py | 6 ++++-- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4e9f240e14..127dc0fc02 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1206,9 +1206,9 @@ class FederationHandler(BaseHandler): ) return list(auth) - @log_function - @defer.inlineCallbacks - def do_invite_join(self, target_hosts, room_id, joinee, content): + async def do_invite_join( + self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict + ) -> None: """ Attempts to join the `joinee` to the room `room_id` via the servers contained in `target_hosts`. @@ -1221,17 +1221,17 @@ class FederationHandler(BaseHandler): have finished processing the join. Args: - target_hosts (Iterable[str]): List of servers to attempt to join the room with. + target_hosts: List of servers to attempt to join the room with. - room_id (str): The ID of the room to join. + room_id: The ID of the room to join. - joinee (str): The User ID of the joining user. + joinee: The User ID of the joining user. - content (dict): The event content to use for the join event. + content: The event content to use for the join event. 
""" logger.debug("Joining %s to %s", joinee, room_id) - origin, event, room_version_obj = yield self._make_and_verify_event( + origin, event, room_version_obj = await self._make_and_verify_event( target_hosts, room_id, joinee, @@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler): self.room_queues[room_id] = [] - yield self._clean_room_for_join(room_id) + await self._clean_room_for_join(room_id) handled_events = set() @@ -1261,7 +1261,7 @@ class FederationHandler(BaseHandler): pass event_format_version = room_version_obj.event_format - ret = yield self.federation_client.send_join( + ret = await self.federation_client.send_join( target_hosts, event, event_format_version ) @@ -1280,7 +1280,7 @@ class FederationHandler(BaseHandler): logger.debug("do_invite_join event: %s", event) try: - yield self.store.store_room( + await self.store.store_room( room_id=room_id, room_creator_user_id="", is_public=False, @@ -1290,13 +1290,13 @@ class FederationHandler(BaseHandler): # FIXME pass - yield self._persist_auth_tree( + await self._persist_auth_tree( origin, auth_chain, state, event, room_version_obj ) # Check whether this room is the result of an upgrade of a room we already know # about. If so, migrate over user information - predecessor = yield self.store.get_room_predecessor(room_id) + predecessor = await self.store.get_room_predecessor(room_id) if not predecessor or not isinstance(predecessor.get("room_id"), str): return old_room_id = predecessor["room_id"] @@ -1306,7 +1306,7 @@ class FederationHandler(BaseHandler): # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() - yield member_handler.transfer_room_state_on_room_upgrade( + await member_handler.transfer_room_state_on_room_upgrade( old_room_id, room_id ) @@ -1323,8 +1323,6 @@ class FederationHandler(BaseHandler): run_in_background(self._handle_queued_pdus, room_queue) - return True - async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 15e8aa5249..ce8150db6e 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -944,8 +944,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): # join dance for now, since we're kinda implicitly checking # that we are allowed to join when we decide whether or not we # need to do the invite/join dance. 
- yield self.federation_handler.do_invite_join( - remote_room_hosts, room_id, user.to_string(), content + yield defer.ensureDeferred( + self.federation_handler.do_invite_join( + remote_room_hosts, room_id, user.to_string(), content + ) ) yield self._user_joined_room(user, room_id) From dbdf843012847536f5e87f66efd48174ad348be3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:14:58 +0000 Subject: [PATCH 119/213] make FederationHandler._persist_auth_tree async --- synapse/handlers/federation.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 127dc0fc02..7bc5632f4d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1852,15 +1852,14 @@ class FederationHandler(BaseHandler): backfilled=backfilled, ) - @defer.inlineCallbacks - def _persist_auth_tree( + async def _persist_auth_tree( self, origin: str, auth_events: List[EventBase], state: List[EventBase], event: EventBase, room_version: RoomVersion, - ): + ) -> None: """Checks the auth chain is valid (and passes auth checks) for the state and event. Then persists the auth chain and state atomically. Persists the event separately. Notifies about the persisted events @@ -1875,14 +1874,11 @@ class FederationHandler(BaseHandler): event room_version: The room version we expect this room to have, and will raise if it doesn't match the version in the create event. - - Returns: - Deferred """ events_to_context = {} for e in itertools.chain(auth_events, state): e.internal_metadata.outlier = True - ctx = yield self.state_handler.compute_event_context(e) + ctx = await self.state_handler.compute_event_context(e) events_to_context[e.event_id] = ctx event_map = { @@ -1914,7 +1910,7 @@ class FederationHandler(BaseHandler): missing_auth_events.add(e_id) for e_id in missing_auth_events: - m_ev = yield self.federation_client.get_pdu( + m_ev = await self.federation_client.get_pdu( [origin], e_id, room_version=room_version.identifier, @@ -1950,18 +1946,18 @@ class FederationHandler(BaseHandler): raise events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR - yield self.persist_events_and_notify( + await self.persist_events_and_notify( [ (e, events_to_context[e.event_id]) for e in itertools.chain(auth_events, state) ] ) - new_event_context = yield self.state_handler.compute_event_context( + new_event_context = await self.state_handler.compute_event_context( event, old_state=state ) - yield self.persist_events_and_notify([(event, new_event_context)]) + await self.persist_events_and_notify([(event, new_event_context)]) async def _prep_event( self, From c3f296af32d0da0dc9e8797949ed95d9f4ca1c7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:16:31 +0000 Subject: [PATCH 120/213] make FederationHandler._check_for_soft_fail async --- synapse/handlers/federation.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7bc5632f4d..c4a291d1c0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1997,27 +1997,23 @@ class FederationHandler(BaseHandler): return context - @defer.inlineCallbacks - def _check_for_soft_fail( + async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool - ): - """Checks if we should soft fail the event, if so marks the event as + ) -> None: + """Checks if we 
should soft fail the event; if so, marks the event as such. Args: event state: The state at the event if we don't have all the event's prev events backfilled: Whether the event is from backfill - - Returns: - Deferred """ # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier() if do_soft_fail_check: - extrem_ids = yield self.store.get_latest_event_ids_in_room(event.room_id) + extrem_ids = await self.store.get_latest_event_ids_in_room(event.room_id) extrem_ids = set(extrem_ids) prev_event_ids = set(event.prev_event_ids()) @@ -2028,7 +2024,7 @@ class FederationHandler(BaseHandler): do_soft_fail_check = False if do_soft_fail_check: - room_version = yield self.store.get_room_version_id(event.room_id) + room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] # Calculate the "current state". @@ -2045,19 +2041,19 @@ class FederationHandler(BaseHandler): # given state at the event. This should correctly handle cases # like bans, especially with state res v2. - state_sets = yield self.state_store.get_state_groups( + state_sets = await self.state_store.get_state_groups( event.room_id, extrem_ids ) state_sets = list(state_sets.values()) state_sets.append(state) - current_state_ids = yield self.state_handler.resolve_events( + current_state_ids = await self.state_handler.resolve_events( room_version, state_sets, event ) current_state_ids = { k: e.event_id for k, e in iteritems(current_state_ids) } else: - current_state_ids = yield self.state_handler.get_current_state_ids( + current_state_ids = await self.state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids ) @@ -2073,7 +2069,7 @@ class FederationHandler(BaseHandler): e for k, e in iteritems(current_state_ids) if k in auth_types ] - current_auth_events = yield self.store.get_events(current_state_ids) + current_auth_events = await self.store.get_events(current_state_ids) current_auth_events = { (e.type, e.state_key): e for e in current_auth_events.values() } From 4286e429a7359cdeb670fdd63839d0b73836026b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:19:18 +0000 Subject: [PATCH 121/213] make FederationHandler.do_remotely_reject_invite async --- synapse/handlers/federation.py | 13 +++++++------ synapse/handlers/room_member.py | 6 ++++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c4a291d1c0..73ef130ace 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1531,9 +1531,10 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content): - origin, event, room_version = yield self._make_and_verify_event( + async def do_remotely_reject_invite( + self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict + ) -> EventBase: + origin, event, room_version = await self._make_and_verify_event( target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not @@ -1549,10 +1550,10 @@ class FederationHandler(BaseHandler): except ValueError: pass - yield self.federation_client.send_leave(target_hosts, event) + await self.federation_client.send_leave(target_hosts, event) - 
context = yield self.state_handler.compute_event_context(event) - yield self.persist_events_and_notify([(event, context)]) + context = await self.state_handler.compute_event_context(event) + await self.persist_events_and_notify([(event, context)]) return event diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ce8150db6e..4260426369 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -984,8 +984,10 @@ class RoomMemberMasterHandler(RoomMemberHandler): """ fed_handler = self.federation_handler try: - ret = yield fed_handler.do_remotely_reject_invite( - remote_room_hosts, room_id, target.to_string(), content=content, + ret = yield defer.ensureDeferred( + fed_handler.do_remotely_reject_invite( + remote_room_hosts, room_id, target.to_string(), content=content, + ) ) return ret except Exception as e: From 3b7e0e002bad4ba39e9d4b4188fae0132571bded Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:22:30 +0000 Subject: [PATCH 122/213] make FederationHandler._make_and_verify_event async --- synapse/handlers/federation.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 73ef130ace..e5fa55b973 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1557,15 +1557,20 @@ class FederationHandler(BaseHandler): return event - @defer.inlineCallbacks - def _make_and_verify_event( - self, target_hosts, room_id, user_id, membership, content={}, params=None - ): + async def _make_and_verify_event( + self, + target_hosts: Iterable[str], + room_id: str, + user_id: str, + membership: str, + content: JsonDict = {}, + params: Optional[Dict[str, str]] = None, + ) -> Tuple[str, EventBase, RoomVersion]: ( origin, event, room_version, - ) = yield self.federation_client.make_membership_event( + ) = await self.federation_client.make_membership_event( target_hosts, room_id, user_id, membership, content, params=params ) From 05299599b61bd19805a5b7843b907b1b5954e1de Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:24:07 +0000 Subject: [PATCH 123/213] make FederationHandler.persist_events_and_notify async --- synapse/handlers/federation.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e5fa55b973..ec010c1f9f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2816,27 +2816,27 @@ class FederationHandler(BaseHandler): if "valid" not in response or not response["valid"]: raise AuthError(403, "Third party certificate was invalid") - @defer.inlineCallbacks - def persist_events_and_notify(self, event_and_contexts, backfilled=False): + async def persist_events_and_notify( + self, + event_and_contexts: Sequence[Tuple[EventBase, EventContext]], + backfilled: bool = False, + ) -> None: """Persists events and tells the notifier/pushers about them, if necessary. 
Args: - event_and_contexts(list[tuple[FrozenEvent, EventContext]]) - backfilled (bool): Whether these events are a result of + event_and_contexts: + backfilled: Whether these events are a result of backfilling or not - - Returns: - Deferred """ if self.config.worker_app: - yield self._send_events_to_master( + await self._send_events_to_master( store=self.store, event_and_contexts=event_and_contexts, backfilled=backfilled, ) else: - max_stream_id = yield self.storage.persistence.persist_events( + max_stream_id = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) @@ -2847,7 +2847,7 @@ class FederationHandler(BaseHandler): if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - yield self._notify_persisted_event(event, max_stream_id) + await self._notify_persisted_event(event, max_stream_id) def _notify_persisted_event(self, event, max_stream_id): """Checks to see if notifier/pushers should be notified about the From 814cc00cb96168479a5c462e9078f3b60901589d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:25:41 +0000 Subject: [PATCH 124/213] make FederationHandler._notify_persisted_event async --- synapse/handlers/federation.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ec010c1f9f..19cd55b8cf 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2849,13 +2849,15 @@ class FederationHandler(BaseHandler): for event, _ in event_and_contexts: await self._notify_persisted_event(event, max_stream_id) - def _notify_persisted_event(self, event, max_stream_id): + async def _notify_persisted_event( + self, event: EventBase, max_stream_id: int + ) -> None: """Checks to see if notifier/pushers should be notified about the event or not. Args: - event (FrozenEvent) - max_stream_id (int): The max_stream_id returned by persist_events + event: + max_stream_id: The max_stream_id returned by persist_events """ extra_users = [] @@ -2879,7 +2881,7 @@ class FederationHandler(BaseHandler): event, event_stream_id, max_stream_id, extra_users=extra_users ) - return self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) def _clean_room_for_join(self, room_id): """Called to clean up any data in DB for a given room, ready for the From 52642860dabac96467d7a7947976c53eb5dc4c82 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:27:05 +0000 Subject: [PATCH 125/213] make FederationHandler._clean_room_for_join async --- synapse/handlers/federation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 19cd55b8cf..e252e69888 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2883,17 +2883,17 @@ class FederationHandler(BaseHandler): await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) - def _clean_room_for_join(self, room_id): + async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the server to join the room. 
Args: - room_id (str) + room_id """ if self.config.worker_app: - return self._clean_room_for_join_client(room_id) + await self._clean_room_for_join_client(room_id) else: - return self.store.clean_room_for_join(room_id) + await self.store.clean_room_for_join(room_id) def user_joined_room(self, user, room_id): """Called when a new user has joined the room From f64c96662ed794bbdd8960002658602a1b92eb2b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:28:31 +0000 Subject: [PATCH 126/213] make FederationHandler.user_joined_room async --- synapse/handlers/federation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e252e69888..c94573b547 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2895,15 +2895,15 @@ class FederationHandler(BaseHandler): else: await self.store.clean_room_for_join(room_id) - def user_joined_room(self, user, room_id): + async def user_joined_room(self, user: UserID, room_id: str) -> None: """Called when a new user has joined the room """ if self.config.worker_app: - return self._notify_user_membership_change( + await self._notify_user_membership_change( room_id=room_id, user_id=user.to_string(), change="joined" ) else: - return defer.succeed(user_joined_room(self.distributor, user, room_id)) + user_joined_room(self.distributor, user, room_id) @defer.inlineCallbacks def get_room_complexity(self, remote_room_hosts, room_id): From e49eb1a886c6f139887b1e71f8234e02e738a84a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 16:30:21 +0000 Subject: [PATCH 127/213] changelog --- changelog.d/6837.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6837.misc diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc new file mode 100644 index 0000000000..0496f12de8 --- /dev/null +++ b/changelog.d/6837.misc @@ -0,0 +1 @@ +Port much of `synapse.handlers.federation` to async/await. From ae5b3104f0023171b2bb89f08a066e5974ee7666 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Feb 2020 17:10:54 +0000 Subject: [PATCH 128/213] Fix stacktraces when using ObservableDeferred and async/await (#6836) --- changelog.d/6836.misc | 1 + synapse/util/async_helpers.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/6836.misc diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc new file mode 100644 index 0000000000..232488e1e5 --- /dev/null +++ b/changelog.d/6836.misc @@ -0,0 +1 @@ +Fix stacktraces when using `ObservableDeferred` and async/await. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 04b6abdc24..581dffd8a0 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -73,6 +73,10 @@ class ObservableDeferred(object): def errback(f): object.__setattr__(self, "_result", (False, f)) while self._observers: + # This is a little bit of magic to correctly propagate stack + # traces when we `await` on one of the observer deferreds. + f.value.__failure__ = f + try: # TODO: Handle errors here. 
self._observers.pop().errback(f) From b9391c957572224c3a7c22870102fcbd24dea4e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Feb 2020 18:05:44 +0000 Subject: [PATCH 129/213] Add typing to SyncHandler (#6821) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/6821.misc | 1 + synapse/events/__init__.py | 18 +- synapse/handlers/sync.py | 697 +++++++++++++++++--------------- tests/storage/test_redaction.py | 5 +- tox.ini | 1 + 5 files changed, 377 insertions(+), 345 deletions(-) create mode 100644 changelog.d/6821.misc diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc new file mode 100644 index 0000000000..1d5265d5e2 --- /dev/null +++ b/changelog.d/6821.misc @@ -0,0 +1 @@ +Add type hints to `SyncHandler`. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index f813fa2fe7..92f76703b3 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -189,8 +189,14 @@ class EventBase(object): redacts = _event_dict_property("redacts", None) room_id = _event_dict_property("room_id") sender = _event_dict_property("sender") + state_key = _event_dict_property("state_key") + type = _event_dict_property("type") user_id = _event_dict_property("sender") + @property + def event_id(self) -> str: + raise NotImplementedError() + @property def membership(self): return self.content["membership"] @@ -281,10 +287,7 @@ class FrozenEvent(EventBase): else: frozen_dict = event_dict - self.event_id = event_dict["event_id"] - self.type = event_dict["type"] - if "state_key" in event_dict: - self.state_key = event_dict["state_key"] + self._event_id = event_dict["event_id"] super(FrozenEvent, self).__init__( frozen_dict, @@ -294,6 +297,10 @@ class FrozenEvent(EventBase): rejected_reason=rejected_reason, ) + @property + def event_id(self) -> str: + return self._event_id + def __str__(self): return self.__repr__() @@ -332,9 +339,6 @@ class FrozenEventV2(EventBase): frozen_dict = event_dict self._event_id = None - self.type = event_dict["type"] - if "state_key" in event_dict: - self.state_key = event_dict["state_key"] super(FrozenEventV2, self).__init__( frozen_dict, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd95f85e3f..5f060241b4 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -14,20 +14,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import collections import itertools import logging +from typing import Any, Dict, FrozenSet, List, Optional, Set, Tuple from six import iteritems, itervalues +import attr from prometheus_client import Counter from synapse.api.constants import EventTypes, Membership +from synapse.api.filtering import FilterCollection +from synapse.events import EventBase from synapse.logging.context import LoggingContext from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter -from synapse.types import RoomStreamToken +from synapse.types import ( + Collection, + JsonDict, + RoomStreamToken, + StateMap, + StreamToken, + UserID, +) from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache @@ -62,17 +72,22 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000 LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100 -SyncConfig = collections.namedtuple( - "SyncConfig", ["user", "filter_collection", "is_guest", "request_key", "device_id"] -) +@attr.s(slots=True, frozen=True) +class SyncConfig: + user = attr.ib(type=UserID) + filter_collection = attr.ib(type=FilterCollection) + is_guest = attr.ib(type=bool) + request_key = attr.ib(type=Tuple[Any, ...]) + device_id = attr.ib(type=str) -class TimelineBatch( - collections.namedtuple("TimelineBatch", ["prev_batch", "events", "limited"]) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class TimelineBatch: + prev_batch = attr.ib(type=StreamToken) + events = attr.ib(type=List[EventBase]) + limited = attr.ib(bool) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ @@ -81,23 +96,17 @@ class TimelineBatch( __bool__ = __nonzero__ # python3 -class JoinedSyncResult( - collections.namedtuple( - "JoinedSyncResult", - [ - "room_id", # str - "timeline", # TimelineBatch - "state", # dict[(str, str), FrozenEvent] - "ephemeral", - "account_data", - "unread_notifications", - "summary", - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class JoinedSyncResult: + room_id = attr.ib(type=str) + timeline = attr.ib(type=TimelineBatch) + state = attr.ib(type=StateMap[EventBase]) + ephemeral = attr.ib(type=List[JsonDict]) + account_data = attr.ib(type=List[JsonDict]) + unread_notifications = attr.ib(type=JsonDict) + summary = attr.ib(type=Optional[JsonDict]) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. """ @@ -113,20 +122,14 @@ class JoinedSyncResult( __bool__ = __nonzero__ # python3 -class ArchivedSyncResult( - collections.namedtuple( - "ArchivedSyncResult", - [ - "room_id", # str - "timeline", # TimelineBatch - "state", # dict[(str, str), FrozenEvent] - "account_data", - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class ArchivedSyncResult: + room_id = attr.ib(type=str) + timeline = attr.ib(type=TimelineBatch) + state = attr.ib(type=StateMap[EventBase]) + account_data = attr.ib(type=List[JsonDict]) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if room needs to be part of the sync result. 
""" @@ -135,70 +138,88 @@ class ArchivedSyncResult( __bool__ = __nonzero__ # python3 -class InvitedSyncResult( - collections.namedtuple( - "InvitedSyncResult", - ["room_id", "invite"], # str # FrozenEvent: the invite event - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class InvitedSyncResult: + room_id = attr.ib(type=str) + invite = attr.ib(type=EventBase) - def __nonzero__(self): + def __nonzero__(self) -> bool: """Invited rooms should always be reported to the client""" return True __bool__ = __nonzero__ # python3 -class GroupsSyncResult( - collections.namedtuple("GroupsSyncResult", ["join", "invite", "leave"]) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class GroupsSyncResult: + join = attr.ib(type=JsonDict) + invite = attr.ib(type=JsonDict) + leave = attr.ib(type=JsonDict) - def __nonzero__(self): + def __nonzero__(self) -> bool: return bool(self.join or self.invite or self.leave) __bool__ = __nonzero__ # python3 -class DeviceLists( - collections.namedtuple( - "DeviceLists", - [ - "changed", # list of user_ids whose devices may have changed - "left", # list of user_ids whose devices we no longer track - ], - ) -): - __slots__ = [] +@attr.s(slots=True, frozen=True) +class DeviceLists: + """ + Attributes: + changed: List of user_ids whose devices may have changed + left: List of user_ids whose devices we no longer track + """ - def __nonzero__(self): + changed = attr.ib(type=Collection[str]) + left = attr.ib(type=Collection[str]) + + def __nonzero__(self) -> bool: return bool(self.changed or self.left) __bool__ = __nonzero__ # python3 -class SyncResult( - collections.namedtuple( - "SyncResult", - [ - "next_batch", # Token for the next sync - "presence", # List of presence events for the user. - "account_data", # List of account_data events for the user. - "joined", # JoinedSyncResult for each joined room. - "invited", # InvitedSyncResult for each invited room. - "archived", # ArchivedSyncResult for each archived room. - "to_device", # List of direct messages for the device. - "device_lists", # List of user_ids whose devices have changed - "device_one_time_keys_count", # Dict of algorithm to count for one time keys - # for this device - "groups", - ], - ) -): - __slots__ = [] +@attr.s +class _RoomChanges: + """The set of room entries to include in the sync, plus the set of joined + and left room IDs since last sync. + """ - def __nonzero__(self): + room_entries = attr.ib(type=List["RoomSyncResultBuilder"]) + invited = attr.ib(type=List[InvitedSyncResult]) + newly_joined_rooms = attr.ib(type=List[str]) + newly_left_rooms = attr.ib(type=List[str]) + + +@attr.s(slots=True, frozen=True) +class SyncResult: + """ + Attributes: + next_batch: Token for the next sync + presence: List of presence events for the user. + account_data: List of account_data events for the user. + joined: JoinedSyncResult for each joined room. + invited: InvitedSyncResult for each invited room. + archived: ArchivedSyncResult for each archived room. + to_device: List of direct messages for the device. 
+ device_lists: List of user_ids whose devices have changed + device_one_time_keys_count: Dict of algorithm to count for one time keys + for this device + groups: Group updates, if any + """ + + next_batch = attr.ib(type=StreamToken) + presence = attr.ib(type=List[JsonDict]) + account_data = attr.ib(type=List[JsonDict]) + joined = attr.ib(type=List[JoinedSyncResult]) + invited = attr.ib(type=List[InvitedSyncResult]) + archived = attr.ib(type=List[ArchivedSyncResult]) + to_device = attr.ib(type=List[JsonDict]) + device_lists = attr.ib(type=DeviceLists) + device_one_time_keys_count = attr.ib(type=JsonDict) + groups = attr.ib(type=Optional[GroupsSyncResult]) + + def __nonzero__(self) -> bool: """Make the result appear empty if there are no updates. This is used to tell if the notifier needs to wait for more events when polling for events. @@ -240,13 +261,15 @@ class SyncHandler(object): ) async def wait_for_sync_for_user( - self, sync_config, since_token=None, timeout=0, full_state=False - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + timeout: int = 0, + full_state: bool = False, + ) -> SyncResult: """Get the sync for a client if we have new data for it now. Otherwise wait for new data to arrive on the server. If the timeout expires, then return an empty sync result. - Returns: - Deferred[SyncResult] """ # If the user is not part of the mau group, then check that limits have # not been exceeded (if not part of the group by this point, almost certain @@ -265,8 +288,12 @@ class SyncHandler(object): return res async def _wait_for_sync_for_user( - self, sync_config, since_token, timeout, full_state - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + timeout: int = 0, + full_state: bool = False, + ) -> SyncResult: if since_token is None: sync_type = "initial_sync" elif full_state: @@ -305,25 +332,33 @@ class SyncHandler(object): return result - def current_sync_for_user(self, sync_config, since_token=None, full_state=False): + async def current_sync_for_user( + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + full_state: bool = False, + ) -> SyncResult: """Get the sync for client needed to match what the server has now. - Returns: - A Deferred SyncResult. """ - return self.generate_sync_result(sync_config, since_token, full_state) + return await self.generate_sync_result(sync_config, since_token, full_state) - async def push_rules_for_user(self, user): + async def push_rules_for_user(self, user: UserID) -> JsonDict: user_id = user.to_string() rules = await self.store.get_push_rules_for_user(user_id) rules = format_push_rules_for_user(user, rules) return rules - async def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): + async def ephemeral_by_room( + self, + sync_result_builder: "SyncResultBuilder", + now_token: StreamToken, + since_token: Optional[StreamToken] = None, + ) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]: """Get the ephemeral events for each room the user is in Args: - sync_result_builder(SyncResultBuilder) - now_token (StreamToken): Where the server is currently up to. - since_token (StreamToken): Where the server was when the client + sync_result_builder + now_token: Where the server is currently up to. + since_token: Where the server was when the client last synced. 
Returns: A tuple of the now StreamToken, updated to reflect the which typing @@ -348,7 +383,7 @@ class SyncHandler(object): ) now_token = now_token.copy_and_replace("typing_key", typing_key) - ephemeral_by_room = {} + ephemeral_by_room = {} # type: JsonDict for event in typing: # we want to exclude the room_id from the event, but modifying the @@ -380,13 +415,13 @@ class SyncHandler(object): async def _load_filtered_recents( self, - room_id, - sync_config, - now_token, - since_token=None, - recents=None, - newly_joined_room=False, - ): + room_id: str, + sync_config: SyncConfig, + now_token: StreamToken, + since_token: Optional[StreamToken] = None, + potential_recents: Optional[List[EventBase]] = None, + newly_joined_room: bool = False, + ) -> TimelineBatch: """ Returns: a Deferred TimelineBatch @@ -397,21 +432,29 @@ class SyncHandler(object): sync_config.filter_collection.blocks_all_room_timeline() ) - if recents is None or newly_joined_room or timeline_limit < len(recents): + if ( + potential_recents is None + or newly_joined_room + or timeline_limit < len(potential_recents) + ): limited = True else: limited = False - if recents: - recents = sync_config.filter_collection.filter_room_timeline(recents) + if potential_recents: + recents = sync_config.filter_collection.filter_room_timeline( + potential_recents + ) # We check if there are any state events, if there are then we pass # all current state events to the filter_events function. This is to # ensure that we always include current state in the timeline - current_state_ids = frozenset() + current_state_ids = frozenset() # type: FrozenSet[str] if any(e.is_state() for e in recents): - current_state_ids = await self.state.get_current_state_ids(room_id) - current_state_ids = frozenset(itervalues(current_state_ids)) + current_state_ids_map = await self.state.get_current_state_ids( + room_id + ) + current_state_ids = frozenset(itervalues(current_state_ids_map)) recents = await filter_events_for_client( self.storage, @@ -463,8 +506,10 @@ class SyncHandler(object): # ensure that we always include current state in the timeline current_state_ids = frozenset() if any(e.is_state() for e in loaded_recents): - current_state_ids = await self.state.get_current_state_ids(room_id) - current_state_ids = frozenset(itervalues(current_state_ids)) + current_state_ids_map = await self.state.get_current_state_ids( + room_id + ) + current_state_ids = frozenset(itervalues(current_state_ids_map)) loaded_recents = await filter_events_for_client( self.storage, @@ -493,17 +538,15 @@ class SyncHandler(object): limited=limited or newly_joined_room, ) - async def get_state_after_event(self, event, state_filter=StateFilter.all()): + async def get_state_after_event( + self, event: EventBase, state_filter: StateFilter = StateFilter.all() + ) -> StateMap[str]: """ Get the room state after the given event Args: - event(synapse.events.EventBase): event of interest - state_filter (StateFilter): The state filter used to fetch state - from the database. - - Returns: - A Deferred map from ((type, state_key)->Event) + event: event of interest + state_filter: The state filter used to fetch state from the database. 
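+
+        Returns:
+            A map from (type, state_key) to event_id, giving the room state
+            after the given event.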
""" state_ids = await self.state_store.get_state_ids_for_event( event.event_id, state_filter=state_filter @@ -514,18 +557,17 @@ class SyncHandler(object): return state_ids async def get_state_at( - self, room_id, stream_position, state_filter=StateFilter.all() - ): + self, + room_id: str, + stream_position: StreamToken, + state_filter: StateFilter = StateFilter.all(), + ) -> StateMap[str]: """ Get the room state at a particular stream position Args: - room_id(str): room for which to get state - stream_position(StreamToken): point at which to get state - state_filter (StateFilter): The state filter used to fetch state - from the database. - - Returns: - A Deferred map from ((type, state_key)->Event) + room_id: room for which to get state + stream_position: point at which to get state + state_filter: The state filter used to fetch state from the database. """ # FIXME this claims to get the state at a stream position, but # get_recent_events_for_room operates by topo ordering. This therefore @@ -546,23 +588,25 @@ class SyncHandler(object): state = {} return state - async def compute_summary(self, room_id, sync_config, batch, state, now_token): + async def compute_summary( + self, + room_id: str, + sync_config: SyncConfig, + batch: TimelineBatch, + state: StateMap[EventBase], + now_token: StreamToken, + ) -> Optional[JsonDict]: """ Works out a room summary block for this room, summarising the number of joined members in the room, and providing the 'hero' members if the room has no name so clients can consistently name rooms. Also adds state events to 'state' if needed to describe the heroes. - Args: - room_id(str): - sync_config(synapse.handlers.sync.SyncConfig): - batch(synapse.handlers.sync.TimelineBatch): The timeline batch for - the room that will be sent to the user. - state(dict): dict of (type, state_key) -> Event as returned by - compute_state_delta - now_token(str): Token of the end of the current batch. - - Returns: - A deferred dict describing the room summary + Args + room_id + sync_config + batch: The timeline batch for the room that will be sent to the user. + state: State as returned by compute_state_delta + now_token: Token of the end of the current batch. """ # FIXME: we could/should get this from room_stats when matthew/stats lands @@ -681,7 +725,7 @@ class SyncHandler(object): return summary - def get_lazy_loaded_members_cache(self, cache_key): + def get_lazy_loaded_members_cache(self, cache_key: Tuple[str, str]) -> LruCache: cache = self.lazy_loaded_members_cache.get(cache_key) if cache is None: logger.debug("creating LruCache for %r", cache_key) @@ -692,23 +736,24 @@ class SyncHandler(object): return cache async def compute_state_delta( - self, room_id, batch, sync_config, since_token, now_token, full_state - ): + self, + room_id: str, + batch: TimelineBatch, + sync_config: SyncConfig, + since_token: Optional[StreamToken], + now_token: StreamToken, + full_state: bool, + ) -> StateMap[EventBase]: """ Works out the difference in state between the start of the timeline and the previous sync. Args: - room_id(str): - batch(synapse.handlers.sync.TimelineBatch): The timeline batch for - the room that will be sent to the user. - sync_config(synapse.handlers.sync.SyncConfig): - since_token(str|None): Token of the end of the previous batch. May - be None. - now_token(str): Token of the end of the current batch. - full_state(bool): Whether to force returning the full state. 
- - Returns: - A deferred dict of (type, state_key) -> Event + room_id: + batch: The timeline batch for the room that will be sent to the user. + sync_config: + since_token: Token of the end of the previous batch. May be None. + now_token: Token of the end of the current batch. + full_state: Whether to force returning the full state. """ # TODO(mjark) Check if the state events were received by the server # after the previous sync, since we need to include those state @@ -800,6 +845,10 @@ class SyncHandler(object): # about them). state_filter = StateFilter.all() + # If this is an initial sync then full_state should be set, and + # that case is handled above. We assert here to ensure that this + # is indeed the case. + assert since_token is not None state_at_previous_sync = await self.get_state_at( room_id, stream_position=since_token, state_filter=state_filter ) @@ -874,7 +923,7 @@ class SyncHandler(object): if t[0] == EventTypes.Member: cache.set(t[1], event_id) - state = {} + state = {} # type: Dict[str, EventBase] if state_ids: state = await self.store.get_events(list(state_ids.values())) @@ -885,7 +934,9 @@ class SyncHandler(object): ) } - async def unread_notifs_for_room_id(self, room_id, sync_config): + async def unread_notifs_for_room_id( + self, room_id: str, sync_config: SyncConfig + ) -> Optional[Dict[str, str]]: with Measure(self.clock, "unread_notifs_for_room_id"): last_unread_event_id = await self.store.get_last_receipt_event_id_for_user( user_id=sync_config.user.to_string(), @@ -893,7 +944,6 @@ class SyncHandler(object): receipt_type="m.read", ) - notifs = [] if last_unread_event_id: notifs = await self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id @@ -905,17 +955,12 @@ class SyncHandler(object): return None async def generate_sync_result( - self, sync_config, since_token=None, full_state=False - ): + self, + sync_config: SyncConfig, + since_token: Optional[StreamToken] = None, + full_state: bool = False, + ) -> SyncResult: """Generates a sync result. 
- - Args: - sync_config (SyncConfig) - since_token (StreamToken) - full_state (bool) - - Returns: - Deferred(SyncResult) """ # NB: The now_token gets changed by some of the generate_sync_* methods, # this is due to some of the underlying streams not supporting the ability @@ -977,7 +1022,7 @@ class SyncHandler(object): ) device_id = sync_config.device_id - one_time_key_counts = {} + one_time_key_counts = {} # type: JsonDict if device_id: one_time_key_counts = await self.store.count_e2e_one_time_keys( user_id, device_id @@ -1007,7 +1052,9 @@ class SyncHandler(object): ) @measure_func("_generate_sync_entry_for_groups") - async def _generate_sync_entry_for_groups(self, sync_result_builder): + async def _generate_sync_entry_for_groups( + self, sync_result_builder: "SyncResultBuilder" + ) -> None: user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token now_token = sync_result_builder.now_token @@ -1052,27 +1099,22 @@ class SyncHandler(object): @measure_func("_generate_sync_entry_for_device_list") async def _generate_sync_entry_for_device_list( self, - sync_result_builder, - newly_joined_rooms, - newly_joined_or_invited_users, - newly_left_rooms, - newly_left_users, - ): + sync_result_builder: "SyncResultBuilder", + newly_joined_rooms: Set[str], + newly_joined_or_invited_users: Set[str], + newly_left_rooms: Set[str], + newly_left_users: Set[str], + ) -> DeviceLists: """Generate the DeviceLists section of sync Args: - sync_result_builder (SyncResultBuilder) - newly_joined_rooms (set[str]): Set of rooms user has joined since + sync_result_builder + newly_joined_rooms: Set of rooms user has joined since previous sync + newly_joined_or_invited_users: Set of users that have joined or + been invited to a room since previous sync. + newly_left_rooms: Set of rooms user has left since previous sync + newly_left_users: Set of users that have left a room we're in since previous sync - newly_joined_or_invited_users (set[str]): Set of users that have - joined or been invited to a room since previous sync. - newly_left_rooms (set[str]): Set of rooms user has left since - previous sync - newly_left_users (set[str]): Set of users that have left a room - we're in since previous sync - - Returns: - Deferred[DeviceLists] """ user_id = sync_result_builder.sync_config.user.to_string() @@ -1133,15 +1175,11 @@ class SyncHandler(object): else: return DeviceLists(changed=[], left=[]) - async def _generate_sync_entry_for_to_device(self, sync_result_builder): + async def _generate_sync_entry_for_to_device( + self, sync_result_builder: "SyncResultBuilder" + ) -> None: """Generates the portion of the sync response. Populates `sync_result_builder` with the result. - - Args: - sync_result_builder(SyncResultBuilder) - - Returns: - Deferred(dict): A dictionary containing the per room account data. """ user_id = sync_result_builder.sync_config.user.to_string() device_id = sync_result_builder.sync_config.device_id @@ -1179,15 +1217,17 @@ class SyncHandler(object): else: sync_result_builder.to_device = [] - async def _generate_sync_entry_for_account_data(self, sync_result_builder): + async def _generate_sync_entry_for_account_data( + self, sync_result_builder: "SyncResultBuilder" + ) -> Dict[str, Dict[str, JsonDict]]: """Generates the account data portion of the sync response. Populates `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) + sync_result_builder Returns: - Deferred(dict): A dictionary containing the per room account data. 
+ A dictionary containing the per room account data. """ sync_config = sync_result_builder.sync_config user_id = sync_result_builder.sync_config.user.to_string() @@ -1231,18 +1271,21 @@ class SyncHandler(object): return account_data_by_room async def _generate_sync_entry_for_presence( - self, sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users - ): + self, + sync_result_builder: "SyncResultBuilder", + newly_joined_rooms: Set[str], + newly_joined_or_invited_users: Set[str], + ) -> None: """Generates the presence portion of the sync response. Populates the `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) - newly_joined_rooms(list): List of rooms that the user has joined - since the last sync (or empty if an initial sync) - newly_joined_or_invited_users(list): List of users that have joined - or been invited to rooms since the last sync (or empty if an initial - sync) + sync_result_builder + newly_joined_rooms: Set of rooms that the user has joined since + the last sync (or empty if an initial sync) + newly_joined_or_invited_users: Set of users that have joined or + been invited to rooms since the last sync (or empty if an + initial sync) """ now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config @@ -1286,17 +1329,19 @@ class SyncHandler(object): sync_result_builder.presence = presence async def _generate_sync_entry_for_rooms( - self, sync_result_builder, account_data_by_room - ): + self, + sync_result_builder: "SyncResultBuilder", + account_data_by_room: Dict[str, Dict[str, JsonDict]], + ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. Args: - sync_result_builder(SyncResultBuilder) - account_data_by_room(dict): Dictionary of per room account data + sync_result_builder + account_data_by_room: Dictionary of per room account data Returns: - Deferred(tuple): Returns a 4-tuple of + Returns a 4-tuple of `(newly_joined_rooms, newly_joined_or_invited_users, newly_left_rooms, newly_left_users)` """ @@ -1307,7 +1352,7 @@ class SyncHandler(object): ) if block_all_room_ephemeral: - ephemeral_by_room = {} + ephemeral_by_room = {} # type: Dict[str, List[JsonDict]] else: now_token, ephemeral_by_room = await self.ephemeral_by_room( sync_result_builder, @@ -1328,7 +1373,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - return [], [], [], [] + return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1340,19 +1385,22 @@ class SyncHandler(object): ignored_users = frozenset() if since_token: - res = await self._get_rooms_changed(sync_result_builder, ignored_users) - room_entries, invited, newly_joined_rooms, newly_left_rooms = res - + room_changes = await self._get_rooms_changed( + sync_result_builder, ignored_users + ) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: - res = await self._get_all_rooms(sync_result_builder, ignored_users) - room_entries, invited, newly_joined_rooms = res - newly_left_rooms = [] + room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) tags_by_room = await self.store.get_tags_for_user(user_id) + room_entries = room_changes.room_entries + invited = room_changes.invited + newly_joined_rooms = room_changes.newly_joined_rooms + newly_left_rooms = room_changes.newly_left_rooms + def 
handle_room_entries(room_entry): return self._generate_room_entry( sync_result_builder, @@ -1392,13 +1440,15 @@ class SyncHandler(object): newly_left_users -= newly_joined_or_invited_users return ( - newly_joined_rooms, + set(newly_joined_rooms), newly_joined_or_invited_users, - newly_left_rooms, + set(newly_left_rooms), newly_left_users, ) - async def _have_rooms_changed(self, sync_result_builder): + async def _have_rooms_changed( + self, sync_result_builder: "SyncResultBuilder" + ) -> bool: """Returns whether there may be any new events that should be sent down the sync. Returns True if there are. """ @@ -1422,22 +1472,10 @@ class SyncHandler(object): return True return False - async def _get_rooms_changed(self, sync_result_builder, ignored_users): + async def _get_rooms_changed( + self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + ) -> _RoomChanges: """Gets the the changes that have happened since the last sync. - - Args: - sync_result_builder(SyncResultBuilder) - ignored_users(set(str)): Set of users ignored by user. - - Returns: - Deferred(tuple): Returns a tuple of the form: - `(room_entries, invited_rooms, newly_joined_rooms, newly_left_rooms)` - - where: - room_entries is a list [RoomSyncResultBuilder] - invited_rooms is a list [InvitedSyncResult] - newly_joined_rooms is a list[str] of room ids - newly_left_rooms is a list[str] of room ids """ user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token @@ -1451,7 +1489,7 @@ class SyncHandler(object): user_id, since_token.room_key, now_token.room_key ) - mem_change_events_by_room_id = {} + mem_change_events_by_room_id = {} # type: Dict[str, List[EventBase]] for event in rooms_changed: mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) @@ -1570,7 +1608,7 @@ class SyncHandler(object): # This is all screaming out for a refactor, as the logic here is # subtle and the moving parts numerous. if leave_event.internal_metadata.is_out_of_band_membership(): - batch_events = [leave_event] + batch_events = [leave_event] # type: Optional[List[EventBase]] else: batch_events = None @@ -1636,18 +1674,17 @@ class SyncHandler(object): ) room_entries.append(entry) - return room_entries, invited, newly_joined_rooms, newly_left_rooms + return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms) - async def _get_all_rooms(self, sync_result_builder, ignored_users): + async def _get_all_rooms( + self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + ) -> _RoomChanges: """Returns entries for all rooms for the user. Args: - sync_result_builder(SyncResultBuilder) - ignored_users(set(str)): Set of users ignored by user. + sync_result_builder + ignored_users: Set of users ignored by user. 
- Returns: - Deferred(tuple): Returns a tuple of the form: - `([RoomSyncResultBuilder], [InvitedSyncResult], [])` """ user_id = sync_result_builder.sync_config.user.to_string() @@ -1709,30 +1746,30 @@ class SyncHandler(object): ) ) - return room_entries, invited, [] + return _RoomChanges(room_entries, invited, [], []) async def _generate_room_entry( self, - sync_result_builder, - ignored_users, - room_builder, - ephemeral, - tags, - account_data, - always_include=False, + sync_result_builder: "SyncResultBuilder", + ignored_users: Set[str], + room_builder: "RoomSyncResultBuilder", + ephemeral: List[JsonDict], + tags: Optional[List[JsonDict]], + account_data: Dict[str, JsonDict], + always_include: bool = False, ): """Populates the `joined` and `archived` section of `sync_result_builder` based on the `room_builder`. Args: - sync_result_builder(SyncResultBuilder) - ignored_users(set(str)): Set of users ignored by user. - room_builder(RoomSyncResultBuilder) - ephemeral(list): List of new ephemeral events for room - tags(list): List of *all* tags for room, or None if there has been + sync_result_builder + ignored_users: Set of users ignored by user. + room_builder + ephemeral: List of new ephemeral events for room + tags: List of *all* tags for room, or None if there has been no change. - account_data(list): List of new account data for room - always_include(bool): Always include this room in the sync response, + account_data: List of new account data for room + always_include: Always include this room in the sync response, even if empty. """ newly_joined = room_builder.newly_joined @@ -1758,7 +1795,7 @@ class SyncHandler(object): sync_config, now_token=upto_token, since_token=since_token, - recents=events, + potential_recents=events, newly_joined_room=newly_joined, ) @@ -1809,7 +1846,7 @@ class SyncHandler(object): room_id, batch, sync_config, since_token, now_token, full_state=full_state ) - summary = {} + summary = {} # type: Optional[JsonDict] # we include a summary in room responses when we're lazy loading # members (as the client otherwise doesn't have enough info to form @@ -1833,7 +1870,7 @@ class SyncHandler(object): ) if room_builder.rtype == "joined": - unread_notifications = {} + unread_notifications = {} # type: Dict[str, str] room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -1860,18 +1897,20 @@ class SyncHandler(object): % (room_id, user_id, len(state)) ) elif room_builder.rtype == "archived": - room_sync = ArchivedSyncResult( + archived_room_sync = ArchivedSyncResult( room_id=room_id, timeline=batch, state=state, account_data=account_data_events, ) - if room_sync or always_include: - sync_result_builder.archived.append(room_sync) + if archived_room_sync or always_include: + sync_result_builder.archived.append(archived_room_sync) else: raise Exception("Unrecognized rtype: %r", room_builder.rtype) - async def get_rooms_for_user_at(self, user_id, stream_ordering): + async def get_rooms_for_user_at( + self, user_id: str, stream_ordering: int + ) -> FrozenSet[str]: """Get set of joined rooms for a user at the given stream ordering. The stream ordering *must* be recent, otherwise this may throw an @@ -1879,12 +1918,11 @@ class SyncHandler(object): current token, which should be perfectly fine). Args: - user_id (str) - stream_ordering (int) + user_id + stream_ordering ReturnValue: - Deferred[frozenset[str]]: Set of room_ids the user is in at given - stream_ordering. + Set of room_ids the user is in at given stream_ordering. 
""" joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id) @@ -1911,11 +1949,10 @@ class SyncHandler(object): if user_id in users_in_room: joined_room_ids.add(room_id) - joined_room_ids = frozenset(joined_room_ids) - return joined_room_ids + return frozenset(joined_room_ids) -def _action_has_highlight(actions): +def _action_has_highlight(actions: List[JsonDict]) -> bool: for action in actions: try: if action.get("set_tweak", None) == "highlight": @@ -1927,22 +1964,23 @@ def _action_has_highlight(actions): def _calculate_state( - timeline_contains, timeline_start, previous, current, lazy_load_members -): + timeline_contains: StateMap[str], + timeline_start: StateMap[str], + previous: StateMap[str], + current: StateMap[str], + lazy_load_members: bool, +) -> StateMap[str]: """Works out what state to include in a sync response. Args: - timeline_contains (dict): state in the timeline - timeline_start (dict): state at the start of the timeline - previous (dict): state at the end of the previous sync (or empty dict + timeline_contains: state in the timeline + timeline_start: state at the start of the timeline + previous: state at the end of the previous sync (or empty dict if this is an initial sync) - current (dict): state at the end of the timeline - lazy_load_members (bool): whether to return members from timeline_start + current: state at the end of the timeline + lazy_load_members: whether to return members from timeline_start or not. assumes that timeline_start has already been filtered to include only the members the client needs to know about. - - Returns: - dict """ event_id_to_key = { e: key @@ -1979,15 +2017,16 @@ def _calculate_state( return {event_id_to_key[e]: e for e in state_ids} -class SyncResultBuilder(object): +@attr.s +class SyncResultBuilder: """Used to help build up a new SyncResult for a user Attributes: - sync_config (SyncConfig) - full_state (bool) - since_token (StreamToken) - now_token (StreamToken) - joined_room_ids (list[str]) + sync_config + full_state: The full_state flag as specified by user + since_token: The token supplied by user, or None. + now_token: The token to sync up to. + joined_room_ids: List of rooms the user is joined to # The following mirror the fields in a sync response presence (list) @@ -1995,61 +2034,45 @@ class SyncResultBuilder(object): joined (list[JoinedSyncResult]) invited (list[InvitedSyncResult]) archived (list[ArchivedSyncResult]) - device (list) groups (GroupsSyncResult|None) to_device (list) """ - def __init__( - self, sync_config, full_state, since_token, now_token, joined_room_ids - ): - """ - Args: - sync_config (SyncConfig) - full_state (bool): The full_state flag as specified by user - since_token (StreamToken): The token supplied by user, or None. - now_token (StreamToken): The token to sync up to. 
- joined_room_ids (list[str]): List of rooms the user is joined to - """ - self.sync_config = sync_config - self.full_state = full_state - self.since_token = since_token - self.now_token = now_token - self.joined_room_ids = joined_room_ids + sync_config = attr.ib(type=SyncConfig) + full_state = attr.ib(type=bool) + since_token = attr.ib(type=Optional[StreamToken]) + now_token = attr.ib(type=StreamToken) + joined_room_ids = attr.ib(type=FrozenSet[str]) - self.presence = [] - self.account_data = [] - self.joined = [] - self.invited = [] - self.archived = [] - self.device = [] - self.groups = None - self.to_device = [] + presence = attr.ib(type=List[JsonDict], default=attr.Factory(list)) + account_data = attr.ib(type=List[JsonDict], default=attr.Factory(list)) + joined = attr.ib(type=List[JoinedSyncResult], default=attr.Factory(list)) + invited = attr.ib(type=List[InvitedSyncResult], default=attr.Factory(list)) + archived = attr.ib(type=List[ArchivedSyncResult], default=attr.Factory(list)) + groups = attr.ib(type=Optional[GroupsSyncResult], default=None) + to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list)) +@attr.s class RoomSyncResultBuilder(object): """Stores information needed to create either a `JoinedSyncResult` or `ArchivedSyncResult`. + + Attributes: + room_id + rtype: One of `"joined"` or `"archived"` + events: List of events to include in the room (more events may be added + when generating result). + newly_joined: If the user has newly joined the room + full_state: Whether the full state should be sent in result + since_token: Earliest point to return events from, or None + upto_token: Latest point to return events from. """ - def __init__( - self, room_id, rtype, events, newly_joined, full_state, since_token, upto_token - ): - """ - Args: - room_id(str) - rtype(str): One of `"joined"` or `"archived"` - events(list[FrozenEvent]): List of events to include in the room - (more events may be added when generating result). - newly_joined(bool): If the user has newly joined the room - full_state(bool): Whether the full state should be sent in result - since_token(StreamToken): Earliest point to return events from, or None - upto_token(StreamToken): Latest point to return events from. 
- """ - self.room_id = room_id - self.rtype = rtype - self.events = events - self.newly_joined = newly_joined - self.full_state = full_state - self.since_token = since_token - self.upto_token = upto_token + room_id = attr.ib(type=str) + rtype = attr.ib(type=str) + events = attr.ib(type=Optional[List[EventBase]]) + newly_joined = attr.ib(type=bool) + full_state = attr.ib(type=bool) + since_token = attr.ib(type=Optional[StreamToken]) + upto_token = attr.ib(type=StreamToken) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index feb1c07cb2..b9ee6ec1ec 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -238,8 +238,11 @@ class RedactionTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def build(self, prev_event_ids): built_event = yield self._base_builder.build(prev_event_ids) - built_event.event_id = self._event_id + + built_event._event_id = self._event_id built_event._event_dict["event_id"] = self._event_id + assert built_event.event_id == self._event_id + return built_event @property diff --git a/tox.ini b/tox.ini index 88ef12bebd..ef22368cf1 100644 --- a/tox.ini +++ b/tox.ini @@ -180,6 +180,7 @@ commands = mypy \ synapse/api \ synapse/config/ \ synapse/federation/transport \ + synapse/handlers/sync.py \ synapse/handlers/ui_auth \ synapse/logging/ \ synapse/module_api \ From e81c093974faeb8d39dcedcbda7c2e4f5683091f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 19:15:08 +0000 Subject: [PATCH 130/213] make FederationHandler.on_get_missing_events async --- synapse/handlers/federation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c94573b547..ea2f6a91d7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2121,24 +2121,23 @@ class FederationHandler(BaseHandler): return ret - @defer.inlineCallbacks - def on_get_missing_events( + async def on_get_missing_events( self, origin, room_id, earliest_events, latest_events, limit ): - in_room = yield self.auth.check_host_in_room(room_id, origin) + in_room = await self.auth.check_host_in_room(room_id, origin) if not in_room: raise AuthError(403, "Host not in room.") limit = min(limit, 20) - missing_events = yield self.store.get_missing_events( + missing_events = await self.store.get_missing_events( room_id=room_id, earliest_events=earliest_events, latest_events=latest_events, limit=limit, ) - missing_events = yield filter_events_for_server( + missing_events = await filter_events_for_server( self.storage, origin, missing_events ) From 5d17c3159618b6e75bb58ba68a77a73572a85688 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 22:28:11 +0000 Subject: [PATCH 131/213] make FederationHandler.send_invite async --- synapse/handlers/federation.py | 5 ++--- synapse/handlers/message.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea2f6a91d7..5728ea2ee7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1184,13 +1184,12 @@ class FederationHandler(BaseHandler): ) raise SynapseError(http_client.BAD_REQUEST, "Too many auth_events") - @defer.inlineCallbacks - def send_invite(self, target_host, event): + async def send_invite(self, target_host, event): """ Sends the invite to the remote server for signing. Invites must be signed by the invitee's server before distribution. 
""" - pdu = yield self.federation_client.send_invite( + pdu = await self.federation_client.send_invite( destination=target_host, room_id=event.room_id, event_id=event.event_id, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index bdf16c84d3..be6ae18a92 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -932,10 +932,9 @@ class EventCreationHandler(object): # way? If we have been invited by a remote server, we need # to get them to sign the event. - returned_invite = yield federation_handler.send_invite( - invitee.domain, event + returned_invite = yield defer.ensureDeferred( + federation_handler.send_invite(invitee.domain, event) ) - event.unsigned.pop("room_state", None) # TODO: Make sure the signatures actually are correct. From 0536d0c9beb15fce5f2ca24b4565f79f3140943f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:35:40 +0000 Subject: [PATCH 132/213] make FederationClient.backfill async --- synapse/federation/federation_client.py | 26 +++++++++++-------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f99d17a7de..297292f389 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,7 @@ import copy import itertools import logging -from typing import Dict, Iterable +from typing import Dict, Iterable, List, Tuple from prometheus_client import Counter @@ -37,7 +37,7 @@ from synapse.api.room_versions import ( EventFormatVersions, RoomVersions, ) -from synapse.events import builder, room_version_to_event_format +from synapse.events import EventBase, builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function @@ -170,21 +170,17 @@ class FederationClient(FederationBase): sent_queries_counter.labels("client_one_time_keys").inc() return self.transport_layer.claim_client_keys(destination, content, timeout) - @defer.inlineCallbacks - @log_function - def backfill(self, dest, room_id, limit, extremities): - """Requests some more historic PDUs for the given context from the + async def backfill( + self, dest: str, room_id: str, limit: int, extremities: Iterable[str] + ) -> List[EventBase]: + """Requests some more historic PDUs for the given room from the given destination server. Args: dest (str): The remote homeserver to ask. room_id (str): The room_id to backfill. - limit (int): The maximum number of PDUs to return. - extremities (list): List of PDU id and origins of the first pdus - we have seen from the context - - Returns: - Deferred: Results in the received PDUs. + limit (int): The maximum number of events to return. 
+ extremities (list): our current backwards extremities, to backfill from """ logger.debug("backfill extrem=%s", extremities) @@ -192,13 +188,13 @@ class FederationClient(FederationBase): if not extremities: return - transaction_data = yield self.transport_layer.backfill( + transaction_data = await self.transport_layer.backfill( dest, room_id, extremities, limit ) logger.debug("backfill transaction_data=%r", transaction_data) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdus = [ @@ -207,7 +203,7 @@ class FederationClient(FederationBase): ] # FIXME: We should handle signature failures more gracefully. - pdus[:] = yield make_deferred_yieldable( + pdus[:] = await make_deferred_yieldable( defer.gatherResults( self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True ).addErrback(unwrapFirstError) From 0cb0c7bcd529b0dda92aff0dae6277dfcb6f6e29 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:41:54 +0000 Subject: [PATCH 133/213] make FederationClient.get_pdu async --- synapse/federation/federation_client.py | 32 +++++++++++++------------ 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 297292f389..b9c8d8e325 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,7 @@ import copy import itertools import logging -from typing import Dict, Iterable, List, Tuple +from typing import Dict, Iterable, List, Optional, Tuple from prometheus_client import Counter @@ -211,11 +211,14 @@ class FederationClient(FederationBase): return pdus - @defer.inlineCallbacks - @log_function - def get_pdu( - self, destinations, event_id, room_version, outlier=False, timeout=None - ): + async def get_pdu( + self, + destinations: Iterable[str], + event_id: str, + room_version: str, + outlier: bool = False, + timeout: Optional[int] = None, + ) -> Optional[EventBase]: """Requests the PDU with given origin and ID from the remote home servers. @@ -223,18 +226,17 @@ class FederationClient(FederationBase): one succeeds. Args: - destinations (list): Which homeservers to query - event_id (str): event to fetch - room_version (str): version of the room - outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if + destinations: Which homeservers to query + event_id: event to fetch + room_version: version of the room + outlier: Indicates whether the PDU is an `outlier`, i.e. if it's from an arbitary point in the context as opposed to part of the current block of PDUs. Defaults to `False` - timeout (int): How long to try (in ms) each destination for before + timeout: How long to try (in ms) each destination for before moving to the next destination. None indicates no timeout. Returns: - Deferred: Results in the requested PDU, or None if we were unable to find - it. + The requested PDU, or None if we were unable to find it. """ # TODO: Rate limit the number of times we try and get the same event. @@ -255,7 +257,7 @@ class FederationClient(FederationBase): continue try: - transaction_data = yield self.transport_layer.get_event( + transaction_data = await self.transport_layer.get_event( destination, event_id, timeout=timeout ) @@ -275,7 +277,7 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. 
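                 # (_check_sigs_and_hash is expected to raise if the signatures
                 # or content hashes are invalid, so reaching the `break` below
                 # means the PDU checked out.)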
- signed_pdu = yield self._check_sigs_and_hash(room_version, pdu) + signed_pdu = await self._check_sigs_and_hash(room_version, pdu) break From d73683c363a9177951b8078dd1628c1ade8de508 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:42:52 +0000 Subject: [PATCH 134/213] make FederationClient.get_room_state_ids async --- synapse/federation/federation_client.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b9c8d8e325..c3e556e7d9 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -307,15 +307,16 @@ class FederationClient(FederationBase): return signed_pdu - @defer.inlineCallbacks - def get_room_state_ids(self, destination: str, room_id: str, event_id: str): + async def get_room_state_ids( + self, destination: str, room_id: str, event_id: str + ) -> Tuple[List[str], List[str]]: """Calls the /state_ids endpoint to fetch the state at a particular point in the room, and the auth events for the given event Returns: - Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids) + a tuple of (state event_ids, auth event_ids) """ - result = yield self.transport_layer.get_room_state_ids( + result = await self.transport_layer.get_room_state_ids( destination, room_id, event_id=event_id ) From 24d814ca238c40fef61c7826d1a9d9f9dd3a144d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:43:40 +0000 Subject: [PATCH 135/213] make FederationClient.get_event_auth async --- synapse/federation/federation_client.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c3e556e7d9..1da33a7a64 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -330,19 +330,17 @@ class FederationClient(FederationBase): return state_event_ids, auth_event_ids - @defer.inlineCallbacks - @log_function - def get_event_auth(self, destination, room_id, event_id): - res = yield self.transport_layer.get_event_auth(destination, room_id, event_id) + async def get_event_auth(self, destination, room_id, event_id): + res = await self.transport_layer.get_event_auth(destination, room_id, event_id) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) auth_chain = [ event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"] ] - signed_auth = yield self._check_sigs_and_hash_and_fetch( + signed_auth = await self._check_sigs_and_hash_and_fetch( destination, auth_chain, outlier=True, room_version=room_version ) From 3f11cbb40494f0ac9622a5b686d3cf8379a44b5d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:51:26 +0000 Subject: [PATCH 136/213] make FederationClient.make_membership_event async --- synapse/federation/federation_client.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 1da33a7a64..b69aac9041 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, EventFormatVersions, + RoomVersion, RoomVersions, ) from 
synapse.events import EventBase, builder, room_version_to_event_format @@ -404,7 +405,7 @@ class FederationClient(FederationBase): raise SynapseError(502, "Failed to %s via any server" % (description,)) - def make_membership_event( + async def make_membership_event( self, destinations: Iterable[str], room_id: str, @@ -412,7 +413,7 @@ class FederationClient(FederationBase): membership: str, content: dict, params: Dict[str, str], - ): + ) -> Tuple[str, EventBase, RoomVersion]: """ Creates an m.room.member event, with context, without participating in the room. @@ -433,19 +434,19 @@ class FederationClient(FederationBase): content: Any additional data to put into the content field of the event. params: Query parameters to include in the request. - Return: - Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of + + Returns: `(origin, event, room_version)` where origin is the remote homeserver which generated the event, and room_version is the version of the room. - Fails with a `UnsupportedRoomVersionError` if remote responds with - a room version we don't understand. + Raises: + UnsupportedRoomVersionError: if remote responds with + a room version we don't understand. - Fails with a ``SynapseError`` if the chosen remote server - returns a 300/400 code. + SynapseError: if the chosen remote server returns a 300/400 code. - Fails with a ``RuntimeError`` if no servers were reachable. + RuntimeError: if no servers were reachable. """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: @@ -491,7 +492,7 @@ class FederationClient(FederationBase): return (destination, ev, room_version) - return self._try_destination_list( + return await self._try_destination_list( "make_" + membership, destinations, send_request ) From 8af9f11bea8d36188d7f8ff65f227b3cfdf9fa17 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:55:00 +0000 Subject: [PATCH 137/213] make FederationClient.send_join async --- synapse/federation/federation_client.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b69aac9041..c98b276805 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,7 @@ import copy import itertools import logging -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Any, Dict, Iterable, List, Optional, Tuple from prometheus_client import Counter @@ -496,27 +496,29 @@ class FederationClient(FederationBase): "make_" + membership, destinations, send_request ) - def send_join(self, destinations, pdu, event_format_version): + async def send_join( + self, destinations: Iterable[str], pdu: EventBase, event_format_version: int + ) -> Dict[str, Any]: """Sends a join event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, and send the event out to the rest of the federation. Args: - destinations (str): Candidate homeservers which are probably + destinations: Candidate homeservers which are probably participating in the room. - pdu (BaseEvent): event to be sent - event_format_version (int): The event format version + pdu: event to be sent + event_format_version: The event format version - Return: - Deferred: resolves to a dict with members ``origin`` (a string + Returns: + a dict with members ``origin`` (a string giving the serer the event was sent to, ``state`` (?) 
and ``auth_chain``. - Fails with a ``SynapseError`` if the chosen remote server - returns a 300/400 code. + Raises: + SynapseError: if the chosen remote server returns a 300/400 code. - Fails with a ``RuntimeError`` if no servers were reachable. + RuntimeError: if no servers were reachable. """ def check_authchain_validity(signed_auth_chain): @@ -603,7 +605,7 @@ class FederationClient(FederationBase): "origin": destination, } - return self._try_destination_list("send_join", destinations, send_request) + return await self._try_destination_list("send_join", destinations, send_request) @defer.inlineCallbacks def _do_send_join(self, destination, pdu): From a46fabf17bca51eb3a73feb5b3de4072030033e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:55:11 +0000 Subject: [PATCH 138/213] make FederationClient.send_leave async --- synapse/federation/federation_client.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c98b276805..8d33d27137 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -730,7 +730,7 @@ class FederationClient(FederationBase): ) return content - def send_leave(self, destinations, pdu): + async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None: """Sends a leave event to one of a list of homeservers. Doing so will cause the remote server to add the event to the graph, @@ -739,17 +739,14 @@ class FederationClient(FederationBase): This is mostly useful to reject received invites. Args: - destinations (str): Candidate homeservers which are probably + destinations: Candidate homeservers which are probably participating in the room. - pdu (BaseEvent): event to be sent + pdu: event to be sent - Return: - Deferred: resolves to None. + Raises: + SynapseError if the chosen remote server returns a 300/400 code. - Fails with a ``SynapseError`` if the chosen remote server - returns a 300/400 code. - - Fails with a ``RuntimeError`` if no servers were reachable. + RuntimeError if no servers were reachable. 
""" @defer.inlineCallbacks @@ -759,7 +756,9 @@ class FederationClient(FederationBase): logger.debug("Got content: %s", content) return None - return self._try_destination_list("send_leave", destinations, send_request) + return await self._try_destination_list( + "send_leave", destinations, send_request + ) @defer.inlineCallbacks def _do_send_leave(self, destination, pdu): From 1330c311b79bc4a9b4d3349e72a2353bb54dcd90 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 20:59:10 +0000 Subject: [PATCH 139/213] make FederationClient._try_destination_list async --- synapse/federation/federation_client.py | 36 ++++++++++++++++++------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8d33d27137..11802dad0f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,17 @@ import copy import itertools import logging -from typing import Any, Dict, Iterable, List, Optional, Tuple +from typing import ( + Any, + Awaitable, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + TypeVar, +) from prometheus_client import Counter @@ -53,6 +63,8 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t PDU_RETRY_TIME_MS = 1 * 60 * 1000 +T = TypeVar("T") + class InvalidResponseError(RuntimeError): """Helper for _try_destination_list: indicates that the server returned a response @@ -349,17 +361,21 @@ class FederationClient(FederationBase): return signed_auth - @defer.inlineCallbacks - def _try_destination_list(self, description, destinations, callback): + async def _try_destination_list( + self, + description: str, + destinations: Iterable[str], + callback: Callable[[str], Awaitable[T]], + ) -> T: """Try an operation on a series of servers, until it succeeds Args: - description (unicode): description of the operation we're doing, for logging + description: description of the operation we're doing, for logging - destinations (Iterable[unicode]): list of server_names to try + destinations: list of server_names to try - callback (callable): Function to run for each server. Passed a single - argument: the server_name to try. May return a deferred. + callback: Function to run for each server. Passed a single + argument: the server_name to try. If the callback raises a CodeMessageException with a 300/400 code, attempts to perform the operation stop immediately and the exception is @@ -370,7 +386,7 @@ class FederationClient(FederationBase): suppressed if the exception is an InvalidResponseError. 
Returns: - The [Deferred] result of callback, if it succeeds + The result of callback, if it succeeds Raises: SynapseError if the chosen remote server returns a 300/400 code, or @@ -381,7 +397,7 @@ class FederationClient(FederationBase): continue try: - res = yield callback(destination) + res = await callback(destination) return res except InvalidResponseError as e: logger.warning("Failed to %s via %s: %s", description, destination, e) @@ -400,7 +416,7 @@ class FederationClient(FederationBase): ) except Exception: logger.warning( - "Failed to %s via %s", description, destination, exc_info=1 + "Failed to %s via %s", description, destination, exc_info=True ) raise SynapseError(502, "Failed to %s via any server" % (description,)) From ad09ee92622e0d37502fb49336f1e1474af458df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:07:13 +0000 Subject: [PATCH 140/213] make FederationClient.make_membership_event.send_request async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 11802dad0f..b59d08c4ae 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -471,9 +471,8 @@ class FederationClient(FederationBase): % (membership, ",".join(valid_memberships)) ) - @defer.inlineCallbacks - def send_request(destination): - ret = yield self.transport_layer.make_membership_event( + async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: + ret = await self.transport_layer.make_membership_event( destination, room_id, user_id, membership, params ) @@ -506,7 +505,7 @@ class FederationClient(FederationBase): event_dict=pdu_dict, ) - return (destination, ev, room_version) + return destination, ev, room_version return await self._try_destination_list( "make_" + membership, destinations, send_request From 3960527c2e1d9bbeb2f0f7b6218de06cd32bdcb4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:07:38 +0000 Subject: [PATCH 141/213] make FederationClient.send_join.send_request async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b59d08c4ae..8ca36c4d32 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -553,9 +553,8 @@ class FederationClient(FederationBase): "room appears to have unsupported version %s" % (room_version,) ) - @defer.inlineCallbacks - def send_request(destination): - content = yield self._do_send_join(destination, pdu) + async def send_request(destination) -> Dict[str, Any]: + content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) @@ -584,7 +583,7 @@ class FederationClient(FederationBase): # invalid, and it would fail auth checks anyway. 
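                 # NB: a 400 here is terminal: per the docstring of
                 # _try_destination_list above, a CodeMessageException with a
                 # 300/400 code stops the operation immediately rather than
                 # trying the next server.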
raise SynapseError(400, "No create event in state") - valid_pdus = yield self._check_sigs_and_hash_and_fetch( + valid_pdus = await self._check_sigs_and_hash_and_fetch( destination, list(pdus.values()), outlier=True, From 638001116de3e472d916f8e809e15c658bab282a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:08:24 +0000 Subject: [PATCH 142/213] make FederationClient._do_send_join async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8ca36c4d32..b4609b78cb 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -621,12 +621,11 @@ class FederationClient(FederationBase): return await self._try_destination_list("send_join", destinations, send_request) - @defer.inlineCallbacks - def _do_send_join(self, destination, pdu): + async def _do_send_join(self, destination: str, pdu: EventBase): time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_join_v2( + content = await self.transport_layer.send_join_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -648,7 +647,7 @@ class FederationClient(FederationBase): logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") - resp = yield self.transport_layer.send_join_v1( + resp = await self.transport_layer.send_join_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From e88b90aaebd19822e310b708696036dc0b1f17f6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:08:51 +0000 Subject: [PATCH 143/213] make FederationClient.send_leave.send_request async --- synapse/federation/federation_client.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b4609b78cb..f98c36039d 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -762,12 +762,9 @@ class FederationClient(FederationBase): RuntimeError if no servers were reachable. 
""" - @defer.inlineCallbacks - def send_request(destination): - content = yield self._do_send_leave(destination, pdu) - + async def send_request(destination: str) -> None: + content = await self._do_send_leave(destination, pdu) logger.debug("Got content: %s", content) - return None return await self._try_destination_list( "send_leave", destinations, send_request From abadf44eb2c32c9a6f1da84239043755f854b327 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:09:07 +0000 Subject: [PATCH 144/213] make FederationClient._do_send_leave async --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f98c36039d..5043220d14 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -770,12 +770,11 @@ class FederationClient(FederationBase): "send_leave", destinations, send_request ) - @defer.inlineCallbacks - def _do_send_leave(self, destination, pdu): + async def _do_send_leave(self, destination, pdu): time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_leave_v2( + content = await self.transport_layer.send_leave_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -797,7 +796,7 @@ class FederationClient(FederationBase): logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") - resp = yield self.transport_layer.send_leave_v1( + resp = await self.transport_layer.send_leave_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From 6deeefb68cbd2bc54d8e108b16e6427454644019 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:14:30 +0000 Subject: [PATCH 145/213] make FederationClient.get_missing_events async --- synapse/federation/federation_client.py | 40 ++++++++++++------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5043220d14..1e9b207a24 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -25,6 +25,7 @@ from typing import ( Iterable, List, Optional, + Sequence, Tuple, TypeVar, ) @@ -828,34 +829,33 @@ class FederationClient(FederationBase): third_party_instance_id=third_party_instance_id, ) - @defer.inlineCallbacks - def get_missing_events( + async def get_missing_events( self, - destination, - room_id, - earliest_events_ids, - latest_events, - limit, - min_depth, - timeout, - ): + destination: str, + room_id: str, + earliest_events_ids: Sequence[str], + latest_events: Iterable[EventBase], + limit: int, + min_depth: int, + timeout: int, + ) -> List[EventBase]: """Tries to fetch events we are missing. This is called when we receive an event without having received all of its ancestors. Args: - destination (str) - room_id (str) - earliest_events_ids (list): List of event ids. Effectively the + destination + room_id + earliest_events_ids: List of event ids. Effectively the events we expected to receive, but haven't. `get_missing_events` should only return events that didn't happen before these. - latest_events (list): List of events we have received that we don't + latest_events: List of events we have received that we don't have all previous events for. - limit (int): Maximum number of events to return. - min_depth (int): Minimum depth of events tor return. 
- timeout (int): Max time to wait in ms + limit: Maximum number of events to return. + min_depth: Minimum depth of events to return. + timeout: Max time to wait in ms """ try: - content = yield self.transport_layer.get_missing_events( + content = await self.transport_layer.get_missing_events( destination=destination, room_id=room_id, earliest_events=earliest_events_ids, @@ -865,14 +865,14 @@ class FederationClient(FederationBase): timeout=timeout, ) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) events = [ event_from_pdu_json(e, format_ver) for e in content.get("events", []) ] - signed_events = yield self._check_sigs_and_hash_and_fetch( + signed_events = await self._check_sigs_and_hash_and_fetch( destination, events, outlier=False, room_version=room_version ) except HttpResponseException as e: From 4b4536dd02cc1128335383b6cc36afc1f0f6d71c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 21:15:08 +0000 Subject: [PATCH 146/213] newsfile --- changelog.d/6840.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6840.misc diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc new file mode 100644 index 0000000000..0496f12de8 --- /dev/null +++ b/changelog.d/6840.misc @@ -0,0 +1 @@ +Port much of `synapse.handlers.federation` to async/await. From ea23210b2dcc75489d9369b13636a56b7449d765 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Feb 2020 22:29:49 +0000 Subject: [PATCH 147/213] make FederationClient.send_invite async --- synapse/federation/federation_client.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 1e9b207a24..51f9b1d8d7 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -659,23 +659,22 @@ class FederationClient(FederationBase): # content. return resp[1] - @defer.inlineCallbacks - def send_invite(self, destination, room_id, event_id, pdu): - room_version = yield self.store.get_room_version_id(room_id) + async def send_invite(self, destination, room_id, event_id, pdu): + room_version = await self.store.get_room_version_id(room_id) - content = yield self._do_send_invite(destination, pdu, room_version) + content = await self._do_send_invite(destination, pdu, room_version) pdu_dict = content["event"] logger.debug("Got response to send_invite: %s", pdu_dict) - room_version = yield self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version_id(room_id) format_ver = room_version_to_event_format(room_version) pdu = event_from_pdu_json(pdu_dict, format_ver) # Check signatures are correct. - pdu = yield self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version, pdu) # FIXME: We should handle signature failures more gracefully. 
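The conversions in patches 140 through 147 all follow the same mechanical
pattern: drop `@defer.inlineCallbacks`, declare the function `async`, and
replace each `yield` on a Deferred with `await`. A minimal sketch of that
pattern, for reference; the function and store names below are illustrative
only, not real Synapse APIs:

    from twisted.internet import defer

    # Before: a generator-based Twisted coroutine. Each `yield` suspends
    # until the Deferred returned by the storage layer fires.
    @defer.inlineCallbacks
    def get_thing_old(store, thing_id):
        thing = yield store.get_thing(thing_id)
        return thing

    # After: a native coroutine. Deferreds are awaitable, so the same call
    # can be awaited directly; callers `await` this function, or wrap it
    # with defer.ensureDeferred where a Deferred is still needed.
    async def get_thing_new(store, thing_id):
        thing = await store.get_thing(thing_id)
        return thing
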
From 23d8a55c7af93d562b7e7f24b0d195fced692123 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 4 Feb 2020 00:10:21 -0500 Subject: [PATCH 148/213] add device signatures to device key query results --- synapse/storage/data_stores/main/devices.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py index ea0503476f..b7617efb80 100644 --- a/synapse/storage/data_stores/main/devices.py +++ b/synapse/storage/data_stores/main/devices.py @@ -320,6 +320,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) else: result["deleted"] = True @@ -524,6 +529,11 @@ class DeviceWorkerStore(SQLBaseStore): device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name + if "signatures" in device: + for sig_user_id, sigs in device["signatures"].items(): + result["keys"].setdefault("signatures", {}).setdefault( + sig_user_id, {} + ).update(sigs) results.append(result) From 245ee142209f634a51942681bb141e617e1ecd55 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 4 Feb 2020 00:21:07 -0500 Subject: [PATCH 149/213] add changelog --- changelog.d/6844.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6844.bugfix diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix new file mode 100644 index 0000000000..e84aa1029f --- /dev/null +++ b/changelog.d/6844.bugfix @@ -0,0 +1 @@ +Fix an issue with cross-signing where device signatures were not sent to remote servers. From c87572d6e426099fa36e2cd8260319531ec0fbb8 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Tue, 4 Feb 2020 16:21:09 +0000 Subject: [PATCH 150/213] Update CONTRIBUTING.md about merging PRs. (#6846) --- CONTRIBUTING.md | 14 ++++++++++++++ changelog.d/6846.doc | 1 + 2 files changed, 15 insertions(+) create mode 100644 changelog.d/6846.doc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5736ede6c4..4b01b6ac8c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs. +## Merge Strategy + +We use the commit history of develop/master extensively to identify +when regressions were introduced and what changes have been made. + +We aim to have a clean merge history, which means we normally squash-merge +changes into develop. For small changes this means there is no need to rebase +to clean up your PR before merging. Larger changes with an organised set of +commits may be merged as-is, if the history is judged to be useful. + +This use of squash-merging will mean PRs built on each other will be hard to +merge. We suggest avoiding these where possible, and if required, ensuring +each PR has a tidy set of commits to ease merging. + ## Conclusion That's it! 
Matrix is a very open and collaborative project as you might expect diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc new file mode 100644 index 0000000000..ad69d608c0 --- /dev/null +++ b/changelog.d/6846.doc @@ -0,0 +1 @@ +Add details of PR merge strategy to contributing docs. \ No newline at end of file From 6475382d807e1fed095d1e3fbd04884799ebd612 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 Feb 2020 17:25:54 +0000 Subject: [PATCH 151/213] Fix detecting unknown devices from remote encrypted events. (#6848) We were looking at the wrong event type (`m.room.encryption` vs `m.room.encrypted`). Also fixup the duplicate `EventTypes` entries. Introduced in #6776. --- changelog.d/6848.bugfix | 1 + synapse/api/constants.py | 3 +-- synapse/handlers/federation.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/stats.py | 2 +- synapse/storage/data_stores/main/stats.py | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 changelog.d/6848.bugfix diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix new file mode 100644 index 0000000000..65688e5d57 --- /dev/null +++ b/changelog.d/6848.bugfix @@ -0,0 +1 @@ +Fix detecting unknown devices from remote encrypted events. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 0ade47e624..cc8577552b 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -77,12 +77,11 @@ class EventTypes(object): Aliases = "m.room.aliases" Redaction = "m.room.redaction" ThirdPartyInvite = "m.room.third_party_invite" - Encryption = "m.room.encryption" RelatedGroups = "m.room.related_groups" RoomHistoryVisibility = "m.room.history_visibility" CanonicalAlias = "m.room.canonical_alias" - Encryption = "m.room.encryption" + Encrypted = "m.room.encrypted" RoomAvatar = "m.room.avatar" RoomEncryption = "m.room.encryption" GuestAccess = "m.room.guest_access" diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c86d3177e9..488200a2d1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -752,7 +752,7 @@ class FederationHandler(BaseHandler): # For encrypted messages we check that we know about the sending device, # if we don't then we mark the device cache for that user as stale.
- if event.type == EventTypes.Encryption: + if event.type == EventTypes.Encrypted: device_id = event.content.get("device_id") if device_id is not None: cached_devices = await self.store.get_cached_devices_for_user( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1382399557..b609a65f47 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -360,7 +360,7 @@ class RoomCreationHandler(BaseHandler): (EventTypes.RoomHistoryVisibility, ""), (EventTypes.GuestAccess, ""), (EventTypes.RoomAvatar, ""), - (EventTypes.Encryption, ""), + (EventTypes.RoomEncryption, ""), (EventTypes.ServerACL, ""), (EventTypes.RelatedGroups, ""), (EventTypes.PowerLevels, ""), diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 7f7d56390e..68e6edace5 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -286,7 +286,7 @@ class StatsHandler(StateDeltasHandler): room_state["history_visibility"] = event_content.get( "history_visibility" ) - elif typ == EventTypes.Encryption: + elif typ == EventTypes.RoomEncryption: room_state["encryption"] = event_content.get("algorithm") elif typ == EventTypes.Name: room_state["name"] = event_content.get("name") diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py index 7bc186e9a1..7af1495e47 100644 --- a/synapse/storage/data_stores/main/stats.py +++ b/synapse/storage/data_stores/main/stats.py @@ -744,7 +744,7 @@ class StatsStore(StateDeltasStore): EventTypes.Create, EventTypes.JoinRules, EventTypes.RoomHistoryVisibility, - EventTypes.Encryption, + EventTypes.RoomEncryption, EventTypes.Name, EventTypes.Topic, EventTypes.RoomAvatar, @@ -816,7 +816,7 @@ class StatsStore(StateDeltasStore): room_state["history_visibility"] = event.content.get( "history_visibility" ) - elif event.type == EventTypes.Encryption: + elif event.type == EventTypes.RoomEncryption: room_state["encryption"] = event.content.get("algorithm") elif event.type == EventTypes.Name: room_state["name"] = event.content.get("name") From d88e0ec0802b3b0a49853fb6b777a35b7c195ea6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Feb 2020 21:31:08 +0000 Subject: [PATCH 152/213] Database updates to populate rooms.room_version (#6847) We're going to need this so that we can figure out how to handle redactions when fetching events from the database. --- changelog.d/6847.misc | 1 + .../57/rooms_version_column_2.sql.postgres | 35 +++++++++++++++++++ .../57/rooms_version_column_2.sql.sqlite | 22 ++++++++++++ 3 files changed, 58 insertions(+) create mode 100644 changelog.d/6847.misc create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres create mode 100644 synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc new file mode 100644 index 0000000000..094e911adb --- /dev/null +++ b/changelog.d/6847.misc @@ -0,0 +1 @@ +Populate `rooms.room_version` database column at startup, rather than in a background update. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres new file mode 100644 index 0000000000..c601cff6de --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres @@ -0,0 +1,35 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- when we first added the room_version column, it was populated via a background +-- update. We now need it to be populated before synapse starts, so we populate +-- any remaining rows with a NULL room version now. For servers which have completed +-- the background update, this will be pretty quick. + +-- the following query will set room_version to NULL if no create event is found for +-- the room in current_state_events, and will set it to '1' if a create event with no +-- room_version is found. + +UPDATE rooms SET room_version=( + SELECT COALESCE(json::json->'content'->>'room_version','1') + FROM current_state_events cse INNER JOIN event_json ej USING (event_id) + WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key='' +) WHERE rooms.room_version IS NULL; + +-- we still allow the background update to complete: it has the useful side-effect of +-- populating `rooms` with any missing rooms (based on the current_state_events table). + +-- see also rooms_version_column_2.sql.sqlite which has a copy of the above query, using +-- sqlite syntax for the json extraction. diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite new file mode 100644 index 0000000000..335c6f2074 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite @@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- see rooms_version_column_2.sql.postgres for details of what's going on here. 
+
+UPDATE rooms SET room_version=(
+    SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1')
+    FROM current_state_events cse INNER JOIN event_json ej USING (event_id)
+    WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key=''
+) WHERE rooms.room_version IS NULL;
From a831d2e4e3c424fb54f186bfa7d83a17965f933e Mon Sep 17 00:00:00 2001
From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com>
Date: Wed, 5 Feb 2020 08:57:38 +0000
Subject: [PATCH 153/213] Reduce performance logging to DEBUG (#6833)

* Reduce txn performance logging to DEBUG

* Changelog.d
---
 changelog.d/6833.misc | 1 +
 synapse/storage/database.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6833.misc

diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc
new file mode 100644
index 0000000000..8a0605f90b
--- /dev/null
+++ b/changelog.d/6833.misc
@@ -0,0 +1 @@
+Reducing log level to DEBUG for synapse.storage.TIME.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 1003dd84a5..3eeb2f7c04 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -343,7 +343,7 @@ class Database(object):

 top_three_counters = self._txn_perf_counters.interval(duration, limit=3)

- perf_logger.info(
+ perf_logger.debug(
 "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters
 )

From 60d06724268891ad3b1e9dc6fe7cd080f9ba21b7 Mon Sep 17 00:00:00 2001
From: Hubert Chathi
Date: Tue, 4 Feb 2020 12:03:54 -0500
Subject: [PATCH 154/213] Merge pull request #6844 from matrix-org/uhoreg/cross_signing_fix_device_fed

add device signatures to device key query results
---
 changelog.d/6844.bugfix | 1 +
 synapse/storage/data_stores/main/devices.py | 10 ++++++++++
 2 files changed, 11 insertions(+)
 create mode 100644 changelog.d/6844.bugfix

diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix
new file mode 100644
index 0000000000..e84aa1029f
--- /dev/null
+++ b/changelog.d/6844.bugfix
@@ -0,0 +1 @@
+Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index ea0503476f..b7617efb80 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -320,6 +320,11 @@ class DeviceWorkerStore(SQLBaseStore):
 device_display_name = device.get("device_display_name", None)
 if device_display_name:
 result["device_display_name"] = device_display_name
+ if "signatures" in device:
+ for sig_user_id, sigs in device["signatures"].items():
+ result["keys"].setdefault("signatures", {}).setdefault(
+ sig_user_id, {}
+ ).update(sigs)
 else:
 result["deleted"] = True
@@ -524,6 +529,11 @@ class DeviceWorkerStore(SQLBaseStore):
 device_display_name = device.get("device_display_name", None)
 if device_display_name:
 result["device_display_name"] = device_display_name
+ if "signatures" in device:
+ for sig_user_id, sigs in device["signatures"].items():
+ result["keys"].setdefault("signatures", {}).setdefault(
+ sig_user_id, {}
+ ).update(sigs)
 results.append(result)

From a58860e4802c31680ba43e59ec537984af9f5637 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 5 Feb 2020 14:02:39 +0000
Subject: [PATCH 155/213] Check sender_key matches on inbound encrypted events. (#6850)

If they don't then the device lists are probably out of sync.
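In outline, the new check behaves as sketched below. This is a simplified,
editorialised sketch of the logic in the diff that follows, not the
implementation itself; it assumes `cached_devices` maps device IDs to cached
device info dicts, as returned by `get_cached_devices_for_user`:

    # Simplified sketch of the sender-key consistency check added by this
    # patch; the real change is in the diff below. `cached_devices` is
    # assumed to map device_id -> cached device info for the sender.
    def sender_key_is_expected(event_content, cached_devices):
        device_id = event_content.get("device_id")
        sender_key = event_content.get("sender_key")
        device = cached_devices.get(device_id) if device_id else None

        if device and event_content.get("algorithm") == "m.megolm.v1.aes-sha2":
            # Megolm events carry the device's curve25519 identity key.
            keys = device.get("keys", {}).get("keys", {})
            expected = [keys.get("curve25519:%s" % (device_id,))]
        elif device:
            # Unrecognised algorithm: accept any key held for the device.
            expected = list(device.get("keys", {}).get("keys", {}).values())
        elif device_id:
            # A device ID we hold no keys for.
            expected = []
        else:
            # No device ID in the event: check against all cached devices.
            expected = [
                key
                for dev in cached_devices.values()
                for key in dev.get("keys", {}).get("keys", {}).values()
            ]
        return sender_key in expected

If the key is not among the expected ones, the sender's device cache is
marked stale and a resync is kicked off, as the diff shows.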
--- changelog.d/6850.misc | 1 + synapse/handlers/device.py | 8 +++- synapse/handlers/federation.py | 72 ++++++++++++++++++++++++++++------ 3 files changed, 67 insertions(+), 14 deletions(-) create mode 100644 changelog.d/6850.misc diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc new file mode 100644 index 0000000000..418569113f --- /dev/null +++ b/changelog.d/6850.misc @@ -0,0 +1 @@ +Detect unexpected sender keys on inbound encrypted events and resync device lists. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 26ef5e150c..a9bd431486 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -598,7 +598,13 @@ class DeviceListUpdater(object): # happens if we've missed updates. resync = yield self._need_to_do_resync(user_id, pending_updates) - logger.debug("Need to re-sync devices for %r? %r", user_id, resync) + if logger.isEnabledFor(logging.INFO): + logger.info( + "Received device list update for %s, requiring resync: %s. Devices: %s", + user_id, + resync, + ", ".join(u[0] for u in pending_updates), + ) if resync: yield self.user_device_resync(user_id) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 488200a2d1..e9441bbeff 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -754,27 +754,73 @@ class FederationHandler(BaseHandler): # if we don't then we mark the device cache for that user as stale. if event.type == EventTypes.Encrypted: device_id = event.content.get("device_id") + sender_key = event.content.get("sender_key") + + cached_devices = await self.store.get_cached_devices_for_user(event.sender) + + resync = False # Whether we should resync device lists. + + device = None if device_id is not None: - cached_devices = await self.store.get_cached_devices_for_user( - event.sender - ) - if device_id not in cached_devices: + device = cached_devices.get(device_id) + if device is None: logger.info( "Received event from remote device not in our cache: %s %s", event.sender, device_id, ) - await self.store.mark_remote_user_device_cache_as_stale( - event.sender + resync = True + + # We also check if the `sender_key` matches what we expect. + if sender_key is not None: + # Figure out what sender key we're expecting. If we know the + # device and recognize the algorithm then we can work out the + # exact key to expect. Otherwise check it matches any key we + # have for that device. + if device: + keys = device.get("keys", {}).get("keys", {}) + + if event.content.get("algorithm") == "m.megolm.v1.aes-sha2": + # For this algorithm we expect a curve25519 key. + key_name = "curve25519:%s" % (device_id,) + current_keys = [keys.get(key_name)] + else: + # We don't understand the algorithm, so we just + # check it matches a key for the device. + current_keys = keys.values() + elif device_id: + # We don't have any keys for the device ID. + current_keys = [] + else: + # The event didn't include a device ID, so we just look for + # keys across all devices. + current_keys = ( + key + for device in cached_devices + for key in device.get("keys", {}).get("keys", {}).values() ) - # Immediately attempt a resync in the background - if self.config.worker_app: - return run_in_background(self._user_device_resync, event.sender) - else: - return run_in_background( - self._device_list_updater.user_device_resync, event.sender - ) + # We now check that the sender key matches (one of) the expected + # keys.
+ if sender_key not in current_keys: + logger.info( + "Received event from remote device with unexpected sender key: %s %s: %s", + event.sender, + device_id or "", + sender_key, + ) + resync = True + + if resync: + await self.store.mark_remote_user_device_cache_as_stale(event.sender) + + # Immediately attempt a resync in the background + if self.config.worker_app: + return run_in_background(self._user_device_resync, event.sender) + else: + return run_in_background( + self._device_list_updater.user_device_resync, event.sender + ) @log_function async def backfill(self, dest, room_id, limit, extremities): From 146fec08208144c8566da5ab6c2683229d162c1a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Feb 2020 15:47:00 +0000 Subject: [PATCH 156/213] Apply suggestions from code review Co-Authored-By: Erik Johnston --- synapse/federation/federation_client.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 51f9b1d8d7..b4525d28c2 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -528,7 +528,7 @@ class FederationClient(FederationBase): Returns: a dict with members ``origin`` (a string - giving the serer the event was sent to, ``state`` (?) and + giving the server the event was sent to, ``state`` (?) and ``auth_chain``. Raises: @@ -659,7 +659,9 @@ class FederationClient(FederationBase): # content. return resp[1] - async def send_invite(self, destination, room_id, event_id, pdu): + async def send_invite( + self, destination: str, room_id: str, event_id: str, pdu: EventBase, + ) -> EventBase: room_version = await self.store.get_room_version_id(room_id) content = await self._do_send_invite(destination, pdu, room_version) From 6bbd890f05b59ffa33769ae15ea73bdf3c8dc908 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Feb 2020 15:49:42 +0000 Subject: [PATCH 157/213] make FederationClient._do_send_invite async --- synapse/federation/federation_client.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b4525d28c2..3a840e068b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -53,6 +53,7 @@ from synapse.events import EventBase, builder, room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function +from synapse.types import JsonDict from synapse.util import unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -682,23 +683,19 @@ class FederationClient(FederationBase): return pdu - @defer.inlineCallbacks - def _do_send_invite(self, destination, pdu, room_version): + async def _do_send_invite( + self, destination: str, pdu: EventBase, room_version: str + ) -> JsonDict: """Actually sends the invite, first trying v2 API and falling back to v1 API if necessary. 
- Args: - destination (str): Target server - pdu (FrozenEvent) - room_version (str) - Returns: - dict: The event as a dict as returned by the remote server + The event as a dict as returned by the remote server """ time_now = self._clock.time_msec() try: - content = yield self.transport_layer.send_invite_v2( + content = await self.transport_layer.send_invite_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -737,7 +734,7 @@ class FederationClient(FederationBase): # Didn't work, try v1 API. # Note the v1 API returns a tuple of `(200, content)` - _, content = yield self.transport_layer.send_invite_v1( + _, content = await self.transport_layer.send_invite_v1( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, From f84700fba8cd9345d7a1a025462c7f8650f27386 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 14:07:31 +0000 Subject: [PATCH 158/213] Pass room version object into `FederationClient.get_pdu` --- synapse/federation/federation_client.py | 8 +++++--- synapse/handlers/federation.py | 8 ++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3a840e068b..d2d42e7009 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -230,7 +230,7 @@ class FederationClient(FederationBase): self, destinations: Iterable[str], event_id: str, - room_version: str, + room_version: RoomVersion, outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: @@ -262,7 +262,7 @@ class FederationClient(FederationBase): pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) - format_ver = room_version_to_event_format(room_version) + format_ver = room_version.event_format signed_pdu = None for destination in destinations: @@ -292,7 +292,9 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. - signed_pdu = await self._check_sigs_and_hash(room_version, pdu) + signed_pdu = await self._check_sigs_and_hash( + room_version.identifier, pdu + ) break diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5728ea2ee7..5ca410e2b3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1110,7 +1110,7 @@ class FederationHandler(BaseHandler): Logs a warning if we can't find the given event. 
""" - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) event_infos = [] @@ -1916,11 +1916,7 @@ class FederationHandler(BaseHandler): for e_id in missing_auth_events: m_ev = await self.federation_client.get_pdu( - [origin], - e_id, - room_version=room_version.identifier, - outlier=True, - timeout=10000, + [origin], e_id, room_version=room_version, outlier=True, timeout=10000, ) if m_ev and m_ev.event_id == e_id: event_map[e_id] = m_ev From ee0525b2b2245d66a3ce0c186cb1ac1725dd93e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Feb 2020 17:35:09 +0000 Subject: [PATCH 159/213] Simplify `room_version` handling in `FederationClient.send_invite` --- synapse/federation/federation_client.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d2d42e7009..110e42b9ed 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -665,7 +665,7 @@ class FederationClient(FederationBase): async def send_invite( self, destination: str, room_id: str, event_id: str, pdu: EventBase, ) -> EventBase: - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) content = await self._do_send_invite(destination, pdu, room_version) @@ -673,20 +673,17 @@ class FederationClient(FederationBase): logger.debug("Got response to send_invite: %s", pdu_dict) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - - pdu = event_from_pdu_json(pdu_dict, format_ver) + pdu = event_from_pdu_json(pdu_dict, room_version.event_format) # Check signatures are correct. - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) # FIXME: We should handle signature failures more gracefully. return pdu async def _do_send_invite( - self, destination: str, pdu: EventBase, room_version: str + self, destination: str, pdu: EventBase, room_version: RoomVersion ) -> JsonDict: """Actually sends the invite, first trying v2 API and falling back to v1 API if necessary. @@ -703,7 +700,7 @@ class FederationClient(FederationBase): event_id=pdu.event_id, content={ "event": pdu.get_pdu_json(time_now), - "room_version": room_version, + "room_version": room_version.identifier, "invite_room_state": pdu.unsigned.get("invite_room_state", []), }, ) @@ -721,8 +718,7 @@ class FederationClient(FederationBase): # Otherwise, we assume that the remote server doesn't understand # the v2 invite API. That's ok provided the room uses old-style event # IDs. - v = KNOWN_ROOM_VERSIONS.get(room_version) - if v.event_format != EventFormatVersions.V1: + if room_version.event_format != EventFormatVersions.V1: raise SynapseError( 400, "User's homeserver does not support this room version", From ff70ec0a00bd65d819ce42fdb46d67160c197202 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 Jan 2020 15:30:02 +0000 Subject: [PATCH 160/213] Newsfile --- changelog.d/6823.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6823.misc diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6823.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. 
From 39c2d26e0b80ee0cd5589fc577327f2d3d80a446 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 Feb 2020 12:41:33 -0500 Subject: [PATCH 161/213] Add quotes around pip install target (my shell complained without them). --- README.rst | 2 +- changelog.d/6855.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6855.misc diff --git a/README.rst b/README.rst index 2691dfc23d..4db7d17e94 100644 --- a/README.rst +++ b/README.rst @@ -272,7 +272,7 @@ to install using pip and a virtualenv:: virtualenv -p python3 env source env/bin/activate - python -m pip install --no-use-pep517 -e .[all] + python -m pip install --no-use-pep517 -e ".[all]" This will run a process of downloading and installing all the needed dependencies into a virtual env. diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc new file mode 100644 index 0000000000..904361ddfb --- /dev/null +++ b/changelog.d/6855.misc @@ -0,0 +1 @@ +Update pip install directions in readme to avoid error when using zsh. From f0561fcffd172cb0dfe035dcc78f51bdd451c010 Mon Sep 17 00:00:00 2001 From: Robin Vleij Date: Wed, 5 Feb 2020 22:27:38 +0100 Subject: [PATCH 162/213] Update documentation (#6859) Update documentation to reflect the correct format of user_id (fully qualified). --- docs/admin_api/user_admin_api.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 0b3d09d694..ed6df61a26 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -2,7 +2,8 @@ Create or modify Account ======================== This API allows an administrator to create or modify a user account with a -specific ``user_id``. +specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example, +``@user:server.com``. This api is:: From 6a7e90ad782bddce95fa0c7d93e56291aa31c33d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:40:08 +0000 Subject: [PATCH 163/213] 1.10.0rc2 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/6844.bugfix | 1 - changelog.d/6848.bugfix | 1 - changelog.d/6850.misc | 1 - synapse/__init__.py | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/6844.bugfix delete mode 100644 changelog.d/6848.bugfix delete mode 100644 changelog.d/6850.misc diff --git a/CHANGES.md b/CHANGES.md index ab6fce3e7d..ee0e5d25e4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.10.0rc2 (2020-02-06) +============================== + +Bugfixes +-------- + +- Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844)) +- Fix detecting unknown devices from remote encrypted events. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) + + +Internal Changes +---------------- + +- Detect unexpected sender keys on inbound encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850)) + + Synapse 1.10.0rc1 (2020-01-31) ============================== diff --git a/changelog.d/6844.bugfix b/changelog.d/6844.bugfix deleted file mode 100644 index e84aa1029f..0000000000 --- a/changelog.d/6844.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an issue with cross-signing where device signatures were not sent to remote servers.
diff --git a/changelog.d/6848.bugfix b/changelog.d/6848.bugfix deleted file mode 100644 index 65688e5d57..0000000000 --- a/changelog.d/6848.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix detecting unknown devices from remote encrypted events. diff --git a/changelog.d/6850.misc b/changelog.d/6850.misc deleted file mode 100644 index 418569113f..0000000000 --- a/changelog.d/6850.misc +++ /dev/null @@ -1 +0,0 @@ -Detect unexpected sender keys on inbound encrypted events and resync device lists. diff --git a/synapse/__init__.py b/synapse/__init__.py index bd942d3e1c..4f1859bd57 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc1" +__version__ = "1.10.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 4a50b674f2fa730fd3e85fe5d59b84b00e34bfc7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:45:29 +0000 Subject: [PATCH 164/213] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ee0e5d25e4..e56f738481 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,7 +17,7 @@ Internal Changes Synapse 1.10.0rc1 (2020-01-31) ============================== -**WARNING**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. +**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. Features From b5176166b7b62f3c04c21f12a775982c99a90c9c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:51:02 +0000 Subject: [PATCH 165/213] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e56f738481..17c7c91c62 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,13 +5,13 @@ Bugfixes -------- - Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844)) -- Fix detecting unknown devices from remote encrypted events. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) +- Fix to the unknown remote device detection which was indroduced in 1.10.rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) Internal Changes ---------------- -- Detect unexpected sender keys on inbound encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850)) +- Detect unexpected sender keys on remote encrypted events and resync device lists. ([\#6850](https://github.com/matrix-org/synapse/issues/6850)) Synapse 1.10.0rc1 (2020-01-31) From f663118155dbc242f99165f978817ef9dbeb9fd1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 10:52:25 +0000 Subject: [PATCH 166/213] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 17c7c91c62..c2aa735908 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix an issue with cross-signing where device signatures were not sent to remote servers. ([\#6844](https://github.com/matrix-org/synapse/issues/6844)) -- Fix to the unknown remote device detection which was indroduced in 1.10.rc1. 
([\#6848](https://github.com/matrix-org/synapse/issues/6848)) +- Fix to the unknown remote device detection which was introduced in 1.10.rc1. ([\#6848](https://github.com/matrix-org/synapse/issues/6848)) Internal Changes From ed630ea17c40d328cc0796e35d37287768c7140d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Feb 2020 13:31:05 +0000 Subject: [PATCH 167/213] Reduce amount of logging at INFO level. (#6862) A lot of the things we log at INFO are now a bit superfluous, so lets make them DEBUG logs to reduce the amount we log by default. Co-Authored-By: Brendan Abolivier Co-authored-by: Brendan Abolivier --- changelog.d/6862.misc | 1 + synapse/federation/federation_server.py | 6 +++--- synapse/federation/transport/server.py | 2 +- synapse/handlers/room.py | 10 +++++----- synapse/handlers/stats.py | 2 +- synapse/handlers/sync.py | 6 +++--- synapse/handlers/user_directory.py | 4 ++-- synapse/http/site.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/storage/data_stores/main/user_directory.py | 4 ++-- synapse/storage/persist_events.py | 2 +- synapse/util/caches/response_cache.py | 2 +- 12 files changed, 22 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6862.misc diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc new file mode 100644 index 0000000000..83626d2939 --- /dev/null +++ b/changelog.d/6862.misc @@ -0,0 +1 @@ +Reduce amount we log at `INFO` level. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d92d5e8064..8e3933b6c5 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -573,7 +573,7 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - logger.info( + logger.debug( "on_get_missing_events: earliest_events: %r, latest_events: %r," " limit: %d", earliest_events, @@ -586,11 +586,11 @@ class FederationServer(FederationBase): ) if len(missing_events) < 5: - logger.info( + logger.debug( "Returning %d events: %r", len(missing_events), missing_events ) else: - logger.info("Returning %d events", len(missing_events)) + logger.debug("Returning %d events", len(missing_events)) time_now = self._clock.time_msec() diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ae48ba8157..92a9ae2320 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -158,7 +158,7 @@ class Authenticator(object): origin, json_request, now, "Incoming request" ) - logger.info("Request from %s", origin) + logger.debug("Request from %s", origin) request.authenticated_entity = origin # If we get a valid signed request from the other side, its probably diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b609a65f47..559e3399b8 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -259,7 +259,7 @@ class RoomCreationHandler(BaseHandler): for v in ("invite", "events_default"): current = int(pl_content.get(v, 0)) if current < restricted_level: - logger.info( + logger.debug( "Setting level for %s in %s to %i (was %i)", v, old_room_id, @@ -269,7 +269,7 @@ class RoomCreationHandler(BaseHandler): pl_content[v] = restricted_level updated = True else: - logger.info("Not setting level for %s (already %i)", v, current) + logger.debug("Not setting level for %s (already %i)", v, current) if updated: try: @@ -296,7 +296,7 @@ class RoomCreationHandler(BaseHandler): EventTypes.Aliases, events_default 
) - logger.info("Setting correct PLs in new room to %s", new_pl_content) + logger.debug("Setting correct PLs in new room to %s", new_pl_content) yield self.event_creation_handler.create_and_send_nonmember_event( requester, { @@ -782,7 +782,7 @@ class RoomCreationHandler(BaseHandler): @defer.inlineCallbacks def send(etype, content, **kwargs): event = create(etype, content, **kwargs) - logger.info("Sending %s in new room", etype) + logger.debug("Sending %s in new room", etype) yield self.event_creation_handler.create_and_send_nonmember_event( creator, event, ratelimit=False ) @@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler): creation_content.update({"creator": creator_id}) yield send(etype=EventTypes.Create, content=creation_content) - logger.info("Sending %s in new room", EventTypes.Member) + logger.debug("Sending %s in new room", EventTypes.Member) yield self.room_member_handler.update_membership( creator, creator.user, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 68e6edace5..d93a276693 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler): room_state["guest_access"] = event_content.get("guest_access") for room_id, state in room_to_state_updates.items(): - logger.info("Updating room_stats_state for %s: %s", room_id, state) + logger.debug("Updating room_stats_state for %s: %s", room_id, state) yield self.store.update_room_state(room_id, state) return room_to_stats_deltas, user_to_stats_deltas diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5f060241b4..f8d60d32ba 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -968,7 +968,7 @@ class SyncHandler(object): # Always use the `now_token` in `SyncResultBuilder` now_token = await self.event_sources.get_current_token() - logger.info( + logger.debug( "Calculating sync response for %r between %s and %s", sync_config.user, since_token, @@ -1498,7 +1498,7 @@ class SyncHandler(object): room_entries = [] invited = [] for room_id, events in iteritems(mem_change_events_by_room_id): - logger.info( + logger.debug( "Membership changes in %s: [%s]", room_id, ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)), @@ -1892,7 +1892,7 @@ class SyncHandler(object): if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() - logger.info( + logger.debug( "Incremental gappy sync of %s for user %s with %d state events" % (room_id, user_id, len(state)) ) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 624f05ab5b..81aa58dc8c 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.pos, room_max_stream_ordering ) - logger.info("Handling %d state deltas", len(deltas)) + logger.debug("Handling %d state deltas", len(deltas)) yield self._handle_deltas(deltas) self.pos = max_pos @@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler): room_id, self.server_name ) if not is_in_room: - logger.info("Server left room: %r", room_id) + logger.debug("Server left room: %r", room_id) # Fetch all the users that we marked as being in user # directory due to being in the room and then check if # need to remove those users or not diff --git a/synapse/http/site.py b/synapse/http/site.py index 911251c0bc..e092193c9c 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -225,7 +225,7 @@ class SynapseRequest(Request): 
self.start_time, name=servlet_name, method=self.get_method() ) - self.site.access_logger.info( + self.site.access_logger.debug( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index d0879b0490..5bb17d1228 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -398,7 +398,7 @@ class HttpPusher(object): Args: badge (int): number of unread messages """ - logger.info("Sending updated badge count %d to %s", badge, self.name) + logger.debug("Sending updated badge count %d to %s", badge, self.name) d = { "notification": { "id": "", diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py index 90c180ec6d..6b8130bf0f 100644 --- a/synapse/storage/data_stores/main/user_directory.py +++ b/synapse/storage/data_stores/main/user_directory.py @@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) return 1 - logger.info( + logger.debug( "Processing the next %d rooms of %d remaining" % (len(rooms_to_work_on), progress["remaining"]) ) @@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) return 1 - logger.info( + logger.debug( "Processing the next %d users of %d remaining" % (len(users_to_work_on), progress["remaining"]) ) diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index af3fd67ab9..a5370ed527 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -390,7 +390,7 @@ class EventsPersistenceStorage(object): state_delta_reuse_delta_counter.inc() break - logger.info("Calculating state delta for room %s", room_id) + logger.debug("Calculating state delta for room %s", room_id) with Measure( self._clock, "persist_events.get_new_state_after_events" ): diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 82d3eefe0e..b68f9fe0d4 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -144,7 +144,7 @@ class ResponseCache(object): """ result = self.get(key) if not result: - logger.info( + logger.debug( "[%s]: no cached result for [%s], calculating new one", self._name, key ) d = run_in_background(callback, *args, **kwargs) From 99fcc96289f673f96f2d180a84df84f6b8a85521 Mon Sep 17 00:00:00 2001 From: PeerD Date: Thu, 6 Feb 2020 15:15:29 +0100 Subject: [PATCH 168/213] Third party event rules Update (#6781) --- changelog.d/6781.bugfix | 1 + synapse/events/third_party_rules.py | 7 ++++--- synapse/handlers/room.py | 6 +++++- 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6781.bugfix diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix new file mode 100644 index 0000000000..47cd671bff --- /dev/null +++ b/changelog.d/6781.bugfix @@ -0,0 +1 @@ +Fixed third party event rules function `on_create_room`'s return value being ignored. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 86f7e5f8aa..459132d388 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -74,15 +74,16 @@ class ThirdPartyEventRules(object): is_requester_admin (bool): If the requester is an admin Returns: - defer.Deferred + defer.Deferred[bool]: Whether room creation is allowed or denied. 
""" if self.third_party_rules is None: - return + return True - yield self.third_party_rules.on_create_room( + ret = yield self.third_party_rules.on_create_room( requester, config, is_requester_admin ) + return ret @defer.inlineCallbacks def check_threepid_can_be_invited(self, medium, address, room_id): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 559e3399b8..ab07edd2fc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -579,9 +579,13 @@ class RoomCreationHandler(BaseHandler): # Check whether the third party rules allows/changes the room create # request. - yield self.third_party_event_rules.on_create_room( + event_allowed = yield self.third_party_event_rules.on_create_room( requester, config, is_requester_admin=is_requester_admin ) + if not event_allowed: + raise SynapseError( + 403, "You are not permitted to create rooms", Codes.FORBIDDEN + ) if not is_requester_admin and not self.spam_checker.user_may_create_room( user_id From bce557175bad82889d303b349e6575636c41b702 Mon Sep 17 00:00:00 2001 From: timfi Date: Thu, 6 Feb 2020 15:45:01 +0100 Subject: [PATCH 169/213] Allow empty federation_certificate_verification_whitelist (#6849) --- changelog.d/6849.bugfix | 1 + synapse/config/tls.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6849.bugfix diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix new file mode 100644 index 0000000000..d928a26ec6 --- /dev/null +++ b/changelog.d/6849.bugfix @@ -0,0 +1 @@ +Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank. diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 2e9e478a2a..2514b0713d 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -109,6 +109,8 @@ class TlsConfig(Config): fed_whitelist_entries = config.get( "federation_certificate_verification_whitelist", [] ) + if fed_whitelist_entries is None: + fed_whitelist_entries = [] # Support globs (*) in whitelist values self.federation_certificate_verification_whitelist = [] # type: List[str] From b0c8bdd49dd416ed066b12daee95cf5f4828f03b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Feb 2020 15:50:39 +0000 Subject: [PATCH 170/213] pass room version into FederationClient.send_join (#6854) ... which allows us to sanity-check the create event. --- changelog.d/6854.misc | 1 + synapse/federation/federation_client.py | 60 +++++++++++++------------ synapse/handlers/federation.py | 3 +- 3 files changed, 34 insertions(+), 30 deletions(-) create mode 100644 changelog.d/6854.misc diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6854.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 110e42b9ed..5fb4bd414c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -516,7 +516,7 @@ class FederationClient(FederationBase): ) async def send_join( - self, destinations: Iterable[str], pdu: EventBase, event_format_version: int + self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion ) -> Dict[str, Any]: """Sends a join event to one of a list of homeservers. @@ -527,7 +527,8 @@ class FederationClient(FederationBase): destinations: Candidate homeservers which are probably participating in the room. 
pdu: event to be sent - event_format_version: The event format version + room_version: the version of the room (according to the server that + did the make_join) Returns: a dict with members ``origin`` (a string @@ -540,58 +541,51 @@ class FederationClient(FederationBase): RuntimeError: if no servers were reachable. """ - def check_authchain_validity(signed_auth_chain): - for e in signed_auth_chain: - if e.type == EventTypes.Create: - create_event = e - break - else: - raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,)) - - # the room version should be sane. - room_version = create_event.content.get("room_version", "1") - if room_version not in KNOWN_ROOM_VERSIONS: - # This shouldn't be possible, because the remote server should have - # rejected the join attempt during make_join. - raise InvalidResponseError( - "room appears to have unsupported version %s" % (room_version,) - ) - async def send_request(destination) -> Dict[str, Any]: content = await self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) state = [ - event_from_pdu_json(p, event_format_version, outlier=True) + event_from_pdu_json(p, room_version.event_format, outlier=True) for p in content.get("state", []) ] auth_chain = [ - event_from_pdu_json(p, event_format_version, outlier=True) + event_from_pdu_json(p, room_version.event_format, outlier=True) for p in content.get("auth_chain", []) ] pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} - room_version = None + create_event = None for e in state: if (e.type, e.state_key) == (EventTypes.Create, ""): - room_version = e.content.get( - "room_version", RoomVersions.V1.identifier - ) + create_event = e break - if room_version is None: + if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. raise SynapseError(400, "No create event in state") + # the room version should be sane. + create_room_version = create_event.content.get( + "room_version", RoomVersions.V1.identifier + ) + if create_room_version != room_version.identifier: + # either the server that fulfilled the make_join, or the server that is + # handling the send_join, is lying. 
+            raise InvalidResponseError(
+                "Unexpected room version %s in create event"
+                % (create_room_version,)
+            )
+
         valid_pdus = await self._check_sigs_and_hash_and_fetch(
             destination,
             list(pdus.values()),
             outlier=True,
-            room_version=room_version,
+            room_version=room_version.identifier,
         )
 
         valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -615,7 +609,17 @@ class FederationClient(FederationBase):
             for s in signed_state:
                 s.internal_metadata = copy.deepcopy(s.internal_metadata)
 
-            check_authchain_validity(signed_auth)
+            # double-check that the same create event has ended up in the auth chain
+            auth_chain_create_events = [
+                e.event_id
+                for e in signed_auth
+                if (e.type, e.state_key) == (EventTypes.Create, "")
+            ]
+            if auth_chain_create_events != [create_event.event_id]:
+                raise InvalidResponseError(
+                    "Unexpected create event(s) in auth chain: %s"
+                    % (auth_chain_create_events,)
+                )
 
             return {
                 "state": signed_state,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ef3cc264b7..10e8b6ea4c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1305,9 +1305,8 @@ class FederationHandler(BaseHandler):
         except ValueError:
             pass
 
-        event_format_version = room_version_obj.event_format
         ret = await self.federation_client.send_join(
-            target_hosts, event, event_format_version
+            target_hosts, event, room_version_obj
         )
 
         origin = ret["origin"]

From 928edef9793bf10fa6156a42c4babbfaaaa17f88 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 31 Jan 2020 16:50:13 +0000
Subject: [PATCH 171/213] Pass room_version into `event_from_pdu_json`

It's called from all over the shop, so this one's a bit messy.
---
 changelog.d/6856.misc                   |  1 +
 synapse/federation/federation_base.py   | 28 +++++++++--------
 synapse/federation/federation_client.py | 35 ++++++++++-----------
 synapse/federation/federation_server.py | 41 +++++++++----------------
 tests/handlers/test_federation.py       |  6 ++--
 5 files changed, 51 insertions(+), 60 deletions(-)
 create mode 100644 changelog.d/6856.misc

diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6856.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 0e22183280..ebe8b8e9fe 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList from synapse.api.constants import MAX_DEPTH, EventTypes, Membership from synapse.api.errors import Codes, SynapseError -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersion, +) from synapse.crypto.event_signing import check_event_content_hash -from synapse.events import event_type_from_format_version +from synapse.events import EventBase, event_type_from_format_version from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( @@ -33,7 +38,7 @@ from synapse.logging.context import ( make_deferred_yieldable, preserve_fn, ) -from synapse.types import get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util import unwrapFirstError logger = logging.getLogger(__name__) @@ -342,16 +347,15 @@ def _is_invite_via_3pid(event): ) -def event_from_pdu_json(pdu_json, event_format_version, outlier=False): - """Construct a FrozenEvent from an event json received over federation +def event_from_pdu_json( + pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False +) -> EventBase: + """Construct an EventBase from an event json received over federation Args: - pdu_json (object): pdu as received over federation - event_format_version (int): The event format version - outlier (bool): True to mark this event as an outlier - - Returns: - FrozenEvent + pdu_json: pdu as received over federation + room_version: The version of the room this event belongs to + outlier: True to mark this event as an outlier Raises: SynapseError: if the pdu is missing required fields or is otherwise @@ -370,7 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False): elif depth > MAX_DEPTH: raise SynapseError(400, "Depth too large", Codes.BAD_JSON) - event = event_type_from_format_version(event_format_version)(pdu_json) + event = event_type_from_format_version(room_version.event_format)(pdu_json) event.internal_metadata.outlier = outlier diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 5fb4bd414c..4870e39652 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -49,7 +49,7 @@ from synapse.api.room_versions import ( RoomVersion, RoomVersions, ) -from synapse.events import EventBase, builder, room_version_to_event_format +from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.logging.context import make_deferred_yieldable from synapse.logging.utils import log_function @@ -209,18 +209,18 @@ class FederationClient(FederationBase): logger.debug("backfill transaction_data=%r", transaction_data) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) pdus = [ - event_from_pdu_json(p, format_ver, outlier=False) + event_from_pdu_json(p, room_version, outlier=False) for p in transaction_data["pdus"] ] # FIXME: We should handle signature failures more gracefully. 
pdus[:] = await make_deferred_yieldable( defer.gatherResults( - self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True + self._check_sigs_and_hashes(room_version.identifier, pdus), + consumeErrors=True, ).addErrback(unwrapFirstError) ) @@ -262,8 +262,6 @@ class FederationClient(FederationBase): pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) - format_ver = room_version.event_format - signed_pdu = None for destination in destinations: now = self._clock.time_msec() @@ -284,7 +282,7 @@ class FederationClient(FederationBase): ) pdu_list = [ - event_from_pdu_json(p, format_ver, outlier=outlier) + event_from_pdu_json(p, room_version, outlier=outlier) for p in transaction_data["pdus"] ] @@ -350,15 +348,15 @@ class FederationClient(FederationBase): async def get_event_auth(self, destination, room_id, event_id): res = await self.transport_layer.get_event_auth(destination, room_id, event_id) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) auth_chain = [ - event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"] + event_from_pdu_json(p, room_version, outlier=True) + for p in res["auth_chain"] ] signed_auth = await self._check_sigs_and_hash_and_fetch( - destination, auth_chain, outlier=True, room_version=room_version + destination, auth_chain, outlier=True, room_version=room_version.identifier ) signed_auth.sort(key=lambda e: e.depth) @@ -547,12 +545,12 @@ class FederationClient(FederationBase): logger.debug("Got content: %s", content) state = [ - event_from_pdu_json(p, room_version.event_format, outlier=True) + event_from_pdu_json(p, room_version, outlier=True) for p in content.get("state", []) ] auth_chain = [ - event_from_pdu_json(p, room_version.event_format, outlier=True) + event_from_pdu_json(p, room_version, outlier=True) for p in content.get("auth_chain", []) ] @@ -677,7 +675,7 @@ class FederationClient(FederationBase): logger.debug("Got response to send_invite: %s", pdu_dict) - pdu = event_from_pdu_json(pdu_dict, room_version.event_format) + pdu = event_from_pdu_json(pdu_dict, room_version) # Check signatures are correct. 
pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) @@ -865,15 +863,14 @@ class FederationClient(FederationBase): timeout=timeout, ) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) events = [ - event_from_pdu_json(e, format_ver) for e in content.get("events", []) + event_from_pdu_json(e, room_version) for e in content.get("events", []) ] signed_events = await self._check_sigs_and_hash_and_fetch( - destination, events, outlier=False, room_version=room_version + destination, events, outlier=False, room_version=room_version.identifier ) except HttpResponseException as e: if not e.code == 400: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8e3933b6c5..2489832a11 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -38,7 +38,6 @@ from synapse.api.errors import ( UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events import room_version_to_event_format from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction @@ -234,24 +233,17 @@ class FederationServer(FederationBase): continue try: - room_version = await self.store.get_room_version_id(room_id) + room_version = await self.store.get_room_version(room_id) except NotFoundError: logger.info("Ignoring PDU for unknown room_id: %s", room_id) continue - - try: - format_ver = room_version_to_event_format(room_version) - except UnsupportedRoomVersionError: + except UnsupportedRoomVersionError as e: # this can happen if support for a given room version is withdrawn, # so that we still get events for said room. 
- logger.info( - "Ignoring PDU for room %s with unknown version %s", - room_id, - room_version, - ) + logger.info("Ignoring PDU: %s", e) continue - event = event_from_pdu_json(p, format_ver) + event = event_from_pdu_json(p, room_version) pdus_by_room.setdefault(room_id, []).append(event) pdu_results = {} @@ -407,9 +399,7 @@ class FederationServer(FederationBase): Codes.UNSUPPORTED_ROOM_VERSION, ) - format_ver = room_version.event_format - - pdu = event_from_pdu_json(content, format_ver) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) @@ -420,16 +410,15 @@ class FederationServer(FederationBase): async def on_send_join_request(self, origin, content, room_id): logger.debug("on_send_join_request: content: %s", content) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - pdu = event_from_pdu_json(content, format_ver) + room_version = await self.store.get_room_version(room_id) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) res_pdus = await self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() @@ -451,16 +440,15 @@ class FederationServer(FederationBase): async def on_send_leave_request(self, origin, content, room_id): logger.debug("on_send_leave_request: content: %s", content) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) - pdu = event_from_pdu_json(content, format_ver) + room_version = await self.store.get_room_version(room_id) + pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) - pdu = await self._check_sigs_and_hash(room_version, pdu) + pdu = await self._check_sigs_and_hash(room_version.identifier, pdu) await self.handler.on_send_leave_request(origin, pdu) return {} @@ -498,15 +486,14 @@ class FederationServer(FederationBase): origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, room_id) - room_version = await self.store.get_room_version_id(room_id) - format_ver = room_version_to_event_format(room_version) + room_version = await self.store.get_room_version(room_id) auth_chain = [ - event_from_pdu_json(e, format_ver) for e in content["auth_chain"] + event_from_pdu_json(e, room_version) for e in content["auth_chain"] ] signed_auth = await self._check_sigs_and_hash_and_fetch( - origin, auth_chain, outlier=True, room_version=room_version + origin, auth_chain, outlier=True, room_version=room_version.identifier ) ret = await self.handler.on_query_auth( diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index b4d92cf732..132e35651d 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -99,6 +99,7 @@ class FederationTestCase(unittest.HomeserverTestCase): user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") room_id = 
self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(self.store.get_room_version(room_id)) # pretend that another server has joined join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) @@ -120,7 +121,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, - join_event.format_version, + room_version, ) with LoggingContext(request="send_rejected"): @@ -149,6 +150,7 @@ class FederationTestCase(unittest.HomeserverTestCase): user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + room_version = self.get_success(self.store.get_room_version(room_id)) # pretend that another server has joined join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) @@ -171,7 +173,7 @@ class FederationTestCase(unittest.HomeserverTestCase): "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, - join_event.format_version, + room_version, ) with LoggingContext(request="send_rejected"): From 7765bf398996002ee461904915de9d8bc2ea951a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 6 Feb 2020 13:25:24 -0500 Subject: [PATCH 172/213] Limit the number of events that can be requested when backfilling events (#6864) Limit the maximum number of events requested when backfilling events. --- changelog.d/6864.misc | 1 + synapse/handlers/federation.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/6864.misc diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc new file mode 100644 index 0000000000..d24eb68460 --- /dev/null +++ b/changelog.d/6864.misc @@ -0,0 +1 @@ +Limit the number of events that can be requested by the backfill federation API to 100. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 10e8b6ea4c..eb20ef4aec 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1788,6 +1788,9 @@ class FederationHandler(BaseHandler): if not in_room: raise AuthError(403, "Host not in room.") + # Synapse asks for 100 events per backfill request. Do not allow more. + limit = min(limit, 100) + events = yield self.store.get_backfill_events(room_id, pdu_list, limit) events = yield filter_events_for_server(self.storage, origin, events) @@ -2168,6 +2171,7 @@ class FederationHandler(BaseHandler): if not in_room: raise AuthError(403, "Host not in room.") + # Only allow up to 20 events to be retrieved per request. limit = min(limit, 20) missing_events = await self.store.get_missing_events( From f4884444c36d92659b9d7a2a90d42324ab786873 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 09:26:57 +0000 Subject: [PATCH 173/213] remove unused room_version_to_event_format (#6857) --- changelog.d/6857.misc | 1 + synapse/events/__init__.py | 24 +----------------------- 2 files changed, 2 insertions(+), 23 deletions(-) create mode 100644 changelog.d/6857.misc diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6857.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. 
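The helper deleted below did nothing more than a dictionary lookup; after this series, callers hold a full `RoomVersion` object (typically obtained via `store.get_room_version`) and read its `event_format` attribute directly. A minimal sketch of that lookup, using the names from `synapse.api.room_versions` (the function name here is illustrative, not part of the codebase):

```python
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS


def lookup_room_version(room_version_id: str):
    """Illustrative sketch: map a room version string to its RoomVersion."""
    v = KNOWN_ROOM_VERSIONS.get(room_version_id)
    if v is None:
        # this can happen if support is withdrawn for a room version
        raise UnsupportedRoomVersionError()
    # v.event_format is the integer the removed helper used to return
    return v
```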
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 92f76703b3..89d41d82b6 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -21,8 +21,7 @@ import six
 
 from unpaddedbase64 import encode_base64
 
-from synapse.api.errors import UnsupportedRoomVersionError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import EventFormatVersions
 from synapse.types import JsonDict
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze
@@ -408,27 +407,6 @@ class FrozenEventV3(FrozenEventV2):
         return self._event_id
 
 
-def room_version_to_event_format(room_version):
-    """Converts a room version string to the event format
-
-    Args:
-        room_version (str)
-
-    Returns:
-        int
-
-    Raises:
-        UnsupportedRoomVersionError if the room version is unknown
-    """
-    v = KNOWN_ROOM_VERSIONS.get(room_version)
-
-    if not v:
-        # this can happen if support is withdrawn for a room version
-        raise UnsupportedRoomVersionError()
-
-    return v.event_format
-
-
 def event_type_from_format_version(format_version):
     """Returns the python type to use to construct an Event object for the
     given event format version.

From 56ca93ef5941b5dfcda368f373a6bcd80d177acd Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 7 Feb 2020 11:29:36 +0100
Subject: [PATCH 174/213] Admin api to add an email address (#6789)

---
 changelog.d/6769.feature          |  1 +
 docs/admin_api/user_admin_api.rst | 11 +++++++++
 synapse/handlers/admin.py         |  2 ++
 synapse/handlers/auth.py          |  8 +++++++
 synapse/rest/admin/users.py       | 39 +++++++++++++++++++++++++++++++
 tests/rest/admin/test_user.py     | 19 +++++++++++++--
 6 files changed, 78 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/6769.feature

diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature
new file mode 100644
index 0000000000..8a60e12907
--- /dev/null
+++ b/changelog.d/6769.feature
@@ -0,0 +1 @@
+Admin API to add or modify threepids of user accounts.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 0b3d09d694..eb146095de 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -15,6 +15,16 @@ with a body of:
     {
         "password": "user_password",
         "displayname": "User",
+        "threepids": [
+            {
+                "medium": "email",
+                "address": "<user_mail_1>"
+            },
+            {
+                "medium": "email",
+                "address": "<user_mail_2>"
+            }
+        ],
         "avatar_url": "<avatar_url>",
         "admin": false,
         "deactivated": false
@@ -23,6 +33,7 @@ with a body of:
 including an ``access_token`` of a server admin.
 
 The parameter ``displayname`` is optional and defaults to ``user_id``.
+The parameter ``threepids`` is optional.
 The parameter ``avatar_url`` is optional.
 The parameter ``admin`` is optional and defaults to 'false'.
 The parameter ``deactivated`` is optional and defaults to 'false'.
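As a usage illustration only (not part of the patch): assuming a homeserver listening on localhost:8008 and `ADMIN_TOKEN` holding a server admin's access token, the v2 user admin endpoint documented above can be exercised with a short script. The URL and the `requests` dependency are assumptions, not something this diff introduces; the user id and status codes mirror the tests further down.

```python
import requests  # assumed to be installed; any HTTP client works

ADMIN_TOKEN = "..."  # placeholder: access_token of a server admin

resp = requests.put(
    # endpoint path per the surrounding admin API docs (assumption)
    "http://localhost:8008/_synapse/admin/v2/users/@bob:test",
    headers={"Authorization": "Bearer %s" % (ADMIN_TOKEN,)},
    json={
        "password": "user_password",
        "displayname": "User",
        "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
    },
)
# 201 when the user is created, 200 when an existing user is modified
print(resp.status_code, resp.json())
```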
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 9205865231..f3c0aeceb6 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -58,8 +58,10 @@ class AdminHandler(BaseHandler): ret = await self.store.get_user_by_id(user.to_string()) if ret: profile = await self.store.get_profileinfo(user.localpart) + threepids = await self.store.user_get_threepids(user.to_string()) ret["displayname"] = profile.display_name ret["avatar_url"] = profile.avatar_url + ret["threepids"] = threepids return ret async def export_user_data(self, user_id, writer): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 54a71c49d2..48a88d3c2a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -816,6 +816,14 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): + # check if medium has a valid value + if medium not in ["email", "msisdn"]: + raise SynapseError( + code=400, + msg=("'%s' is not a valid value for 'medium'" % (medium,)), + errcode=Codes.INVALID_PARAM, + ) + # 'Canonicalise' email addresses down to lower case. # We've now moving towards the homeserver being the entity that # is responsible for validating threepids used for resetting passwords diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f1c4434f5c..e75c5f1370 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -136,6 +136,8 @@ class UserRestServletV2(RestServlet): self.hs = hs self.auth = hs.get_auth() self.admin_handler = hs.get_handlers().admin_handler + self.store = hs.get_datastore() + self.auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.set_password_handler = hs.get_set_password_handler() self.deactivate_account_handler = hs.get_deactivate_account_handler() @@ -163,6 +165,7 @@ class UserRestServletV2(RestServlet): raise SynapseError(400, "This endpoint can only be used with local users") user = await self.admin_handler.get_user(target_user) + user_id = target_user.to_string() if user: # modify user if "displayname" in body: @@ -170,6 +173,29 @@ class UserRestServletV2(RestServlet): target_user, requester, body["displayname"], True ) + if "threepids" in body: + # check for required parameters for each threepid + for threepid in body["threepids"]: + assert_params_in_dict(threepid, ["medium", "address"]) + + # remove old threepids from user + threepids = await self.store.user_get_threepids(user_id) + for threepid in threepids: + try: + await self.auth_handler.delete_threepid( + user_id, threepid["medium"], threepid["address"], None + ) + except Exception: + logger.exception("Failed to remove threepids") + raise SynapseError(500, "Failed to remove threepids") + + # add new threepids to user + current_time = self.hs.get_clock().time_msec() + for threepid in body["threepids"]: + await self.auth_handler.add_threepid( + user_id, threepid["medium"], threepid["address"], current_time + ) + if "avatar_url" in body: await self.profile_handler.set_avatar_url( target_user, requester, body["avatar_url"], True @@ -221,6 +247,7 @@ class UserRestServletV2(RestServlet): admin = body.get("admin", None) user_type = body.get("user_type", None) displayname = body.get("displayname", None) + threepids = body.get("threepids", None) if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: raise SynapseError(400, "Invalid user type") @@ -232,6 +259,18 @@ class UserRestServletV2(RestServlet): default_display_name=displayname, 
user_type=user_type, ) + + if "threepids" in body: + # check for required parameters for each threepid + for threepid in body["threepids"]: + assert_params_in_dict(threepid, ["medium", "address"]) + + current_time = self.hs.get_clock().time_msec() + for threepid in body["threepids"]: + await self.auth_handler.add_threepid( + user_id, threepid["medium"], threepid["address"], current_time + ) + if "avatar_url" in body: await self.profile_handler.set_avatar_url( user_id, requester, body["avatar_url"], True diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 8f09f51c61..3b5169b38d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -407,7 +407,13 @@ class UserRestTestCase(unittest.HomeserverTestCase): """ self.hs.config.registration_shared_secret = None - body = json.dumps({"password": "abc123", "admin": True}) + body = json.dumps( + { + "password": "abc123", + "admin": True, + "threepids": [{"medium": "email", "address": "bob@bob.bob"}], + } + ) # Create user request, channel = self.make_request( @@ -421,6 +427,8 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("bob", channel.json_body["displayname"]) + self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) # Get user request, channel = self.make_request( @@ -449,7 +457,13 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Modify user - body = json.dumps({"displayname": "foobar", "deactivated": True}) + body = json.dumps( + { + "displayname": "foobar", + "deactivated": True, + "threepids": [{"medium": "email", "address": "bob2@bob.bob"}], + } + ) request, channel = self.make_request( "PUT", @@ -463,6 +477,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("foobar", channel.json_body["displayname"]) self.assertEqual(True, channel.json_body["deactivated"]) + # the user is deactivated, the threepid will be deleted # Get user request, channel = self.make_request( From de2d267375069c2d22bceb0d6ef9c6f5a77380e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 11:14:19 +0000 Subject: [PATCH 175/213] Allow moving group read APIs to workers (#6866) --- changelog.d/6866.feature | 1 + docs/workers.md | 8 + synapse/app/client_reader.py | 3 + synapse/app/federation_reader.py | 2 + synapse/groups/groups_server.py | 379 ++++----- synapse/handlers/groups_local.py | 270 +++---- synapse/replication/slave/storage/groups.py | 14 +- synapse/server.py | 14 +- .../storage/data_stores/main/group_server.py | 720 +++++++++--------- 9 files changed, 723 insertions(+), 688 deletions(-) create mode 100644 changelog.d/6866.feature diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature new file mode 100644 index 0000000000..256feab6ff --- /dev/null +++ b/changelog.d/6866.feature @@ -0,0 +1 @@ +Add ability to run some group APIs on workers. 
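The mechanical pattern of this refactor, reduced to a toy sketch (all names below are illustrative, not Synapse's): read-only methods move onto a worker-safe base class, mutating methods and background jobs stay on a subclass that only the main process instantiates, and a builder picks the variant from the worker configuration:

```python
class ReadOnlyGroupsHandler:
    """Read side: safe to instantiate on any worker process."""

    def __init__(self, store):
        self.store = store

    def get_group_profile(self, group_id):
        return self.store.read_profile(group_id)


class FullGroupsHandler(ReadOnlyGroupsHandler):
    """Write side: main process only, so side effects run exactly once."""

    def update_group_profile(self, group_id, profile):
        return self.store.write_profile(group_id, profile)


def build_groups_handler(worker_app, store):
    # mirrors HomeServer.build_groups_server_handler in the diff below
    return ReadOnlyGroupsHandler(store) if worker_app else FullGroupsHandler(store)
```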
diff --git a/docs/workers.md b/docs/workers.md index 09a9d8a7b8..82442d6a0a 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -177,8 +177,13 @@ endpoints matching the following regular expressions: ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ ^/_matrix/federation/v1/send/ + ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query +Additionally, the following REST endpoints can be handled for GET requests: + + ^/_matrix/federation/v1/groups/ + The above endpoints should all be routed to the federation_reader worker by the reverse-proxy configuration. @@ -254,10 +259,13 @@ following regular expressions: ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$ ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ + ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ + ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$ Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$ + ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$ Additionally, the following REST endpoints can be handled, but all requests must be routed to the same instance: diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ca96da6a4a..7fa91a3b11 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import ( RoomStateRestServlet, ) from synapse.rest.client.v1.voip import VoipRestServlet +from synapse.rest.client.v2_alpha import groups from synapse.rest.client.v2_alpha.account import ThreepidRestServlet from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet @@ -124,6 +125,8 @@ class ClientReaderServer(HomeServer): PushRuleRestServlet(self).register(resource) VersionsRestServlet(self).register(resource) + groups.register_servlets(self, resource) + resources.update({"/_matrix/client": resource}) root_resource = create_resource_tree(resources, NoResource()) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 1f1cea1416..5e17ef1396 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -35,6 +35,7 @@ from synapse.replication.slave.storage.account_data import SlavedAccountDataStor from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore @@ -66,6 +67,7 @@ class FederationReaderSlavedStore( SlavedEventStore, SlavedKeyStore, SlavedRegistrationStore, + SlavedGroupServerStore, RoomStore, DirectoryStore, SlavedTransactionStore, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 0ec9be3cb5..c106abae21 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) # TODO: Flairs -class GroupsServerHandler(object): +class GroupsServerWorkerHandler(object): def __init__(self, hs): self.hs = hs self.store = hs.get_datastore() @@ -51,9 +51,6 @@ class 
GroupsServerHandler(object):
         self.transport_client = hs.get_federation_transport_client()
         self.profile_handler = hs.get_profile_handler()
 
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
     @defer.inlineCallbacks
     def check_group_is_ours(
         self, group_id, requester_user_id, and_exists=False, and_is_admin=None
@@ -167,68 +164,6 @@ class GroupsServerHandler(object):
             "user": membership_info,
         }
 
-    @defer.inlineCallbacks
-    def update_group_summary_room(
-        self, group_id, requester_user_id, room_id, category_id, content
-    ):
-        """Add/update a room to the group summary
-        """
-        yield self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        RoomID.from_string(room_id)  # Ensure valid room id
-
-        order = content.get("order", None)
-
-        is_public = _parse_visibility_from_contents(content)
-
-        yield self.store.add_room_to_summary(
-            group_id=group_id,
-            room_id=room_id,
-            category_id=category_id,
-            order=order,
-            is_public=is_public,
-        )
-
-        return {}
-
-    @defer.inlineCallbacks
-    def delete_group_summary_room(
-        self, group_id, requester_user_id, room_id, category_id
-    ):
-        """Remove a room from the summary
-        """
-        yield self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        yield self.store.remove_room_from_summary(
-            group_id=group_id, room_id=room_id, category_id=category_id
-        )
-
-        return {}
-
-    @defer.inlineCallbacks
-    def set_group_join_policy(self, group_id, requester_user_id, content):
-        """Sets the group join policy.
-
-        Currently supported policies are:
-         - "invite": an invite must be received and accepted in order to join.
-         - "open": anyone can join.
-        """
-        yield self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        join_policy = _parse_join_policy_from_contents(content)
-        if join_policy is None:
-            raise SynapseError(400, "No value specified for 'm.join_policy'")
-
-        yield self.store.set_group_join_policy(group_id, join_policy=join_policy)
-
-        return {}
-
     @defer.inlineCallbacks
     def get_group_categories(self, group_id, requester_user_id):
         """Get all categories in a group (as seen by user)
@@ -248,42 +183,8 @@ class GroupsServerHandler(object):
             group_id=group_id, category_id=category_id
         )
 
         return res
 
-    @defer.inlineCallbacks
-    def update_group_category(self, group_id, requester_user_id, category_id, content):
-        """Add/Update a group category
-        """
-        yield self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        is_public = _parse_visibility_from_contents(content)
-        profile = content.get("profile")
-
-        yield self.store.upsert_group_category(
-            group_id=group_id,
-            category_id=category_id,
-            is_public=is_public,
-            profile=profile,
-        )
-
-        return {}
-
-    @defer.inlineCallbacks
-    def delete_group_category(self, group_id, requester_user_id, category_id):
-        """Delete a group category
-        """
-        yield self.check_group_is_ours(
-            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
-        )
-
-        yield self.store.remove_group_category(
-            group_id=group_id, category_id=category_id
-        )
-
-        return {}
-
     @defer.inlineCallbacks
     def get_group_roles(self, group_id, requester_user_id):
         """Get all roles in a group (as seen by user)
@@ -302,74 +205,6 @@ class GroupsServerHandler(object):
         res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
         return res
 
-    @defer.inlineCallbacks
-    def 
update_group_role(self, group_id, requester_user_id, role_id, content): - """Add/update a role in a group - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - - profile = content.get("profile") - - yield self.store.upsert_group_role( - group_id=group_id, role_id=role_id, is_public=is_public, profile=profile - ) - - return {} - - @defer.inlineCallbacks - def delete_group_role(self, group_id, requester_user_id, role_id): - """Remove role from group - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_group_role(group_id=group_id, role_id=role_id) - - return {} - - @defer.inlineCallbacks - def update_group_summary_user( - self, group_id, requester_user_id, user_id, role_id, content - ): - """Add/update a users entry in the group summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - order = content.get("order", None) - - is_public = _parse_visibility_from_contents(content) - - yield self.store.add_user_to_summary( - group_id=group_id, - user_id=user_id, - role_id=role_id, - order=order, - is_public=is_public, - ) - - return {} - - @defer.inlineCallbacks - def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): - """Remove a user from the group summary - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - yield self.store.remove_user_from_summary( - group_id=group_id, user_id=user_id, role_id=role_id - ) - - return {} - @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): """Get the group profile as seen by requester_user_id @@ -394,24 +229,6 @@ class GroupsServerHandler(object): else: raise SynapseError(404, "Unknown group") - @defer.inlineCallbacks - def update_group_profile(self, group_id, requester_user_id, content): - """Update the group profile - """ - yield self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - profile = {} - for keyname in ("name", "avatar_url", "short_description", "long_description"): - if keyname in content: - value = content[keyname] - if not isinstance(value, string_types): - raise SynapseError(400, "%r value is not a string" % (keyname,)) - profile[keyname] = value - - yield self.store.update_group_profile(group_id, profile) - @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): """Get the users in group as seen by requester_user_id. 
@@ -530,6 +347,196 @@ class GroupsServerHandler(object): return {"chunk": chunk, "total_room_count_estimate": len(room_results)} + +class GroupsServerHandler(GroupsServerWorkerHandler): + def __init__(self, hs): + super(GroupsServerHandler, self).__init__(hs) + + # Ensure attestations get renewed + hs.get_groups_attestation_renewer() + + @defer.inlineCallbacks + def update_group_summary_room( + self, group_id, requester_user_id, room_id, category_id, content + ): + """Add/update a room to the group summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + RoomID.from_string(room_id) # Ensure valid room id + + order = content.get("order", None) + + is_public = _parse_visibility_from_contents(content) + + yield self.store.add_room_to_summary( + group_id=group_id, + room_id=room_id, + category_id=category_id, + order=order, + is_public=is_public, + ) + + return {} + + @defer.inlineCallbacks + def delete_group_summary_room( + self, group_id, requester_user_id, room_id, category_id + ): + """Remove a room from the summary + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_room_from_summary( + group_id=group_id, room_id=room_id, category_id=category_id + ) + + return {} + + @defer.inlineCallbacks + def set_group_join_policy(self, group_id, requester_user_id, content): + """Sets the group join policy. + + Currently supported policies are: + - "invite": an invite must be received and accepted in order to join. + - "open": anyone can join. + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + join_policy = _parse_join_policy_from_contents(content) + if join_policy is None: + raise SynapseError(400, "No value specified for 'm.join_policy'") + + yield self.store.set_group_join_policy(group_id, join_policy=join_policy) + + return {} + + @defer.inlineCallbacks + def update_group_category(self, group_id, requester_user_id, category_id, content): + """Add/Update a group category + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_public = _parse_visibility_from_contents(content) + profile = content.get("profile") + + yield self.store.upsert_group_category( + group_id=group_id, + category_id=category_id, + is_public=is_public, + profile=profile, + ) + + return {} + + @defer.inlineCallbacks + def delete_group_category(self, group_id, requester_user_id, category_id): + """Delete a group category + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + yield self.store.remove_group_category( + group_id=group_id, category_id=category_id + ) + + return {} + + @defer.inlineCallbacks + def update_group_role(self, group_id, requester_user_id, role_id, content): + """Add/update a role in a group + """ + yield self.check_group_is_ours( + group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id + ) + + is_public = _parse_visibility_from_contents(content) + + profile = content.get("profile") + + yield self.store.upsert_group_role( + group_id=group_id, role_id=role_id, is_public=is_public, profile=profile + ) + + return {} + + @defer.inlineCallbacks + def delete_group_role(self, group_id, requester_user_id, role_id): + """Remove role from group + """ + yield self.check_group_is_ours( + group_id, 
requester_user_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        yield self.store.remove_group_role(group_id=group_id, role_id=role_id)
+
+        return {}
+
+    @defer.inlineCallbacks
+    def update_group_summary_user(
+        self, group_id, requester_user_id, user_id, role_id, content
+    ):
+        """Add/update a users entry in the group summary
+        """
+        yield self.check_group_is_ours(
+            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        order = content.get("order", None)
+
+        is_public = _parse_visibility_from_contents(content)
+
+        yield self.store.add_user_to_summary(
+            group_id=group_id,
+            user_id=user_id,
+            role_id=role_id,
+            order=order,
+            is_public=is_public,
+        )
+
+        return {}
+
+    @defer.inlineCallbacks
+    def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id):
+        """Remove a user from the group summary
+        """
+        yield self.check_group_is_ours(
+            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        yield self.store.remove_user_from_summary(
+            group_id=group_id, user_id=user_id, role_id=role_id
+        )
+
+        return {}
+
+    @defer.inlineCallbacks
+    def update_group_profile(self, group_id, requester_user_id, content):
+        """Update the group profile
+        """
+        yield self.check_group_is_ours(
+            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        profile = {}
+        for keyname in ("name", "avatar_url", "short_description", "long_description"):
+            if keyname in content:
+                value = content[keyname]
+                if not isinstance(value, string_types):
+                    raise SynapseError(400, "%r value is not a string" % (keyname,))
+                profile[keyname] = value
+
+        yield self.store.update_group_profile(group_id, profile)
+
     @defer.inlineCallbacks
     def add_room_to_group(self, group_id, requester_user_id, room_id, content):
         """Add room to group
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 319565510f..ad22415782 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -63,7 +63,7 @@ def _create_rerouter(func_name):
     return f
 
 
-class GroupsLocalHandler(object):
+class GroupsLocalWorkerHandler(object):
     def __init__(self, hs):
         self.hs = hs
         self.store = hs.get_datastore()
@@ -81,40 +81,18 @@ class GroupsLocalHandler(object):
 
         self.profile_handler = hs.get_profile_handler()
 
-        # Ensure attestations get renewed
-        hs.get_groups_attestation_renewer()
-
     # The following functions merely route the query to the local groups server
     # or federation depending on if the group is local or remote
 
     get_group_profile = _create_rerouter("get_group_profile")
-    update_group_profile = _create_rerouter("update_group_profile")
 
     get_rooms_in_group = _create_rerouter("get_rooms_in_group")
     get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
-
-    add_room_to_group = _create_rerouter("add_room_to_group")
-    update_room_in_group = _create_rerouter("update_room_in_group")
-    remove_room_from_group = _create_rerouter("remove_room_from_group")
-
-    update_group_summary_room = _create_rerouter("update_group_summary_room")
-    delete_group_summary_room = _create_rerouter("delete_group_summary_room")
-
-    update_group_category = _create_rerouter("update_group_category")
-    delete_group_category = _create_rerouter("delete_group_category")
 
     get_group_category = _create_rerouter("get_group_category")
     get_group_categories = _create_rerouter("get_group_categories")
-
-    update_group_summary_user = _create_rerouter("update_group_summary_user")
-    delete_group_summary_user = 
_create_rerouter("delete_group_summary_user") - - update_group_role = _create_rerouter("update_group_role") - delete_group_role = _create_rerouter("delete_group_role") get_group_role = _create_rerouter("get_group_role") get_group_roles = _create_rerouter("get_group_roles") - set_group_join_policy = _create_rerouter("set_group_join_policy") - @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): """Get the group summary for a group. @@ -169,6 +146,144 @@ class GroupsLocalHandler(object): return res + @defer.inlineCallbacks + def get_users_in_group(self, group_id, requester_user_id): + """Get users in a group + """ + if self.is_mine_id(group_id): + res = yield self.groups_server_handler.get_users_in_group( + group_id, requester_user_id + ) + return res + + group_server_name = get_domain_from_id(group_id) + + try: + res = yield self.transport_client.get_users_in_group( + get_domain_from_id(group_id), group_id, requester_user_id + ) + except HttpResponseException as e: + raise e.to_synapse_error() + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") + + chunk = res["chunk"] + valid_entries = [] + for entry in chunk: + g_user_id = entry["user_id"] + attestation = entry.pop("attestation", {}) + try: + if get_domain_from_id(g_user_id) != group_server_name: + yield self.attestations.verify_attestation( + attestation, + group_id=group_id, + user_id=g_user_id, + server_name=get_domain_from_id(g_user_id), + ) + valid_entries.append(entry) + except Exception as e: + logger.info("Failed to verify user is in group: %s", e) + + res["chunk"] = valid_entries + + return res + + @defer.inlineCallbacks + def get_joined_groups(self, user_id): + group_ids = yield self.store.get_joined_groups(user_id) + return {"groups": group_ids} + + @defer.inlineCallbacks + def get_publicised_groups_for_user(self, user_id): + if self.hs.is_mine_id(user_id): + result = yield self.store.get_publicised_groups_for_user(user_id) + + # Check AS associated groups for this user - this depends on the + # RegExps in the AS registration file (under `users`) + for app_service in self.store.get_app_services(): + result.extend(app_service.get_groups_for_user(user_id)) + + return {"groups": result} + else: + try: + bulk_result = yield self.transport_client.bulk_get_publicised_groups( + get_domain_from_id(user_id), [user_id] + ) + except HttpResponseException as e: + raise e.to_synapse_error() + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") + + result = bulk_result.get("users", {}).get(user_id) + # TODO: Verify attestations + return {"groups": result} + + @defer.inlineCallbacks + def bulk_get_publicised_groups(self, user_ids, proxy=True): + destinations = {} + local_users = set() + + for user_id in user_ids: + if self.hs.is_mine_id(user_id): + local_users.add(user_id) + else: + destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id) + + if not proxy and destinations: + raise SynapseError(400, "Some user_ids are not local") + + results = {} + failed_results = [] + for destination, dest_user_ids in iteritems(destinations): + try: + r = yield self.transport_client.bulk_get_publicised_groups( + destination, list(dest_user_ids) + ) + results.update(r["users"]) + except Exception: + failed_results.extend(dest_user_ids) + + for uid in local_users: + results[uid] = yield self.store.get_publicised_groups_for_user(uid) + + # Check AS associated groups for this user - this depends on the + # RegExps in the AS registration file (under 
`users`) + for app_service in self.store.get_app_services(): + results[uid].extend(app_service.get_groups_for_user(uid)) + + return {"users": results} + + +class GroupsLocalHandler(GroupsLocalWorkerHandler): + def __init__(self, hs): + super(GroupsLocalHandler, self).__init__(hs) + + # Ensure attestations get renewed + hs.get_groups_attestation_renewer() + + # The following functions merely route the query to the local groups server + # or federation depending on if the group is local or remote + + update_group_profile = _create_rerouter("update_group_profile") + + add_room_to_group = _create_rerouter("add_room_to_group") + update_room_in_group = _create_rerouter("update_room_in_group") + remove_room_from_group = _create_rerouter("remove_room_from_group") + + update_group_summary_room = _create_rerouter("update_group_summary_room") + delete_group_summary_room = _create_rerouter("delete_group_summary_room") + + update_group_category = _create_rerouter("update_group_category") + delete_group_category = _create_rerouter("delete_group_category") + + update_group_summary_user = _create_rerouter("update_group_summary_user") + delete_group_summary_user = _create_rerouter("delete_group_summary_user") + + update_group_role = _create_rerouter("update_group_role") + delete_group_role = _create_rerouter("delete_group_role") + + set_group_join_policy = _create_rerouter("set_group_join_policy") + @defer.inlineCallbacks def create_group(self, group_id, user_id, content): """Create a group @@ -219,48 +334,6 @@ class GroupsLocalHandler(object): return res - @defer.inlineCallbacks - def get_users_in_group(self, group_id, requester_user_id): - """Get users in a group - """ - if self.is_mine_id(group_id): - res = yield self.groups_server_handler.get_users_in_group( - group_id, requester_user_id - ) - return res - - group_server_name = get_domain_from_id(group_id) - - try: - res = yield self.transport_client.get_users_in_group( - get_domain_from_id(group_id), group_id, requester_user_id - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - chunk = res["chunk"] - valid_entries = [] - for entry in chunk: - g_user_id = entry["user_id"] - attestation = entry.pop("attestation", {}) - try: - if get_domain_from_id(g_user_id) != group_server_name: - yield self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - server_name=get_domain_from_id(g_user_id), - ) - valid_entries.append(entry) - except Exception as e: - logger.info("Failed to verify user is in group: %s", e) - - res["chunk"] = valid_entries - - return res - @defer.inlineCallbacks def join_group(self, group_id, user_id, content): """Request to join a group @@ -452,68 +525,3 @@ class GroupsLocalHandler(object): group_id, user_id, membership="leave" ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - - @defer.inlineCallbacks - def get_joined_groups(self, user_id): - group_ids = yield self.store.get_joined_groups(user_id) - return {"groups": group_ids} - - @defer.inlineCallbacks - def get_publicised_groups_for_user(self, user_id): - if self.hs.is_mine_id(user_id): - result = yield self.store.get_publicised_groups_for_user(user_id) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - result.extend(app_service.get_groups_for_user(user_id)) - - return {"groups": result} - else: - 
try: - bulk_result = yield self.transport_client.bulk_get_publicised_groups( - get_domain_from_id(user_id), [user_id] - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - result = bulk_result.get("users", {}).get(user_id) - # TODO: Verify attestations - return {"groups": result} - - @defer.inlineCallbacks - def bulk_get_publicised_groups(self, user_ids, proxy=True): - destinations = {} - local_users = set() - - for user_id in user_ids: - if self.hs.is_mine_id(user_id): - local_users.add(user_id) - else: - destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id) - - if not proxy and destinations: - raise SynapseError(400, "Some user_ids are not local") - - results = {} - failed_results = [] - for destination, dest_user_ids in iteritems(destinations): - try: - r = yield self.transport_client.bulk_get_publicised_groups( - destination, list(dest_user_ids) - ) - results.update(r["users"]) - except Exception: - failed_results.extend(dest_user_ids) - - for uid in local_users: - results[uid] = yield self.store.get_publicised_groups_for_user(uid) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - results[uid].extend(app_service.get_groups_for_user(uid)) - - return {"users": results} diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 69a4ae42f9..2d4fd08cf5 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -13,15 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.storage import DataStore +from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker +from synapse.storage.data_stores.main.group_server import GroupServerWorkerStore from synapse.storage.database import Database from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore, __func__ -from ._slaved_id_tracker import SlavedIdTracker - -class SlavedGroupServerStore(BaseSlavedStore): +class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore): def __init__(self, database: Database, db_conn, hs): super(SlavedGroupServerStore, self).__init__(database, db_conn, hs) @@ -35,9 +34,8 @@ class SlavedGroupServerStore(BaseSlavedStore): self._group_updates_id_gen.get_current_token(), ) - get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user) - get_group_stream_token = __func__(DataStore.get_group_stream_token) - get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user) + def get_group_stream_token(self): + return self._group_updates_id_gen.get_current_token() def stream_positions(self): result = super(SlavedGroupServerStore, self).stream_positions() diff --git a/synapse/server.py b/synapse/server.py index 7926867b77..fd2f69e928 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -50,7 +50,7 @@ from synapse.federation.send_queue import FederationRemoteSendQueue from synapse.federation.sender import FederationSender from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer -from synapse.groups.groups_server import GroupsServerHandler +from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler from synapse.handlers import Handlers from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler @@ -62,7 +62,7 @@ from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler from synapse.handlers.events import EventHandler, EventStreamHandler -from synapse.handlers.groups_local import GroupsLocalHandler +from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler @@ -460,10 +460,16 @@ class HomeServer(object): return UserDirectoryHandler(self) def build_groups_local_handler(self): - return GroupsLocalHandler(self) + if self.config.worker_app: + return GroupsLocalWorkerHandler(self) + else: + return GroupsLocalHandler(self) def build_groups_server_handler(self): - return GroupsServerHandler(self) + if self.config.worker_app: + return GroupsServerWorkerHandler(self) + else: + return GroupsServerHandler(self) def build_groups_attestation_signing(self): return GroupAttestationSigning(self) diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py index 6acd45e9f3..0963e6c250 100644 --- a/synapse/storage/data_stores/main/group_server.py +++ b/synapse/storage/data_stores/main/group_server.py @@ -27,21 +27,7 @@ _DEFAULT_CATEGORY_ID = "" _DEFAULT_ROLE_ID = "" -class GroupServerStore(SQLBaseStore): - def set_group_join_policy(self, group_id, join_policy): - 
"""Set the join policy of a group. - - join_policy can be one of: - * "invite" - * "open" - """ - return self.db.simple_update_one( - table="groups", - keyvalues={"group_id": group_id}, - updatevalues={"join_policy": join_policy}, - desc="set_group_join_policy", - ) - +class GroupServerWorkerStore(SQLBaseStore): def get_group(self, group_id): return self.db.simple_select_one( table="groups", @@ -157,6 +143,366 @@ class GroupServerStore(SQLBaseStore): "get_rooms_for_summary", _get_rooms_for_summary_txn ) + @defer.inlineCallbacks + def get_group_categories(self, group_id): + rows = yield self.db.simple_select_list( + table="group_room_categories", + keyvalues={"group_id": group_id}, + retcols=("category_id", "is_public", "profile"), + desc="get_group_categories", + ) + + return { + row["category_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + } + + @defer.inlineCallbacks + def get_group_category(self, group_id, category_id): + category = yield self.db.simple_select_one( + table="group_room_categories", + keyvalues={"group_id": group_id, "category_id": category_id}, + retcols=("is_public", "profile"), + desc="get_group_category", + ) + + category["profile"] = json.loads(category["profile"]) + + return category + + @defer.inlineCallbacks + def get_group_roles(self, group_id): + rows = yield self.db.simple_select_list( + table="group_roles", + keyvalues={"group_id": group_id}, + retcols=("role_id", "is_public", "profile"), + desc="get_group_roles", + ) + + return { + row["role_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), + } + for row in rows + } + + @defer.inlineCallbacks + def get_group_role(self, group_id, role_id): + role = yield self.db.simple_select_one( + table="group_roles", + keyvalues={"group_id": group_id, "role_id": role_id}, + retcols=("is_public", "profile"), + desc="get_group_role", + ) + + role["profile"] = json.loads(role["profile"]) + + return role + + def get_local_groups_for_room(self, room_id): + """Get all of the local group that contain a given room + Args: + room_id (str): The ID of a room + Returns: + Deferred[list[str]]: A twisted.Deferred containing a list of group ids + containing this room + """ + return self.db.simple_select_onecol( + table="group_rooms", + keyvalues={"room_id": room_id}, + retcol="group_id", + desc="get_local_groups_for_room", + ) + + def get_users_for_summary_by_role(self, group_id, include_private=False): + """Get the users and roles that should be included in a summary request + + Returns ([users], [roles]) + """ + + def _get_users_for_summary_txn(txn): + keyvalues = {"group_id": group_id} + if not include_private: + keyvalues["is_public"] = True + + sql = """ + SELECT user_id, is_public, role_id, user_order + FROM group_summary_users + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" + txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + users = [ + { + "user_id": row[0], + "is_public": row[1], + "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None, + "order": row[3], + } + for row in txn + ] + + sql = """ + SELECT role_id, is_public, profile, role_order + FROM group_summary_roles + INNER JOIN group_roles USING (group_id, role_id) + WHERE group_id = ? + """ + + if not include_private: + sql += " AND is_public = ?" 
+ txn.execute(sql, (group_id, True)) + else: + txn.execute(sql, (group_id,)) + + roles = { + row[0]: { + "is_public": row[1], + "profile": json.loads(row[2]), + "order": row[3], + } + for row in txn + } + + return users, roles + + return self.db.runInteraction( + "get_users_for_summary_by_role", _get_users_for_summary_txn + ) + + def is_user_in_group(self, user_id, group_id): + return self.db.simple_select_one_onecol( + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + allow_none=True, + desc="is_user_in_group", + ).addCallback(lambda r: bool(r)) + + def is_user_admin_in_group(self, group_id, user_id): + return self.db.simple_select_one_onecol( + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="is_admin", + allow_none=True, + desc="is_user_admin_in_group", + ) + + def is_user_invited_to_local_group(self, group_id, user_id): + """Has the group server invited a user? + """ + return self.db.simple_select_one_onecol( + table="group_invites", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + desc="is_user_invited_to_local_group", + allow_none=True, + ) + + def get_users_membership_info_in_group(self, group_id, user_id): + """Get a dict describing the membership of a user in a group. + + Example if joined: + + { + "membership": "join", + "is_public": True, + "is_privileged": False, + } + + Returns an empty dict if the user is not join/invite/etc + """ + + def _get_users_membership_in_group_txn(txn): + row = self.db.simple_select_one_txn( + txn, + table="group_users", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcols=("is_admin", "is_public"), + allow_none=True, + ) + + if row: + return { + "membership": "join", + "is_public": row["is_public"], + "is_privileged": row["is_admin"], + } + + row = self.db.simple_select_one_onecol_txn( + txn, + table="group_invites", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcol="user_id", + allow_none=True, + ) + + if row: + return {"membership": "invite"} + + return {} + + return self.db.runInteraction( + "get_users_membership_info_in_group", _get_users_membership_in_group_txn + ) + + def get_publicised_groups_for_user(self, user_id): + """Get all groups a user is publicising + """ + return self.db.simple_select_onecol( + table="local_group_membership", + keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True}, + retcol="group_id", + desc="get_publicised_groups_for_user", + ) + + def get_attestations_need_renewals(self, valid_until_ms): + """Get all attestations that need to be renewed until givent time + """ + + def _get_attestations_need_renewals_txn(txn): + sql = """ + SELECT group_id, user_id FROM group_attestations_renewals + WHERE valid_until_ms <= ? + """ + txn.execute(sql, (valid_until_ms,)) + return self.db.cursor_to_dict(txn) + + return self.db.runInteraction( + "get_attestations_need_renewals", _get_attestations_need_renewals_txn + ) + + @defer.inlineCallbacks + def get_remote_attestation(self, group_id, user_id): + """Get the attestation that proves the remote agrees that the user is + in the group. 
+ """ + row = yield self.db.simple_select_one( + table="group_attestations_remote", + keyvalues={"group_id": group_id, "user_id": user_id}, + retcols=("valid_until_ms", "attestation_json"), + desc="get_remote_attestation", + allow_none=True, + ) + + now = int(self._clock.time_msec()) + if row and now < row["valid_until_ms"]: + return json.loads(row["attestation_json"]) + + return None + + def get_joined_groups(self, user_id): + return self.db.simple_select_onecol( + table="local_group_membership", + keyvalues={"user_id": user_id, "membership": "join"}, + retcol="group_id", + desc="get_joined_groups", + ) + + def get_all_groups_for_user(self, user_id, now_token): + def _get_all_groups_for_user_txn(txn): + sql = """ + SELECT group_id, type, membership, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND membership != 'leave' + AND stream_id <= ? + """ + txn.execute(sql, (user_id, now_token)) + return [ + { + "group_id": row[0], + "type": row[1], + "membership": row[2], + "content": json.loads(row[3]), + } + for row in txn + ] + + return self.db.runInteraction( + "get_all_groups_for_user", _get_all_groups_for_user_txn + ) + + def get_groups_changes_for_user(self, user_id, from_token, to_token): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_entity_changed( + user_id, from_token + ) + if not has_changed: + return defer.succeed([]) + + def _get_groups_changes_for_user_txn(txn): + sql = """ + SELECT group_id, membership, type, u.content + FROM local_group_updates AS u + INNER JOIN local_group_membership USING (group_id, user_id) + WHERE user_id = ? AND ? < stream_id AND stream_id <= ? + """ + txn.execute(sql, (user_id, from_token, to_token)) + return [ + { + "group_id": group_id, + "membership": membership, + "type": gtype, + "content": json.loads(content_json), + } + for group_id, membership, gtype, content_json in txn + ] + + return self.db.runInteraction( + "get_groups_changes_for_user", _get_groups_changes_for_user_txn + ) + + def get_all_groups_changes(self, from_token, to_token, limit): + from_token = int(from_token) + has_changed = self._group_updates_stream_cache.has_any_entity_changed( + from_token + ) + if not has_changed: + return defer.succeed([]) + + def _get_all_groups_changes_txn(txn): + sql = """ + SELECT stream_id, group_id, user_id, type, content + FROM local_group_updates + WHERE ? < stream_id AND stream_id <= ? + LIMIT ? + """ + txn.execute(sql, (from_token, to_token, limit)) + return [ + (stream_id, group_id, user_id, gtype, json.loads(content_json)) + for stream_id, group_id, user_id, gtype, content_json in txn + ] + + return self.db.runInteraction( + "get_all_groups_changes", _get_all_groups_changes_txn + ) + + +class GroupServerStore(GroupServerWorkerStore): + def set_group_join_policy(self, group_id, join_policy): + """Set the join policy of a group. 
+ + join_policy can be one of: + * "invite" + * "open" + """ + return self.db.simple_update_one( + table="groups", + keyvalues={"group_id": group_id}, + updatevalues={"join_policy": join_policy}, + desc="set_group_join_policy", + ) + def add_room_to_summary(self, group_id, room_id, category_id, order, is_public): return self.db.runInteraction( "add_room_to_summary", @@ -299,36 +645,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_room_from_summary", ) - @defer.inlineCallbacks - def get_group_categories(self, group_id): - rows = yield self.db.simple_select_list( - table="group_room_categories", - keyvalues={"group_id": group_id}, - retcols=("category_id", "is_public", "profile"), - desc="get_group_categories", - ) - - return { - row["category_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows - } - - @defer.inlineCallbacks - def get_group_category(self, group_id, category_id): - category = yield self.db.simple_select_one( - table="group_room_categories", - keyvalues={"group_id": group_id, "category_id": category_id}, - retcols=("is_public", "profile"), - desc="get_group_category", - ) - - category["profile"] = json.loads(category["profile"]) - - return category - def upsert_group_category(self, group_id, category_id, profile, is_public): """Add/update room category for group """ @@ -360,36 +676,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_group_category", ) - @defer.inlineCallbacks - def get_group_roles(self, group_id): - rows = yield self.db.simple_select_list( - table="group_roles", - keyvalues={"group_id": group_id}, - retcols=("role_id", "is_public", "profile"), - desc="get_group_roles", - ) - - return { - row["role_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows - } - - @defer.inlineCallbacks - def get_group_role(self, group_id, role_id): - role = yield self.db.simple_select_one( - table="group_roles", - keyvalues={"group_id": group_id, "role_id": role_id}, - retcols=("is_public", "profile"), - desc="get_group_role", - ) - - role["profile"] = json.loads(role["profile"]) - - return role - def upsert_group_role(self, group_id, role_id, profile, is_public): """Add/remove user role """ @@ -555,100 +841,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_user_from_summary", ) - def get_local_groups_for_room(self, room_id): - """Get all of the local group that contain a given room - Args: - room_id (str): The ID of a room - Returns: - Deferred[list[str]]: A twisted.Deferred containing a list of group ids - containing this room - """ - return self.db.simple_select_onecol( - table="group_rooms", - keyvalues={"room_id": room_id}, - retcol="group_id", - desc="get_local_groups_for_room", - ) - - def get_users_for_summary_by_role(self, group_id, include_private=False): - """Get the users and roles that should be included in a summary request - - Returns ([users], [roles]) - """ - - def _get_users_for_summary_txn(txn): - keyvalues = {"group_id": group_id} - if not include_private: - keyvalues["is_public"] = True - - sql = """ - SELECT user_id, is_public, role_id, user_order - FROM group_summary_users - WHERE group_id = ? - """ - - if not include_private: - sql += " AND is_public = ?" 
- txn.execute(sql, (group_id, True)) - else: - txn.execute(sql, (group_id,)) - - users = [ - { - "user_id": row[0], - "is_public": row[1], - "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None, - "order": row[3], - } - for row in txn - ] - - sql = """ - SELECT role_id, is_public, profile, role_order - FROM group_summary_roles - INNER JOIN group_roles USING (group_id, role_id) - WHERE group_id = ? - """ - - if not include_private: - sql += " AND is_public = ?" - txn.execute(sql, (group_id, True)) - else: - txn.execute(sql, (group_id,)) - - roles = { - row[0]: { - "is_public": row[1], - "profile": json.loads(row[2]), - "order": row[3], - } - for row in txn - } - - return users, roles - - return self.db.runInteraction( - "get_users_for_summary_by_role", _get_users_for_summary_txn - ) - - def is_user_in_group(self, user_id, group_id): - return self.db.simple_select_one_onecol( - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - allow_none=True, - desc="is_user_in_group", - ).addCallback(lambda r: bool(r)) - - def is_user_admin_in_group(self, group_id, user_id): - return self.db.simple_select_one_onecol( - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="is_admin", - allow_none=True, - desc="is_user_admin_in_group", - ) - def add_group_invite(self, group_id, user_id): """Record that the group server has invited a user """ @@ -658,64 +850,6 @@ class GroupServerStore(SQLBaseStore): desc="add_group_invite", ) - def is_user_invited_to_local_group(self, group_id, user_id): - """Has the group server invited a user? - """ - return self.db.simple_select_one_onecol( - table="group_invites", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - desc="is_user_invited_to_local_group", - allow_none=True, - ) - - def get_users_membership_info_in_group(self, group_id, user_id): - """Get a dict describing the membership of a user in a group. 
- - Example if joined: - - { - "membership": "join", - "is_public": True, - "is_privileged": False, - } - - Returns an empty dict if the user is not join/invite/etc - """ - - def _get_users_membership_in_group_txn(txn): - row = self.db.simple_select_one_txn( - txn, - table="group_users", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcols=("is_admin", "is_public"), - allow_none=True, - ) - - if row: - return { - "membership": "join", - "is_public": row["is_public"], - "is_privileged": row["is_admin"], - } - - row = self.db.simple_select_one_onecol_txn( - txn, - table="group_invites", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcol="user_id", - allow_none=True, - ) - - if row: - return {"membership": "invite"} - - return {} - - return self.db.runInteraction( - "get_users_membership_info_in_group", _get_users_membership_in_group_txn - ) - def add_user_to_group( self, group_id, @@ -846,16 +980,6 @@ class GroupServerStore(SQLBaseStore): "remove_room_from_group", _remove_room_from_group_txn ) - def get_publicised_groups_for_user(self, user_id): - """Get all groups a user is publicising - """ - return self.db.simple_select_onecol( - table="local_group_membership", - keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True}, - retcol="group_id", - desc="get_publicised_groups_for_user", - ) - def update_group_publicity(self, group_id, user_id, publicise): """Update whether the user is publicising their membership of the group """ @@ -1000,22 +1124,6 @@ class GroupServerStore(SQLBaseStore): desc="update_group_profile", ) - def get_attestations_need_renewals(self, valid_until_ms): - """Get all attestations that need to be renewed until givent time - """ - - def _get_attestations_need_renewals_txn(txn): - sql = """ - SELECT group_id, user_id FROM group_attestations_renewals - WHERE valid_until_ms <= ? - """ - txn.execute(sql, (valid_until_ms,)) - return self.db.cursor_to_dict(txn) - - return self.db.runInteraction( - "get_attestations_need_renewals", _get_attestations_need_renewals_txn - ) - def update_attestation_renewal(self, group_id, user_id, attestation): """Update an attestation that we have renewed """ @@ -1054,112 +1162,6 @@ class GroupServerStore(SQLBaseStore): desc="remove_attestation_renewal", ) - @defer.inlineCallbacks - def get_remote_attestation(self, group_id, user_id): - """Get the attestation that proves the remote agrees that the user is - in the group. - """ - row = yield self.db.simple_select_one( - table="group_attestations_remote", - keyvalues={"group_id": group_id, "user_id": user_id}, - retcols=("valid_until_ms", "attestation_json"), - desc="get_remote_attestation", - allow_none=True, - ) - - now = int(self._clock.time_msec()) - if row and now < row["valid_until_ms"]: - return json.loads(row["attestation_json"]) - - return None - - def get_joined_groups(self, user_id): - return self.db.simple_select_onecol( - table="local_group_membership", - keyvalues={"user_id": user_id, "membership": "join"}, - retcol="group_id", - desc="get_joined_groups", - ) - - def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): - sql = """ - SELECT group_id, type, membership, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND membership != 'leave' - AND stream_id <= ? 
- """ - txn.execute(sql, (user_id, now_token)) - return [ - { - "group_id": row[0], - "type": row[1], - "membership": row[2], - "content": json.loads(row[3]), - } - for row in txn - ] - - return self.db.runInteraction( - "get_all_groups_for_user", _get_all_groups_for_user_txn - ) - - def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( - user_id, from_token - ) - if not has_changed: - return defer.succeed([]) - - def _get_groups_changes_for_user_txn(txn): - sql = """ - SELECT group_id, membership, type, u.content - FROM local_group_updates AS u - INNER JOIN local_group_membership USING (group_id, user_id) - WHERE user_id = ? AND ? < stream_id AND stream_id <= ? - """ - txn.execute(sql, (user_id, from_token, to_token)) - return [ - { - "group_id": group_id, - "membership": membership, - "type": gtype, - "content": json.loads(content_json), - } - for group_id, membership, gtype, content_json in txn - ] - - return self.db.runInteraction( - "get_groups_changes_for_user", _get_groups_changes_for_user_txn - ) - - def get_all_groups_changes(self, from_token, to_token, limit): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_any_entity_changed( - from_token - ) - if not has_changed: - return defer.succeed([]) - - def _get_all_groups_changes_txn(txn): - sql = """ - SELECT stream_id, group_id, user_id, type, content - FROM local_group_updates - WHERE ? < stream_id AND stream_id <= ? - LIMIT ? - """ - txn.execute(sql, (from_token, to_token, limit)) - return [ - (stream_id, group_id, user_id, gtype, json.loads(content_json)) - for stream_id, group_id, user_id, gtype, content_json in txn - ] - - return self.db.runInteraction( - "get_all_groups_changes", _get_all_groups_changes_txn - ) - def get_group_stream_token(self): return self._group_updates_id_gen.get_current_token() From b08b0a22d505b1555f511e3f38935a62930ea25d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 13:56:38 +0000 Subject: [PATCH 176/213] Add typing to synapse.federation.sender (#6871) --- changelog.d/6871.misc | 1 + synapse/federation/federation_server.py | 7 +- synapse/federation/sender/__init__.py | 99 +++++++++---------- .../sender/per_destination_queue.py | 88 +++++++++-------- .../federation/sender/transaction_manager.py | 16 +-- synapse/federation/units.py | 23 ++++- synapse/server.pyi | 2 + tests/handlers/test_typing.py | 8 +- tox.ini | 1 + 9 files changed, 138 insertions(+), 107 deletions(-) create mode 100644 changelog.d/6871.misc diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc new file mode 100644 index 0000000000..5161af9983 --- /dev/null +++ b/changelog.d/6871.misc @@ -0,0 +1 @@ +Add typing to `synapse.federation.sender` and port to async/await. 
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2489832a11..a6c966a393 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -294,7 +294,12 @@ class FederationServer(FederationBase): async def _process_edu(edu_dict): received_edus_counter.inc() - edu = Edu(**edu_dict) + edu = Edu( + origin=origin, + destination=self.server_name, + edu_type=edu_dict["edu_type"], + content=edu_dict["content"], + ) await self.registry.on_edu(edu.edu_type, origin, edu.content) await concurrently_execute( diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 36c83c3027..233cb33daf 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +from typing import Dict, Hashable, Iterable, List, Optional, Set from six import itervalues @@ -23,6 +24,7 @@ from twisted.internet import defer import synapse import synapse.metrics +from synapse.events import EventBase from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu @@ -39,6 +41,8 @@ from synapse.metrics import ( events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.presence import UserPresenceState +from synapse.types import ReadReceipt from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) @@ -68,7 +72,7 @@ class FederationSender(object): self._transaction_manager = TransactionManager(hs) # map from destination to PerDestinationQueue - self._per_destination_queues = {} # type: dict[str, PerDestinationQueue] + self._per_destination_queues = {} # type: Dict[str, PerDestinationQueue] LaterGauge( "synapse_federation_transaction_queue_pending_destinations", @@ -84,7 +88,7 @@ class FederationSender(object): # Map of user_id -> UserPresenceState for all the pending presence # to be sent out by user_id. Entries here get processed and put in # pending_presence_by_dest - self.pending_presence = {} + self.pending_presence = {} # type: Dict[str, UserPresenceState] LaterGauge( "synapse_federation_transaction_queue_pending_pdus", @@ -116,20 +120,17 @@ class FederationSender(object): # and that there is a pending call to _flush_rrs_for_room in the system. self._queues_awaiting_rr_flush_by_room = ( {} - ) # type: dict[str, set[PerDestinationQueue]] + ) # type: Dict[str, Set[PerDestinationQueue]] self._rr_txn_interval_per_room_ms = ( - 1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second + 1000.0 / hs.config.federation_rr_transactions_per_room_per_second ) - def _get_per_destination_queue(self, destination): + def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination Args: - destination (str): server_name of remote server - - Returns: - PerDestinationQueue + destination: server_name of remote server """ queue = self._per_destination_queues.get(destination) if not queue: @@ -137,7 +138,7 @@ class FederationSender(object): self._per_destination_queues[destination] = queue return queue - def notify_new_events(self, current_id): + def notify_new_events(self, current_id: int) -> None: """This gets called when we have some new events we might want to send out to other servers. 
""" @@ -151,13 +152,12 @@ class FederationSender(object): "process_event_queue_for_federation", self._process_event_queue_loop ) - @defer.inlineCallbacks - def _process_event_queue_loop(self): + async def _process_event_queue_loop(self) -> None: try: self._is_processing = True while True: - last_token = yield self.store.get_federation_out_pos("events") - next_token, events = yield self.store.get_all_new_events_stream( + last_token = await self.store.get_federation_out_pos("events") + next_token, events = await self.store.get_all_new_events_stream( last_token, self._last_poked_id, limit=100 ) @@ -166,8 +166,7 @@ class FederationSender(object): if not events and next_token >= self._last_poked_id: break - @defer.inlineCallbacks - def handle_event(event): + async def handle_event(event: EventBase) -> None: # Only send events for this server. send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of() is_mine = self.is_mine_id(event.sender) @@ -184,7 +183,7 @@ class FederationSender(object): # Otherwise if the last member on a server in a room is # banned then it won't receive the event because it won't # be in the room after the ban. - destinations = yield self.state.get_hosts_in_room_at_events( + destinations = await self.state.get_hosts_in_room_at_events( event.room_id, event_ids=event.prev_event_ids() ) except Exception: @@ -206,17 +205,16 @@ class FederationSender(object): self._send_pdu(event, destinations) - @defer.inlineCallbacks - def handle_room_events(events): + async def handle_room_events(events: Iterable[EventBase]) -> None: with Measure(self.clock, "handle_room_events"): for event in events: - yield handle_event(event) + await handle_event(event) - events_by_room = {} + events_by_room = {} # type: Dict[str, List[EventBase]] for event in events: events_by_room.setdefault(event.room_id, []).append(event) - yield make_deferred_yieldable( + await make_deferred_yieldable( defer.gatherResults( [ run_in_background(handle_room_events, evs) @@ -226,11 +224,11 @@ class FederationSender(object): ) ) - yield self.store.update_federation_out_pos("events", next_token) + await self.store.update_federation_out_pos("events", next_token) if events: now = self.clock.time_msec() - ts = yield self.store.get_received_ts(events[-1].event_id) + ts = await self.store.get_received_ts(events[-1].event_id) synapse.metrics.event_processing_lag.labels( "federation_sender" @@ -254,7 +252,7 @@ class FederationSender(object): finally: self._is_processing = False - def _send_pdu(self, pdu, destinations): + def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: # We loop through all destinations to see whether we already have # a transaction in progress. If we do, stick it in the pending_pdus # table and we'll get back to it later. @@ -276,11 +274,11 @@ class FederationSender(object): self._get_per_destination_queue(destination).send_pdu(pdu, order) @defer.inlineCallbacks - def send_read_receipt(self, receipt): + def send_read_receipt(self, receipt: ReadReceipt): """Send a RR to any other servers in the room Args: - receipt (synapse.types.ReadReceipt): receipt to be sent + receipt: receipt to be sent """ # Some background on the rate-limiting going on here. 
@@ -343,7 +341,7 @@ class FederationSender(object): else: queue.flush_read_receipts_for_room(room_id) - def _schedule_rr_flush_for_room(self, room_id, n_domains): + def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None: # that is going to cause approximately len(domains) transactions, so now back # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM backoff_ms = self._rr_txn_interval_per_room_ms * n_domains @@ -352,7 +350,7 @@ class FederationSender(object): self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id) self._queues_awaiting_rr_flush_by_room[room_id] = set() - def _flush_rrs_for_room(self, room_id): + def _flush_rrs_for_room(self, room_id: str) -> None: queues = self._queues_awaiting_rr_flush_by_room.pop(room_id) logger.debug("Flushing RRs in %s to %s", room_id, queues) @@ -368,14 +366,11 @@ class FederationSender(object): @preserve_fn # the caller should not yield on this @defer.inlineCallbacks - def send_presence(self, states): + def send_presence(self, states: List[UserPresenceState]): """Send the new presence states to the appropriate destinations. This actually queues up the presence states ready for sending and triggers a background task to process them and send out the transactions. - - Args: - states (list(UserPresenceState)) """ if not self.hs.config.use_presence: # No-op if presence is disabled. @@ -412,11 +407,10 @@ class FederationSender(object): finally: self._processing_pending_presence = False - def send_presence_to_destinations(self, states, destinations): + def send_presence_to_destinations( + self, states: List[UserPresenceState], destinations: List[str] + ) -> None: """Send the given presence states to the given destinations. - - Args: - states (list[UserPresenceState]) destinations (list[str]) """ @@ -431,12 +425,9 @@ class FederationSender(object): @measure_func("txnqueue._process_presence") @defer.inlineCallbacks - def _process_presence_inner(self, states): + def _process_presence_inner(self, states: List[UserPresenceState]): """Given a list of states populate self.pending_presence_by_dest and poke to send a new transaction to each destination - - Args: - states (list(UserPresenceState)) """ hosts_and_states = yield get_interested_remotes(self.store, states, self.state) @@ -446,14 +437,20 @@ class FederationSender(object): continue self._get_per_destination_queue(destination).send_presence(states) - def build_and_send_edu(self, destination, edu_type, content, key=None): + def build_and_send_edu( + self, + destination: str, + edu_type: str, + content: dict, + key: Optional[Hashable] = None, + ): """Construct an Edu object, and queue it for sending Args: - destination (str): name of server to send to - edu_type (str): type of EDU to send - content (dict): content of EDU - key (Any|None): clobbering key for this edu + destination: name of server to send to + edu_type: type of EDU to send + content: content of EDU + key: clobbering key for this edu """ if destination == self.server_name: logger.info("Not sending EDU to ourselves") @@ -468,12 +465,12 @@ class FederationSender(object): self.send_edu(edu, key) - def send_edu(self, edu, key): + def send_edu(self, edu: Edu, key: Optional[Hashable]): """Queue an EDU for sending Args: - edu (Edu): edu to send - key (Any|None): clobbering key for this edu + edu: edu to send + key: clobbering key for this edu """ queue = self._get_per_destination_queue(edu.destination) if key: @@ -481,7 +478,7 @@ class FederationSender(object): else: queue.send_edu(edu) - def send_device_messages(self, 
destination): + def send_device_messages(self, destination: str): if destination == self.server_name: logger.warning("Not sending device update to ourselves") return @@ -501,5 +498,5 @@ class FederationSender(object): self._get_per_destination_queue(destination).attempt_new_transaction() - def get_current_token(self): + def get_current_token(self) -> int: return 0 diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 5012aaea35..e13cd20ffa 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -15,11 +15,11 @@ # limitations under the License. import datetime import logging +from typing import Dict, Hashable, Iterable, List, Tuple from prometheus_client import Counter -from twisted.internet import defer - +import synapse.server from synapse.api.errors import ( FederationDeniedError, HttpResponseException, @@ -31,7 +31,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.presence import UserPresenceState -from synapse.types import StateMap +from synapse.types import ReadReceipt from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter # This is defined in the Matrix spec and enforced by the receiver. @@ -56,13 +56,18 @@ class PerDestinationQueue(object): Manages the per-destination transmission queues. Args: - hs (synapse.HomeServer): - transaction_sender (TransactionManager): - destination (str): the server_name of the destination that we are managing + hs + transaction_sender + destination: the server_name of the destination that we are managing transmission for. """ - def __init__(self, hs, transaction_manager, destination): + def __init__( + self, + hs: "synapse.server.HomeServer", + transaction_manager: "synapse.federation.sender.TransactionManager", + destination: str, + ): self._server_name = hs.hostname self._clock = hs.get_clock() self._store = hs.get_datastore() @@ -72,20 +77,20 @@ class PerDestinationQueue(object): self.transmission_loop_running = False # a list of tuples of (pending pdu, order) - self._pending_pdus = [] # type: list[tuple[EventBase, int]] - self._pending_edus = [] # type: list[Edu] + self._pending_pdus = [] # type: List[Tuple[EventBase, int]] + self._pending_edus = [] # type: List[Edu] # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered # based on their key (e.g. typing events by room_id) # Map of (edu_type, key) -> Edu - self._pending_edus_keyed = {} # type: StateMap[Edu] + self._pending_edus_keyed = {} # type: Dict[Tuple[str, Hashable], Edu] # Map of user_id -> UserPresenceState of pending presence to be sent to this # destination - self._pending_presence = {} # type: dict[str, UserPresenceState] + self._pending_presence = {} # type: Dict[str, UserPresenceState] # room_id -> receipt_type -> user_id -> receipt_dict - self._pending_rrs = {} + self._pending_rrs = {} # type: Dict[str, Dict[str, Dict[str, dict]]] self._rrs_pending_flush = False # stream_id of last successfully sent to-device message. @@ -95,50 +100,50 @@ class PerDestinationQueue(object): # stream_id of last successfully sent device list update. 
self._last_device_list_stream_id = 0 - def __str__(self): + def __str__(self) -> str: return "PerDestinationQueue[%s]" % self._destination - def pending_pdu_count(self): + def pending_pdu_count(self) -> int: return len(self._pending_pdus) - def pending_edu_count(self): + def pending_edu_count(self) -> int: return ( len(self._pending_edus) + len(self._pending_presence) + len(self._pending_edus_keyed) ) - def send_pdu(self, pdu, order): + def send_pdu(self, pdu: EventBase, order: int) -> None: """Add a PDU to the queue, and start the transmission loop if neccessary Args: - pdu (EventBase): pdu to send - order (int): + pdu: pdu to send + order """ self._pending_pdus.append((pdu, order)) self.attempt_new_transaction() - def send_presence(self, states): + def send_presence(self, states: Iterable[UserPresenceState]) -> None: """Add presence updates to the queue. Start the transmission loop if neccessary. Args: - states (iterable[UserPresenceState]): presence to send + states: presence to send """ self._pending_presence.update({state.user_id: state for state in states}) self.attempt_new_transaction() - def queue_read_receipt(self, receipt): + def queue_read_receipt(self, receipt: ReadReceipt) -> None: """Add a RR to the list to be sent. Doesn't start the transmission loop yet (see flush_read_receipts_for_room) Args: - receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued + receipt: receipt to be queued """ self._pending_rrs.setdefault(receipt.room_id, {}).setdefault( receipt.receipt_type, {} )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data} - def flush_read_receipts_for_room(self, room_id): + def flush_read_receipts_for_room(self, room_id: str) -> None: # if we don't have any read-receipts for this room, it may be that we've already # sent them out, so we don't need to flush. if room_id not in self._pending_rrs: @@ -146,15 +151,15 @@ class PerDestinationQueue(object): self._rrs_pending_flush = True self.attempt_new_transaction() - def send_keyed_edu(self, edu, key): + def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: self._pending_edus_keyed[(edu.edu_type, key)] = edu self.attempt_new_transaction() - def send_edu(self, edu): + def send_edu(self, edu) -> None: self._pending_edus.append(edu) self.attempt_new_transaction() - def attempt_new_transaction(self): + def attempt_new_transaction(self) -> None: """Try to start a new transaction to this destination If there is already a transaction in progress to this destination, @@ -177,23 +182,22 @@ class PerDestinationQueue(object): self._transaction_transmission_loop, ) - @defer.inlineCallbacks - def _transaction_transmission_loop(self): - pending_pdus = [] + async def _transaction_transmission_loop(self) -> None: + pending_pdus = [] # type: List[Tuple[EventBase, int]] try: self.transmission_loop_running = True # This will throw if we wouldn't retry. We do this here so we fail # quickly, but we will later check this again in the http client, # hence why we throw the result away. 
- yield get_retry_limiter(self._destination, self._clock, self._store) + await get_retry_limiter(self._destination, self._clock, self._store) pending_pdus = [] while True: # We have to keep 2 free slots for presence and rr_edus limit = MAX_EDUS_PER_TRANSACTION - 2 - device_update_edus, dev_list_id = yield self._get_device_update_edus( + device_update_edus, dev_list_id = await self._get_device_update_edus( limit ) @@ -202,7 +206,7 @@ class PerDestinationQueue(object): ( to_device_edus, device_stream_id, - ) = yield self._get_to_device_message_edus(limit) + ) = await self._get_to_device_message_edus(limit) pending_edus = device_update_edus + to_device_edus @@ -269,7 +273,7 @@ class PerDestinationQueue(object): # END CRITICAL SECTION - success = yield self._transaction_manager.send_new_transaction( + success = await self._transaction_manager.send_new_transaction( self._destination, pending_pdus, pending_edus ) if success: @@ -280,7 +284,7 @@ class PerDestinationQueue(object): # Remove the acknowledged device messages from the database # Only bother if we actually sent some device messages if to_device_edus: - yield self._store.delete_device_msgs_for_remote( + await self._store.delete_device_msgs_for_remote( self._destination, device_stream_id ) @@ -289,7 +293,7 @@ class PerDestinationQueue(object): logger.info( "Marking as sent %r %r", self._destination, dev_list_id ) - yield self._store.mark_as_sent_devices_by_remote( + await self._store.mark_as_sent_devices_by_remote( self._destination, dev_list_id ) @@ -334,7 +338,7 @@ class PerDestinationQueue(object): # We want to be *very* sure we clear this after we stop processing self.transmission_loop_running = False - def _get_rr_edus(self, force_flush): + def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: if not self._pending_rrs: return if not force_flush and not self._rrs_pending_flush: @@ -351,17 +355,16 @@ class PerDestinationQueue(object): self._rrs_pending_flush = False yield edu - def _pop_pending_edus(self, limit): + def _pop_pending_edus(self, limit: int) -> List[Edu]: pending_edus = self._pending_edus pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:] return pending_edus - @defer.inlineCallbacks - def _get_device_update_edus(self, limit): + async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]: last_device_list = self._last_device_list_stream_id # Retrieve list of new device updates to send to the destination - now_stream_id, results = yield self._store.get_device_updates_by_remote( + now_stream_id, results = await self._store.get_device_updates_by_remote( self._destination, last_device_list, limit=limit ) edus = [ @@ -378,11 +381,10 @@ class PerDestinationQueue(object): return (edus, now_stream_id) - @defer.inlineCallbacks - def _get_to_device_message_edus(self, limit): + async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]: last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() - contents, stream_id = yield self._store.get_new_device_msgs_for_remote( + contents, stream_id = await self._store.get_new_device_msgs_for_remote( self._destination, last_device_stream_id, to_device_stream_id, limit ) edus = [ diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 5fed626d5b..3c2a02a3b3 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -13,14 +13,15 @@ # See the 
License for the specific language governing permissions and # limitations under the License. import logging +from typing import List from canonicaljson import json -from twisted.internet import defer - +import synapse.server from synapse.api.errors import HttpResponseException +from synapse.events import EventBase from synapse.federation.persistence import TransactionActions -from synapse.federation.units import Transaction +from synapse.federation.units import Edu, Transaction from synapse.logging.opentracing import ( extract_text_map, set_tag, @@ -39,7 +40,7 @@ class TransactionManager(object): shared between PerDestinationQueue objects """ - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self._server_name = hs.hostname self.clock = hs.get_clock() # nb must be called this for @measure_func self._store = hs.get_datastore() @@ -50,8 +51,9 @@ class TransactionManager(object): self._next_txn_id = int(self.clock.time_msec()) @measure_func("_send_new_transaction") - @defer.inlineCallbacks - def send_new_transaction(self, destination, pending_pdus, pending_edus): + async def send_new_transaction( + self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu] + ): # Make a transaction-sending opentracing span. This span follows on from # all the edus in that transaction. This needs to be done since there is @@ -127,7 +129,7 @@ class TransactionManager(object): return data try: - response = yield self._transport_layer.send_transaction( + response = await self._transport_layer.send_transaction( transaction, json_data_cb ) code = 200 diff --git a/synapse/federation/units.py b/synapse/federation/units.py index b4d743cde7..6b32e0dcbf 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -19,11 +19,15 @@ server protocol. import logging +import attr + +from synapse.types import JsonDict from synapse.util.jsonobject import JsonEncodedObject logger = logging.getLogger(__name__) +@attr.s(slots=True) class Edu(JsonEncodedObject): """ An Edu represents a piece of data sent from one homeserver to another. @@ -32,11 +36,24 @@ class Edu(JsonEncodedObject): internal ID or previous references graph. 
""" - valid_keys = ["origin", "destination", "edu_type", "content"] + edu_type = attr.ib(type=str) + content = attr.ib(type=dict) + origin = attr.ib(type=str) + destination = attr.ib(type=str) - required_keys = ["edu_type"] + def get_dict(self) -> JsonDict: + return { + "edu_type": self.edu_type, + "content": self.content, + } - internal_keys = ["origin", "destination"] + def get_internal_dict(self) -> JsonDict: + return { + "edu_type": self.edu_type, + "content": self.content, + "origin": self.origin, + "destination": self.destination, + } def get_context(self): return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}") diff --git a/synapse/server.pyi b/synapse/server.pyi index 90347ac23e..40eabfe5d9 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -107,3 +107,5 @@ class HomeServer(object): self, ) -> synapse.replication.tcp.client.ReplicationClientHandler: pass + def is_mine_id(self, domain_id: str) -> bool: + pass diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 68b9847bd2..2767b0497a 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -111,7 +111,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): retry_timings_res ) - self.datastore.get_device_updates_by_remote.return_value = (0, []) + self.datastore.get_device_updates_by_remote.return_value = defer.succeed( + (0, []) + ) def get_received_txn_response(*args): return defer.succeed(None) @@ -144,7 +146,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.datastore.get_current_state_deltas.return_value = (0, None) self.datastore.get_to_device_stream_token = lambda: 0 - self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0) + self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: defer.succeed( + ([], 0) + ) self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed( None diff --git a/tox.ini b/tox.ini index ef22368cf1..f8229eba88 100644 --- a/tox.ini +++ b/tox.ini @@ -179,6 +179,7 @@ extras = all commands = mypy \ synapse/api \ synapse/config/ \ + synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/sync.py \ synapse/handlers/ui_auth \ From 799001f2c0b31d72b95a252a3808da25987e1ed3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 15:30:04 +0000 Subject: [PATCH 177/213] Add a `make_event_from_dict` method (#6858) ... and use it in places where it's trivial to do so. This will make it easier to pass room versions into the FrozenEvent constructors. 
--- changelog.d/6858.misc | 1 + synapse/events/__init__.py | 16 ++++++++++++++-- synapse/events/builder.py | 10 +++------- synapse/federation/federation_base.py | 5 ++--- tests/api/test_filtering.py | 4 ++-- tests/crypto/test_event_signing.py | 6 +++--- tests/events/test_utils.py | 9 +++++---- tests/federation/test_federation_server.py | 4 ++-- tests/replication/slave/storage/test_events.py | 12 ++++++++---- tests/state/test_v2.py | 4 ++-- tests/test_event_auth.py | 10 +++++----- tests/test_federation.py | 6 +++--- tests/test_state.py | 4 ++-- 13 files changed, 52 insertions(+), 39 deletions(-) create mode 100644 changelog.d/6858.misc diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc new file mode 100644 index 0000000000..08aa80bcd9 --- /dev/null +++ b/changelog.d/6858.misc @@ -0,0 +1 @@ +Refactoring work in preparation for changing the event redaction algorithm. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 89d41d82b6..a842661a90 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -16,12 +16,13 @@ import os from distutils.util import strtobool +from typing import Optional, Type import six from unpaddedbase64 import encode_base64 -from synapse.api.room_versions import EventFormatVersions +from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.types import JsonDict from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -407,7 +408,7 @@ class FrozenEventV3(FrozenEventV2): return self._event_id -def event_type_from_format_version(format_version): +def event_type_from_format_version(format_version: int) -> Type[EventBase]: """Returns the python type to use to construct an Event object for the given event format version. 
@@ -427,3 +428,14 @@ def event_type_from_format_version(format_version): return FrozenEventV3 else: raise Exception("No event format %r" % (format_version,)) + + +def make_event_from_dict( + event_dict: JsonDict, + room_version: RoomVersion = RoomVersions.V1, + internal_metadata_dict: JsonDict = {}, + rejected_reason: Optional[str] = None, +) -> EventBase: + """Construct an EventBase from the given event dict""" + event_type = event_type_from_format_version(room_version.event_format) + return event_type(event_dict, internal_metadata_dict, rejected_reason) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 8d63ad6dc3..a0c4a40c27 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -28,11 +28,7 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.events import ( - EventBase, - _EventInternalMetadata, - event_type_from_format_version, -) +from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.types import EventID, JsonDict from synapse.util import Clock from synapse.util.stringutils import random_string @@ -256,8 +252,8 @@ def create_local_event_from_event_dict( event_dict.setdefault("signatures", {}) add_hashes_and_signatures(room_version, event_dict, hostname, signing_key) - return event_type_from_format_version(format_version)( - event_dict, internal_metadata_dict=internal_metadata_dict + return make_event_from_dict( + event_dict, room_version, internal_metadata_dict=internal_metadata_dict ) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index ebe8b8e9fe..eea64c1c9f 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -29,7 +29,7 @@ from synapse.api.room_versions import ( RoomVersion, ) from synapse.crypto.event_signing import check_event_content_hash -from synapse.events import EventBase, event_type_from_format_version +from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( @@ -374,8 +374,7 @@ def event_from_pdu_json( elif depth > MAX_DEPTH: raise SynapseError(400, "Depth too large", Codes.BAD_JSON) - event = event_type_from_format_version(room_version.event_format)(pdu_json) - + event = make_event_from_dict(pdu_json, room_version) event.internal_metadata.outlier = outlier return event diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 63d8633582..4e67503cf0 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -25,7 +25,7 @@ from twisted.internet import defer from synapse.api.constants import EventContentFields from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from tests import unittest from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver @@ -38,7 +38,7 @@ def MockEvent(**kwargs): kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: kwargs["type"] = "fake_type" - return FrozenEvent(kwargs) + return make_event_from_dict(kwargs) class FilteringTestCase(unittest.TestCase): diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 6143a50ab2..62f639a18d 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -19,7 
+19,7 @@ from unpaddedbase64 import decode_base64 from synapse.api.room_versions import RoomVersions from synapse.crypto.event_signing import add_hashes_and_signatures -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from tests import unittest @@ -54,7 +54,7 @@ class EventSigningTestCase(unittest.TestCase): RoomVersions.V1, event_dict, HOSTNAME, self.signing_key ) - event = FrozenEvent(event_dict) + event = make_event_from_dict(event_dict) self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) @@ -88,7 +88,7 @@ class EventSigningTestCase(unittest.TestCase): RoomVersions.V1, event_dict, HOSTNAME, self.signing_key ) - event = FrozenEvent(event_dict) + event = make_event_from_dict(event_dict) self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 2b13980dfd..45d55b9e94 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.events.utils import ( copy_power_levels_contents, prune_event, @@ -30,7 +29,7 @@ def MockEvent(**kwargs): kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: kwargs["type"] = "fake_type" - return FrozenEvent(kwargs) + return make_event_from_dict(kwargs) class PruneEventTestCase(unittest.TestCase): @@ -38,7 +37,9 @@ class PruneEventTestCase(unittest.TestCase): `matchdict` when it is redacted. """ def run_test(self, evdict, matchdict): - self.assertEquals(prune_event(FrozenEvent(evdict)).get_dict(), matchdict) + self.assertEquals( + prune_event(make_event_from_dict(evdict)).get_dict(), matchdict + ) def test_minimal(self): self.run_test( diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 1ec8c40901..e7d8699040 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.federation.federation_server import server_matches_acl_event from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -105,7 +105,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): def _create_acl_event(content): - return FrozenEvent( + return make_event_from_dict( { "room_id": "!a:b", "event_id": "$a:b", diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index b1b037006d..d31210fbe4 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -15,7 +15,7 @@ import logging from canonicaljson import encode_canonical_json -from synapse.events import FrozenEvent, _EventInternalMetadata +from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource from synapse.replication.slave.storage.events import SlavedEventStore @@ -90,7 +90,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction - redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) self.check("get_event", [msg.event_id], redacted) def test_backfilled_redactions(self): @@ -110,7 +112,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction - redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) + redacted = make_event_from_dict( + msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict() + ) self.check("get_event", [msg.event_id], redacted) def test_invites(self): @@ -345,7 +349,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): if redacts is not None: event_dict["redacts"] = redacts - event = FrozenEvent(event_dict, internal_metadata_dict=internal) + event = make_event_from_dict(event_dict, internal_metadata_dict=internal) self.event_id += 1 diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 0f341d3ac3..5bafad9f19 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -22,7 +22,7 @@ import attr from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.event_auth import auth_types_for_event -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store from synapse.types import EventID @@ -89,7 +89,7 @@ class FakeEvent(object): if self.state_key is not None: event_dict["state_key"] = self.state_key - return FrozenEvent(event_dict) + return make_event_from_dict(event_dict) # All graphs start with this set of events diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index ca20b085a2..bfa5d6f510 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -18,7 +18,7 @@ import unittest from synapse import event_auth from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent +from synapse.events import 
make_event_from_dict class EventAuthTestCase(unittest.TestCase): @@ -94,7 +94,7 @@ TEST_ROOM_ID = "!test:room" def _create_event(user_id): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -106,7 +106,7 @@ def _create_event(user_id): def _join_event(user_id): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -119,7 +119,7 @@ def _join_event(user_id): def _power_levels_event(sender, content): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), @@ -132,7 +132,7 @@ def _power_levels_event(sender, content): def _random_state_event(sender): - return FrozenEvent( + return make_event_from_dict( { "room_id": TEST_ROOM_ID, "event_id": _get_event_id(), diff --git a/tests/test_federation.py b/tests/test_federation.py index 68684460c6..9b5cf562f3 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -2,7 +2,7 @@ from mock import Mock from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.logging.context import LoggingContext from synapse.types import Requester, UserID from synapse.util import Clock @@ -43,7 +43,7 @@ class MessageAcceptTests(unittest.TestCase): ) )[0] - join_event = FrozenEvent( + join_event = make_event_from_dict( { "room_id": self.room_id, "sender": "@baduser:test.serv", @@ -105,7 +105,7 @@ class MessageAcceptTests(unittest.TestCase): )[0] # Now lie about an event - lying_event = FrozenEvent( + lying_event = make_event_from_dict( { "room_id": self.room_id, "sender": "@baduser:test.serv", diff --git a/tests/test_state.py b/tests/test_state.py index 1e4449fa1c..d1578fe581 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.auth import Auth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent +from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler @@ -66,7 +66,7 @@ def create_event( d.update(kwargs) - event = FrozenEvent(d) + event = make_event_from_dict(d) return event From e1d858984d71b6edf56e1024f1475224bfa49054 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 Feb 2020 15:30:26 +0000 Subject: [PATCH 178/213] Remove unused `get_room_stats_state` method. (#6869) --- changelog.d/6869.misc | 1 + synapse/storage/data_stores/main/stats.py | 25 ----------------------- 2 files changed, 1 insertion(+), 25 deletions(-) create mode 100644 changelog.d/6869.misc diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc new file mode 100644 index 0000000000..14f88f9bb7 --- /dev/null +++ b/changelog.d/6869.misc @@ -0,0 +1 @@ +Remove unused `get_room_stats_state` method. diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py index 7af1495e47..380c1ec7da 100644 --- a/synapse/storage/data_stores/main/stats.py +++ b/synapse/storage/data_stores/main/stats.py @@ -271,31 +271,6 @@ class StatsStore(StateDeltasStore): return slice_list - def get_room_stats_state(self, room_id): - """ - Returns the current room_stats_state for a room. - - Args: - room_id (str): The ID of the room to return state for. 
- - Returns (dict): - Dictionary containing these keys: - "name", "topic", "canonical_alias", "avatar", "join_rules", - "history_visibility" - """ - return self.db.simple_select_one( - "room_stats_state", - {"room_id": room_id}, - retcols=( - "name", - "topic", - "canonical_alias", - "avatar", - "join_rules", - "history_visibility", - ), - ) - @cached() def get_earliest_token_for_stats(self, stats_type, id): """ From 21db35f77e4718cfe6d6b292baada9dd02ef8280 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Feb 2020 15:45:39 +0000 Subject: [PATCH 179/213] Add support for putting fed user query API on workers (#6873) --- changelog.d/6873.feature | 1 + docs/workers.md | 1 + synapse/app/federation_reader.py | 2 ++ synapse/federation/federation_server.py | 7 +++-- synapse/handlers/device.py | 35 +++++++++++-------------- 5 files changed, 25 insertions(+), 21 deletions(-) create mode 100644 changelog.d/6873.feature diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature new file mode 100644 index 0000000000..bbedf8f7f0 --- /dev/null +++ b/changelog.d/6873.feature @@ -0,0 +1 @@ +Add ability to route federation user device queries to workers. diff --git a/docs/workers.md b/docs/workers.md index 82442d6a0a..6f7ec58780 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -176,6 +176,7 @@ endpoints matching the following regular expressions: ^/_matrix/federation/v1/query_auth/ ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ + ^/_matrix/federation/v1/user/devices/ ^/_matrix/federation/v1/send/ ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 5e17ef1396..d055d11b23 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -33,6 +33,7 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore @@ -68,6 +69,7 @@ class FederationReaderSlavedStore( SlavedKeyStore, SlavedRegistrationStore, SlavedGroupServerStore, + SlavedDeviceStore, RoomStore, DirectoryStore, SlavedTransactionStore, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index a6c966a393..7f9da49326 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -81,6 +81,8 @@ class FederationServer(FederationBase): self.handler = hs.get_handlers().federation_handler self.state = hs.get_state_handler() + self.device_handler = hs.get_device_handler() + self._server_linearizer = Linearizer("fed_server") self._transaction_linearizer = Linearizer("fed_txn_handler") @@ -523,8 +525,9 @@ class FederationServer(FederationBase): def on_query_client_keys(self, origin, content): return self.on_query_request("client_keys", content) - def on_query_user_devices(self, origin, user_id): - return self.on_query_request("user_devices", user_id) + async def on_query_user_devices(self, origin: str, user_id: str): + keys = await 
self.device_handler.on_federation_query_user_devices(user_id) + return 200, keys @trace async def on_claim_client_keys(self, origin, content): diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index a9bd431486..6d8e48ed39 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -225,6 +225,22 @@ class DeviceWorkerHandler(BaseHandler): return result + @defer.inlineCallbacks + def on_federation_query_user_devices(self, user_id): + stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) + master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master") + self_signing_key = yield self.store.get_e2e_cross_signing_key( + user_id, "self_signing" + ) + + return { + "user_id": user_id, + "stream_id": stream_id, + "devices": devices, + "master_key": master_key, + "self_signing_key": self_signing_key, + } + class DeviceHandler(DeviceWorkerHandler): def __init__(self, hs): @@ -239,9 +255,6 @@ class DeviceHandler(DeviceWorkerHandler): federation_registry.register_edu_handler( "m.device_list_update", self.device_list_updater.incoming_device_list_update ) - federation_registry.register_query_handler( - "user_devices", self.on_federation_query_user_devices - ) hs.get_distributor().observe("user_left_room", self.user_left_room) @@ -456,22 +469,6 @@ class DeviceHandler(DeviceWorkerHandler): self.notifier.on_new_event("device_list_key", position, users=[from_user_id]) - @defer.inlineCallbacks - def on_federation_query_user_devices(self, user_id): - stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) - master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master") - self_signing_key = yield self.store.get_e2e_cross_signing_key( - user_id, "self_signing" - ) - - return { - "user_id": user_id, - "stream_id": stream_id, - "devices": devices, - "master_key": master_key, - "self_signing_key": self_signing_key, - } - @defer.inlineCallbacks def user_left_room(self, user, room_id): user_id = user.to_string() From fe73f0d533dcdcf11f069d89ebbff2ce88d16bb3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 Feb 2020 00:41:20 +0000 Subject: [PATCH 180/213] Update setuptools for python 3.5 tests (#6880) Workaround for jaraco/zipp#40 --- .buildkite/scripts/test_old_deps.sh | 18 ++++++++++++++++++ changelog.d/6880.misc | 1 + 2 files changed, 19 insertions(+) create mode 100755 .buildkite/scripts/test_old_deps.sh create mode 100644 changelog.d/6880.misc diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh new file mode 100755 index 0000000000..dfd71b2511 --- /dev/null +++ b/.buildkite/scripts/test_old_deps.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# this script is run by buildkite in a plain `xenial` container; it installs the +# minimal requirements for tox and hands over to the py35-old tox environment. + +set -ex + +apt-get update +apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev + +# workaround for https://github.com/jaraco/zipp/issues/40 +python3.5 -m pip install 'setuptools>=34.4.0' + +python3.5 -m pip install tox + +export LANG="C.UTF-8" + +exec tox -e py35-old,combine diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc new file mode 100644 index 0000000000..8344a6ed1e --- /dev/null +++ b/changelog.d/6880.misc @@ -0,0 +1 @@ +Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. 
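As a companion to the routing entry that [PATCH 179/213] above adds to `docs/workers.md`, the sketch below shows the shape of a `federation_reader` worker configuration that could serve the new `^/_matrix/federation/v1/user/devices/` path once the reverse proxy forwards it there. The port number is an invented example, and the replication and logging options a real deployment needs are deliberately omitted:

```yaml
# Minimal sketch of a federation_reader worker config. Port 8084 is an
# assumed example, and replication/log settings are omitted; consult
# docs/workers.md for the full set of options.
worker_app: synapse.app.federation_reader

worker_listeners:
  - type: http
    port: 8084
    resources:
      - names: [federation]
```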
From 8e64c5a24c26a733c0cfd3e997ea4079ae457096 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 10 Feb 2020 09:36:23 +0000 Subject: [PATCH 181/213] filter out m.room.aliases from the CS API until a better solution is specced (#6878) We're in the middle of properly mitigating spam caused by malicious aliases being added to a room. However, until this work fully lands, we temporarily filter out all m.room.aliases events from /sync and /messages on the CS API, to remove abusive aliases. This is considered acceptable as m.room.aliases events were never a reliable record of the given alias->id mapping and were purely informational, and in their current state do more harm than good. --- changelog.d/6878.feature | 1 + synapse/visibility.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/6878.feature diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature new file mode 100644 index 0000000000..af3e958a43 --- /dev/null +++ b/changelog.d/6878.feature @@ -0,0 +1 @@ +Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. diff --git a/synapse/visibility.py b/synapse/visibility.py index 100dc47a8a..d0abd8f04f 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -122,6 +122,13 @@ def filter_events_for_client( if not event.is_state() and event.sender in ignore_list: return None + # Until MSC2261 has landed we can't redact malicious alias events, so for + # now we temporarily filter out m.room.aliases entirely to mitigate + # abuse, while we spec a better solution to advertising aliases + # on rooms. + if event.type == EventTypes.Aliases: + return None + # Don't try to apply the room's retention policy if the event is a state event, as # MSC1763 states that retention is only considered for non-state events. if apply_retention_policies and not event.is_state(): From 3de57e706209d98a331265e6d5a51bfd24939a3b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Feb 2020 09:56:42 +0000 Subject: [PATCH 182/213] 1.10.0rc3 --- CHANGES.md | 15 +++++++++++++++ changelog.d/6878.feature | 1 - changelog.d/6880.misc | 1 - synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/6878.feature delete mode 100644 changelog.d/6880.misc diff --git a/CHANGES.md b/CHANGES.md index c2aa735908..4a81a04627 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.10.0rc3 (2020-02-10) +============================== + +Features +-------- + +- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) + + +Internal Changes +---------------- + +- Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. ([\#6880](https://github.com/matrix-org/synapse/issues/6880)) + + Synapse 1.10.0rc2 (2020-02-06) ============================== diff --git a/changelog.d/6878.feature b/changelog.d/6878.feature deleted file mode 100644 index af3e958a43..0000000000 --- a/changelog.d/6878.feature +++ /dev/null @@ -1 +0,0 @@ -Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. diff --git a/changelog.d/6880.misc b/changelog.d/6880.misc deleted file mode 100644 index 8344a6ed1e..0000000000 --- a/changelog.d/6880.misc +++ /dev/null @@ -1 +0,0 @@ -Fix continuous integration failures with old versions of `pip`, which were introduced by a release of the `zipp` library. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 4f1859bd57..36c0cf557a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc2" +__version__ = "1.10.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 3a3118f4ecb631dec3cc44a928a3666b734f5dcb Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 10 Feb 2020 11:47:18 -0500 Subject: [PATCH 183/213] Add an additional test to the SyTest blacklist for worker mode. (#6883) --- .buildkite/worker-blacklist | 2 ++ changelog.d/6883.misc | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6883.misc diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index 158ab79154..094b6c94da 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update # this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536 Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state + +Can get rooms/{roomId}/members at a given point diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc new file mode 100644 index 0000000000..e0837d7987 --- /dev/null +++ b/changelog.d/6883.misc @@ -0,0 +1 @@ +Add an additional entry to the SyTest blacklist for worker mode. From 01209382fbb5ccf7bdef71f1d42f754b125f625a Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 10 Feb 2020 18:07:35 +0000 Subject: [PATCH 184/213] filter out m.room.aliases from /sync state blocks (#6884) We forgot to filter out aliases from /sync state blocks as well as the timeline. --- changelog.d/6884.feature | 1 + synapse/handlers/sync.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/6884.feature diff --git a/changelog.d/6884.feature b/changelog.d/6884.feature new file mode 100644 index 0000000000..4f6b9630b8 --- /dev/null +++ b/changelog.d/6884.feature @@ -0,0 +1 @@ +Filter out m.room.aliases from /sync state blocks until a full fix lands. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd95f85e3f..2b62fd83fd 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -883,6 +883,7 @@ class SyncHandler(object): for e in sync_config.filter_collection.filter_room_state( list(state.values()) ) + if e.type != EventTypes.Aliases # until MSC2261 or alternative solution } async def unread_notifs_for_room_id(self, room_id, sync_config): From a92e703ab9d78aecc062e797f941bb7e206650a5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 10 Feb 2020 16:35:26 -0500 Subject: [PATCH 185/213] Reject device display names that are too long (#6882) * Reject device display names that are too long. Too long is currently defined as 100 characters in length. * Add a regression test for rejecting a too long device display name. --- changelog.d/6882.misc | 1 + synapse/handlers/device.py | 14 +++++++++++++- tests/handlers/test_device.py | 18 ++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6882.misc diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc new file mode 100644 index 0000000000..e8382e36ae --- /dev/null +++ b/changelog.d/6882.misc @@ -0,0 +1 @@ +Reject device display names over 100 characters in length. 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 6d8e48ed39..50cea3f378 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( FederationDeniedError, HttpResponseException, RequestSendFailed, + SynapseError, ) from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import RoomStreamToken, get_domain_from_id @@ -39,6 +40,8 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +MAX_DEVICE_DISPLAY_NAME_LEN = 100 + class DeviceWorkerHandler(BaseHandler): def __init__(self, hs): @@ -404,9 +407,18 @@ class DeviceHandler(DeviceWorkerHandler): defer.Deferred: """ + # Reject a new displayname which is too long. + new_display_name = content.get("display_name") + if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN: + raise SynapseError( + 400, + "Device display name is too long (max %i)" + % (MAX_DEVICE_DISPLAY_NAME_LEN,), + ) + try: yield self.store.update_device( - user_id, device_id, new_display_name=content.get("display_name") + user_id, device_id, new_display_name=new_display_name ) yield self.notify_device_update(user_id, [device_id]) except errors.StoreError as e: diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index a3aa0a1cf2..62b47f6574 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -160,6 +160,24 @@ class DeviceTestCase(unittest.HomeserverTestCase): res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") + def test_update_device_too_long_display_name(self): + """Update a device with a display name that is invalid (too long).""" + self._record_users() + + # Request to update a device display name with a new value that is longer than allowed. + update = { + "display_name": "a" + * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) + } + self.get_failure( + self.handler.update_device(user1, "abc", update), + synapse.api.errors.SynapseError, + ) + + # Ensure the display name was not updated. + res = self.get_success(self.handler.get_device(user1, "abc")) + self.assertEqual(res["display_name"], "display 2") + def test_update_unknown_device(self): update = {"display_name": "new_display"} res = self.handler.update_device("user_id", "unknown_device_id", update) From 3edc65dd24f1916501e24e2353b4523bc2e3635c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 11 Feb 2020 10:43:16 +0000 Subject: [PATCH 186/213] 1.10.0rc4 --- CHANGES.md | 9 +++++++++ changelog.d/6884.feature | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6884.feature diff --git a/CHANGES.md b/CHANGES.md index 4a81a04627..f776560de7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.10.0rc4 (2020-02-11) +============================== + +Features +-------- + +- Filter out m.room.aliases from /sync state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) + + Synapse 1.10.0rc3 (2020-02-10) ============================== diff --git a/changelog.d/6884.feature b/changelog.d/6884.feature deleted file mode 100644 index 4f6b9630b8..0000000000 --- a/changelog.d/6884.feature +++ /dev/null @@ -1 +0,0 @@ -Filter out m.room.aliases from /sync state blocks until a full fix lands. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 36c0cf557a..cc69ff0c41 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc3" +__version__ = "1.10.0rc4" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9e45d573d499796bd1878d026cdd820347c10c00 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:50:55 +0000 Subject: [PATCH 187/213] changelog formatting --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f776560de7..8f252bb1b2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.10.0rc4 (2020-02-11) Features -------- -- Filter out m.room.aliases from /sync state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Filter out `m.room.aliase`s from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) Synapse 1.10.0rc3 (2020-02-10) @@ -13,7 +13,7 @@ Synapse 1.10.0rc3 (2020-02-10) Features -------- -- Filter out m.room.aliases from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) +- Filter out `m.room.aliases` from the CS API to mitigate abuse while a better solution is specced. ([\#6878](https://github.com/matrix-org/synapse/issues/6878)) Internal Changes From aa7e4291ee55f4c1d817bfb4bf87930ae2f40aab Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:51:13 +0000 Subject: [PATCH 188/213] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8f252bb1b2..4248b7065b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.10.0rc4 (2020-02-11) Features -------- -- Filter out `m.room.aliase`s from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Filter out `m.room.aliases` from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) Synapse 1.10.0rc3 (2020-02-10) From 78d170262c7349cbb85baa4ad0749ccb5e2794e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 Feb 2020 10:53:25 +0000 Subject: [PATCH 189/213] changelog wording --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4248b7065b..ab031fccdc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,10 @@ Synapse 1.10.0rc4 (2020-02-11) ============================== -Features +Bugfixes -------- -- Filter out `m.room.aliases` from `/sync` state blocks until a full fix lands. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +- Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. 
([\#6884](https://github.com/matrix-org/synapse/issues/6884)) Synapse 1.10.0rc3 (2020-02-10) ============================== From 856b2a9555b6c5a70b98ad2a1e14b7dbee0949f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 11 Feb 2020 11:06:28 +0000 Subject: [PATCH 190/213] 1.10.0rc5 --- CHANGES.md | 6 +++++- synapse/__init__.py | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ab031fccdc..1995a70b19 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -Synapse 1.10.0rc4 (2020-02-11) +Synapse 1.10.0rc5 (2020-02-11) ============================== Bugfixes @@ -6,6 +6,10 @@ Bugfixes - Fix the filtering introduced in 1.10.0rc3 to also apply to the state blocks returned by `/sync`. ([\#6884](https://github.com/matrix-org/synapse/issues/6884)) +Synapse 1.10.0rc4 (2020-02-11) +============================== + +This release candidate was built incorrectly and is superseded by 1.10.0rc5. Synapse 1.10.0rc3 (2020-02-10) ============================== diff --git a/synapse/__init__.py b/synapse/__init__.py index cc69ff0c41..ba339004ba 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc4" +__version__ = "1.10.0rc5" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From a443d2a25dc3cd99519c6b9a3a326545a0b2f933 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 Feb 2020 17:37:09 +0000 Subject: [PATCH 191/213] Spell out that Synapse never purges the last event sent in a room --- docs/message_retention_policies.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md index 4300809dfe..f2e2794252 100644 --- a/docs/message_retention_policies.md +++ b/docs/message_retention_policies.md @@ -42,6 +42,10 @@ purged according to its room's policy, then the receiving server will process and store that event until it's picked up by the next purge job, though it will always hide it from clients. +With the current implementation of this feature, in order not to break +rooms, Synapse will never delete the last message sent to a room, and +will only hide it from clients. + ## Server configuration From 705c978366146e85280af0dcf216e78d521352a3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 Feb 2020 17:38:27 +0000 Subject: [PATCH 192/213] Changelog --- changelog.d/6891.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6891.doc diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc new file mode 100644 index 0000000000..f3afbbccda --- /dev/null +++ b/changelog.d/6891.doc @@ -0,0 +1 @@ +Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support. From 6b21986e4ee999eb3669ec90f6db3bdfa7ce71a1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 Feb 2020 17:56:04 +0000 Subject: [PATCH 193/213] Also spell it out in the purge history API doc --- docs/admin_api/purge_history_api.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst index f7be226fd9..f2c4dc03ac 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.rst @@ -8,6 +8,9 @@ Depending on the amount of history being purged a call to the API may take several minutes or longer. 
During this period users will not be able to paginate further back in the room from the point being purged from. +Note that, in order to not break the room, this API won't delete the last +message sent to it. + The API is: ``POST /_synapse/admin/v1/purge_history/[/]`` From a0c4769f1ae6d6aaf1a548b869f857836efbfb18 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 Feb 2020 17:56:42 +0000 Subject: [PATCH 194/213] Update the changelog file --- changelog.d/6891.doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6891.doc b/changelog.d/6891.doc index f3afbbccda..2f46c385b7 100644 --- a/changelog.d/6891.doc +++ b/changelog.d/6891.doc @@ -1 +1 @@ -Spell out that the last event sent to a room won't be deleted by the purge jobs for the message retention policies support. +Spell out that the last event sent to a room won't be deleted by a purge. From ba547ec3a94b17cfb634758deb4cdbc98fc840a9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 12 Feb 2020 07:02:19 -0500 Subject: [PATCH 195/213] Use BSD-compatible in-place editing for sed. (#6887) --- changelog.d/6887.misc | 1 + scripts-dev/config-lint.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6887.misc diff --git a/changelog.d/6887.misc b/changelog.d/6887.misc new file mode 100644 index 0000000000..b351d47c7b --- /dev/null +++ b/changelog.d/6887.misc @@ -0,0 +1 @@ +Fix the use of sed in the linting scripts when using BSD sed. diff --git a/scripts-dev/config-lint.sh b/scripts-dev/config-lint.sh index 677a854c85..189ca66535 100755 --- a/scripts-dev/config-lint.sh +++ b/scripts-dev/config-lint.sh @@ -3,7 +3,8 @@ # Exits with 0 if there are no problems, or another code otherwise. # Fix non-lowercase true/false values -sed -i -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml +sed -i.bak -E "s/: +True/: true/g; s/: +False/: false/g;" docs/sample_config.yaml +rm docs/sample_config.yaml.bak # Check if anything changed git diff --exit-code docs/sample_config.yaml From 3dd2b5f5e313c0350a1a64f1d5e40c8bdc46029c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 12 Feb 2020 12:02:53 +0000 Subject: [PATCH 196/213] bump the version of Alpine Linux used in the docker images (#6897) --- changelog.d/6897.docker | 1 + docker/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6897.docker diff --git a/changelog.d/6897.docker b/changelog.d/6897.docker new file mode 100644 index 0000000000..8e7bcd719a --- /dev/null +++ b/changelog.d/6897.docker @@ -0,0 +1 @@ +Update the docker images to Alpine Linux 3.11. 
diff --git a/docker/Dockerfile b/docker/Dockerfile index e5a0d6d5f6..93d61739ae 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7 ### ### Stage 0: builder ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder +FROM docker.io/python:${PYTHON_VERSION}-alpine3.11 as builder # install the OS build deps From fdb816713aacb295fe804290f5043bdac866dcf5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 12:19:19 +0000 Subject: [PATCH 197/213] 1.10.0 --- CHANGES.md | 9 +++++++++ changelog.d/6897.docker | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6897.docker diff --git a/CHANGES.md b/CHANGES.md index 1995a70b19..603a50054e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.10.0 (2020-02-12) +=========================== + +Updates to the Docker image +--------------------------- + +- Update the docker images to Alpine Linux 3.11. ([\#6897](https://github.com/matrix-org/synapse/issues/6897)) + + Synapse 1.10.0rc5 (2020-02-11) ============================== diff --git a/changelog.d/6897.docker b/changelog.d/6897.docker deleted file mode 100644 index 8e7bcd719a..0000000000 --- a/changelog.d/6897.docker +++ /dev/null @@ -1 +0,0 @@ -Update the docker images to Alpine Linux 3.11. diff --git a/debian/changelog b/debian/changelog index 74eb29c5ee..cdc3b1a5c2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.10.0) stable; urgency=medium + + * New synapse release 1.10.0. + + -- Synapse Packaging team Wed, 12 Feb 2020 12:18:54 +0000 + matrix-synapse-py3 (1.9.1) stable; urgency=medium * New synapse release 1.9.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index ba339004ba..9d285fca38 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.10.0rc5" +__version__ = "1.10.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 7b8d654a6196d889c8e1c2a403f5176650216432 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 12:20:37 +0000 Subject: [PATCH 198/213] Move the warning at the top of the release changes --- CHANGES.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 603a50054e..0bce84f400 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.10.0 (2020-02-12) =========================== +**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. + Updates to the Docker image --------------------------- @@ -54,9 +56,6 @@ Internal Changes Synapse 1.10.0rc1 (2020-01-31) ============================== -**WARNING to client developers**: As of this release Synapse validates `client_secret` parameters in the Client-Server API as per the spec. See [\#6766](https://github.com/matrix-org/synapse/issues/6766) for details. 
- - Features -------- From 08e050c3fddb35cc54f6e0704fa9b54128dddc39 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 15:39:40 +0000 Subject: [PATCH 199/213] Rephrase --- docs/admin_api/purge_history_api.rst | 4 ++-- docs/message_retention_policies.md | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst index f2c4dc03ac..e2a620c54f 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.rst @@ -8,8 +8,8 @@ Depending on the amount of history being purged a call to the API may take several minutes or longer. During this period users will not be able to paginate further back in the room from the point being purged from. -Note that, in order to not break the room, this API won't delete the last -message sent to it. +Note that Synapse requires at least one message in each room, so it will never +delete the last message in a room. The API is: diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md index f2e2794252..1dd60bdad9 100644 --- a/docs/message_retention_policies.md +++ b/docs/message_retention_policies.md @@ -42,9 +42,9 @@ purged according to its room's policy, then the receiving server will process and store that event until it's picked up by the next purge job, though it will always hide it from clients. -With the current implementation of this feature, in order not to break -rooms, Synapse will never delete the last message sent to a room, and -will only hide it from clients. +Synapse requires at least one message in each room, so it will never +delete the last message in a room. It will, however, hide it from +clients. ## Server configuration From d8994942f28f5028e560f6aba52512fae3ca1a6a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 12 Feb 2020 18:14:10 +0000 Subject: [PATCH 200/213] Return a 404 for admin api user lookup if user not found (#6901) --- changelog.d/6901.misc | 1 + synapse/rest/admin/users.py | 5 ++++- tests/rest/admin/test_user.py | 16 ++++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6901.misc diff --git a/changelog.d/6901.misc b/changelog.d/6901.misc new file mode 100644 index 0000000000..b2f12bbe86 --- /dev/null +++ b/changelog.d/6901.misc @@ -0,0 +1 @@ +Return a 404 instead of 200 for querying information of a non-existent user through the admin API. 
\ No newline at end of file diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index e75c5f1370..2107b5dc56 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -21,7 +21,7 @@ from six import text_type from six.moves import http_client from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -152,6 +152,9 @@ class UserRestServletV2(RestServlet): ret = await self.admin_handler.get_user(target_user) + if not ret: + raise NotFoundError("User not found") + return 200, ret async def on_PUT(self, request, user_id): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 3b5169b38d..490ce8f55d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -401,6 +401,22 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) self.assertEqual("You are not a server admin", channel.json_body["error"]) + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + self.hs.config.registration_shared_secret = None + + request, channel = self.make_request( + "GET", + "/_synapse/admin/v2/users/@unknown_person:test", + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) + def test_requester_is_admin(self): """ If the user is a server admin, a new user is created. From f092029d2ddc333d557c3551ebc443d59221433c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:14:16 +0000 Subject: [PATCH 201/213] Update ACME.md to mention ACME v1 deprecation --- docs/ACME.md | 48 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/docs/ACME.md b/docs/ACME.md index 9eb18a9cf5..6d06cae3b3 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -1,12 +1,46 @@ # ACME -Synapse v1.0 will require valid TLS certificates for communication between -servers (port `8448` by default) in addition to those that are client-facing -(port `443`). If you do not already have a valid certificate for your domain, -the easiest way to get one is with Synapse's new ACME support, which will use -the ACME protocol to provision a certificate automatically. Synapse v0.99.0+ -will provision server-to-server certificates automatically for you for free -through [Let's Encrypt](https://letsencrypt.org/) if you tell it to. +From version 1.0 (June 2019) onwards, Synapse requires valid TLS +certificates for communication between servers (by default on port +`8448`) in addition to those that are client-facing (port `443`). To +help homeserver admins fulfil this new requirement, Synapse v0.99.0 +introduced support for automatically provisioning certificates through +[Let's Encrypt](https://letsencrypt.org/) using the ACME protocol. + +## Deprecation of ACME v1 + +In [March 2019](https://community.letsencrypt.org/t/end-of-life-plan-for-acmev1/88430), +Let's Encrypt announced that they were deprecating version 1 of the ACME +protocol, with the plan to disable the use of it for new accounts in +November 2019, and for existing accounts in June 2020. 
+ +Synapse doesn't currently support version 2 of the ACME protocol, which +means that: + +* for existing installs, Synapse's built-in ACME support will continue + to work until June 2020. +* for new installs, this feature will not work at all. + +Either way, it is recommended to move from Synapse's ACME support +feature to an external automated tool such as [certbot](https://github.com/certbot/certbot) +(or browse [this list](https://letsencrypt.org/fr/docs/client-options/) +for an alternative ACME client). + +It's also recommended to use a reverse proxy for the server-facing +communications (mode documentation about this can be found +[here](/docs/reverse_proxy.md)) as well as the client-facing ones and +have it serve the certificates. + +In case you can't do that and need Synapse to serve them itself, make +sure to set the `tls_certificate_path` configuration setting to the path +of the certificate (make sure to use the certificate containing the full +certification chain, e.g. `fullchain.pem` if using certbot) and +`tls_private_key_path` to the path of the matching private key. + +If you still want to use Synapse's built-in ACME support, the rest of +this document explains how to set it up. + +## Initial setup In the case that your `server_name` config variable is the same as the hostname that the client connects to, then the same certificate can be From e45a7c09396c56d6ca7e3f42827cc354a942ba5d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:14:59 +0000 Subject: [PATCH 202/213] Remove duplicated info about certbot et al --- docs/ACME.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/ACME.md b/docs/ACME.md index 6d06cae3b3..3b4416efe1 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -66,11 +66,6 @@ If you already have certificates, you will need to back up or delete them (files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root directory), Synapse's ACME implementation will not overwrite them. -You may wish to use alternate methods such as Certbot to obtain a certificate -from Let's Encrypt, depending on your server configuration. Of course, if you -already have a valid certificate for your homeserver's domain, that can be -placed in Synapse's config directory without the need for any ACME setup. - ## ACME setup The main steps for enabling ACME support in short summary are: From e88a5dd108f607c9ec99356a2601147e41a20533 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 20:15:41 +0000 Subject: [PATCH 203/213] Changelog --- changelog.d/6905.doc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6905.doc diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc new file mode 100644 index 0000000000..ca10b38301 --- /dev/null +++ b/changelog.d/6905.doc @@ -0,0 +1 @@ +Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins. 
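For admins following the advice above to move certificate provisioning to an external ACME client, the two options the document names slot into `homeserver.yaml` as in this sketch. The `/etc/letsencrypt` paths assume certbot's default layout for an `example.com` certificate and are illustrative only:

```yaml
# Sketch: serving externally provisioned certificates from Synapse itself.
# Paths assume certbot's default output layout; adjust for your client.
tls_certificate_path: "/etc/letsencrypt/live/example.com/fullchain.pem"
tls_private_key_path: "/etc/letsencrypt/live/example.com/privkey.pem"
```

As the document says, point `tls_certificate_path` at the file containing the full certification chain (certbot's `fullchain.pem`), not at the bare leaf certificate.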
From 459d089af7e90a703df9637a071e9285bf85eb12 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Feb 2020 21:05:30 +0000 Subject: [PATCH 204/213] Mention that using Synapse to serve certificates requires restarts --- docs/ACME.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/ACME.md b/docs/ACME.md index 3b4416efe1..97ac3c5ba3 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -35,7 +35,9 @@ In case you can't do that and need Synapse to serve them itself, make sure to set the `tls_certificate_path` configuration setting to the path of the certificate (make sure to use the certificate containing the full certification chain, e.g. `fullchain.pem` if using certbot) and -`tls_private_key_path` to the path of the matching private key. +`tls_private_key_path` to the path of the matching private key. Note +that in this case you will need to restart Synapse after each +certificate renewal so that Synapse stops using the old certificate. If you still want to use Synapse's built-in ACME support, the rest of this document explains how to set it up. From 862669d6cc802ff610de6f6df644ef2a6706abb3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 11:29:08 +0000 Subject: [PATCH 205/213] Update docs/ACME.md --- docs/ACME.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ACME.md b/docs/ACME.md index 97ac3c5ba3..f4c4740476 100644 --- a/docs/ACME.md +++ b/docs/ACME.md @@ -27,7 +27,7 @@ feature to an external automated tool such as [certbot](https://github.com/certb for an alternative ACME client). It's also recommended to use a reverse proxy for the server-facing -communications (mode documentation about this can be found +communications (more documentation about this can be found [here](/docs/reverse_proxy.md)) as well as the client-facing ones and have it serve the certificates. From dc3f9987061e24bec77ad9e26a7679a1907cee4f Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Thu, 13 Feb 2020 06:02:32 -0600 Subject: [PATCH 206/213] Remove m.lazy_load_members from unstable features since it is in CS r0.5.0 (#6877) Fixes #5528 --- changelog.d/6877.removal | 1 + synapse/rest/client/versions.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/6877.removal diff --git a/changelog.d/6877.removal b/changelog.d/6877.removal new file mode 100644 index 0000000000..9545e31fbe --- /dev/null +++ b/changelog.d/6877.removal @@ -0,0 +1 @@ +Remove `m.lazy_load_members` from `unstable_features` since lazy loading is in the stable Client-Server API version r0.5.0. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 3d0fefb4df..3eeb3607f4 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -52,7 +52,6 @@ class VersionsRestServlet(RestServlet): ], # as per MSC1497: "unstable_features": { - "m.lazy_load_members": True, # as per MSC2190, as amended by MSC2264 # to be removed in r0.6.0 "m.id_access_token": True, From 361de49c90fd1f35adc4a6bca8206e50e7f15454 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 Feb 2020 07:40:57 -0500 Subject: [PATCH 207/213] Add documentation for the spam checker module (#6906) Add documentation for the spam checker. 
--- changelog.d/6906.doc | 1 + docs/spam_checker.md | 85 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 changelog.d/6906.doc create mode 100644 docs/spam_checker.md diff --git a/changelog.d/6906.doc b/changelog.d/6906.doc new file mode 100644 index 0000000000..053b2436ae --- /dev/null +++ b/changelog.d/6906.doc @@ -0,0 +1 @@ +Add documentation for the spam checker. diff --git a/docs/spam_checker.md b/docs/spam_checker.md new file mode 100644 index 0000000000..97ff17f952 --- /dev/null +++ b/docs/spam_checker.md @@ -0,0 +1,85 @@ +# Handling spam in Synapse + +Synapse has support to customize spam checking behavior. It can plug into a +variety of events and affect how they are presented to users on your homeserver. + +The spam checking behavior is implemented as a Python class, which must be +able to be imported by the running Synapse. + +## Python spam checker class + +The Python class is instantiated with two objects: + +* Any configuration (see below). +* An instance of `synapse.spam_checker_api.SpamCheckerApi`. + +It then implements methods which return a boolean to alter behavior in Synapse. + +There's a generic method for checking every event (`check_event_for_spam`), as +well as some specific methods: + +* `user_may_invite` +* `user_may_create_room` +* `user_may_create_room_alias` +* `user_may_publish_room` + +The details of each of these methods (as well as their inputs and outputs) +are documented in the `synapse.events.spamcheck.SpamChecker` class. + +The `SpamCheckerApi` class provides a way for the custom spam checker class to +call back into the homeserver internals. It currently implements the following +methods: + +* `get_state_events_in_room` + +### Example + +```python +class ExampleSpamChecker: + def __init__(self, config, api): + self.config = config + self.api = api + + def check_event_for_spam(self, foo): + return False # allow all events + + def user_may_invite(self, inviter_userid, invitee_userid, room_id): + return True # allow all invites + + def user_may_create_room(self, userid): + return True # allow all room creations + + def user_may_create_room_alias(self, userid, room_alias): + return True # allow all room aliases + + def user_may_publish_room(self, userid, room_id): + return True # allow publishing of all rooms +``` + +## Configuration + +Modify the `spam_checker` section of your `homeserver.yaml` in the following +manner: + +`module` should point to the fully qualified Python class that implements your +custom logic, e.g. `my_module.ExampleSpamChecker`. + +`config` is a dictionary that gets passed to the spam checker class. + +### Example + +This section might look like: + +```yaml +spam_checker: + module: my_module.ExampleSpamChecker + config: + # Enable or disable a specific option in ExampleSpamChecker. + my_custom_option: true +``` + +## Examples + +The [Mjolnir](https://github.com/matrix-org/mjolnir) project is a full-fledged +example using the Synapse spam checking API, including a bot for dynamic +configuration. 
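As a slightly less trivial companion to the `ExampleSpamChecker` above, the sketch below shows the `config` dictionary and the callbacks working together. The class name `DenylistSpamChecker` and its `denied_homeservers` option are invented for illustration and are not part of Synapse or of this patch:

```python
# Hypothetical spam checker (sketch only): rejects invites sent from
# homeservers listed under `denied_homeservers` in the module's config.
class DenylistSpamChecker:
    def __init__(self, config, api):
        # `config` is the dictionary from the `spam_checker.config` section.
        self.denied = set(config.get("denied_homeservers", []))
        self.api = api  # SpamCheckerApi, kept around for later use

    def check_event_for_spam(self, event):
        return False  # don't flag any events as spam

    def user_may_invite(self, inviter_userid, invitee_userid, room_id):
        # The server name is everything after the first colon of a user ID,
        # e.g. "@alice:example.com" -> "example.com". Returning False
        # rejects the invite.
        return inviter_userid.split(":", 1)[-1] not in self.denied

    def user_may_create_room(self, userid):
        return True

    def user_may_create_room_alias(self, userid, room_alias):
        return True

    def user_may_publish_room(self, userid, room_id):
        return True
```

It would be wired up with `module: my_module.DenylistSpamChecker` and a `denied_homeservers` list under `config`, following the YAML shape shown above.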
From df1c98c22a9ada46a2a103184aab3b5e08539b19 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 13 Feb 2020 16:12:20 +0000 Subject: [PATCH 208/213] Update changelog for #6905 to group it with upcoming PRs --- changelog.d/6905.doc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6905.doc b/changelog.d/6905.doc index ca10b38301..be0e698af8 100644 --- a/changelog.d/6905.doc +++ b/changelog.d/6905.doc @@ -1 +1 @@ -Mention in `ACME.md` that ACMEv1 is deprecated and explain what it means for Synapse admins. +Update Synapse's documentation to warn about the deprecation of ACME v1. From 49f877d32efc79cb40b2766cb052cf35bad31de5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 07:17:54 -0500 Subject: [PATCH 209/213] Filter the results of user directory searching via the spam checker (#6888) Add a method to the spam checker to filter the user directory results. --- changelog.d/6888.feature | 1 + docs/spam_checker.md | 3 + synapse/events/spamcheck.py | 27 ++++++++ synapse/handlers/user_directory.py | 14 +++- tests/handlers/test_user_directory.py | 92 +++++++++++++++++++++++++++ 5 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6888.feature diff --git a/changelog.d/6888.feature b/changelog.d/6888.feature new file mode 100644 index 0000000000..1b7ac0c823 --- /dev/null +++ b/changelog.d/6888.feature @@ -0,0 +1 @@ +The result of a user directory search can now be filtered via the spam checker. diff --git a/docs/spam_checker.md b/docs/spam_checker.md index 97ff17f952..5b5f5000b7 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md @@ -54,6 +54,9 @@ class ExampleSpamChecker: def user_may_publish_room(self, userid, room_id): return True # allow publishing of all rooms + + def check_username_for_spam(self, user_profile): + return False # allow all usernames ``` ## Configuration diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 5a907718d6..0a13fca9a4 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -15,6 +15,7 @@ # limitations under the License. import inspect +from typing import Dict from synapse.spam_checker_api import SpamCheckerApi @@ -125,3 +126,29 @@ class SpamChecker(object): return True return self.spam_checker.user_may_publish_room(userid, room_id) + + def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool: + """Checks if a user ID or display name is considered "spammy" by this server. + + If the server considers a username spammy, then it will not be included in + user directory results. + + Args: + user_profile: The user information to check; it contains the keys: + * user_id + * display_name + * avatar_url + + Returns: + True if the user is spammy. + """ + if self.spam_checker is None: + return False + + # For backwards compatibility, if the method does not exist on the spam checker, fall back to not interfering. + checker = getattr(self.spam_checker, "check_username_for_spam", None) + if not checker: + return False + # Make a copy of the user profile object to ensure the spam checker + # cannot modify it. 
+ return checker(user_profile.copy()) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 81aa58dc8c..722760c59d 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -52,6 +52,7 @@ class UserDirectoryHandler(StateDeltasHandler): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.update_user_directory self.search_all_users = hs.config.user_directory_search_all_users + self.spam_checker = hs.get_spam_checker() # The current position in the current_state_delta stream self.pos = None @@ -65,7 +66,7 @@ class UserDirectoryHandler(StateDeltasHandler): # we start populating the user directory self.clock.call_later(0, self.notify_new_event) - def search_users(self, user_id, search_term, limit): + async def search_users(self, user_id, search_term, limit): """Searches for users in directory Returns: @@ -82,7 +83,16 @@ class UserDirectoryHandler(StateDeltasHandler): ] } """ - return self.store.search_user_dir(user_id, search_term, limit) + results = await self.store.search_user_dir(user_id, search_term, limit) + + # Remove any spammy users from the results. + results["results"] = [ + user + for user in results["results"] + if not self.spam_checker.check_username_for_spam(user) + ] + + return results def notify_new_event(self): """Called when there may be more deltas to process diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 26071059d2..0a4765fff4 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -147,6 +147,98 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): s = self.get_success(self.handler.search_users(u1, "user3", 10)) self.assertEqual(len(s["results"]), 0) + def test_spam_checker(self): + """ + A user which fails to the spam checks will not appear in search results. + """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + + # We do not add users to the directory until they join a room. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + # Check we have populated the database correctly. + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + self.assertEqual( + self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + ) + self.assertEqual(public_users, []) + + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + + # Configure a spam checker that does not filter any users. + spam_checker = self.hs.get_spam_checker() + + class AllowAll(object): + def check_username_for_spam(self, user_profile): + # Allow all users. + return False + + spam_checker.spam_checker = AllowAll() + + # The results do not change: + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + + # Configure a spam checker that filters all users. + class BlockAll(object): + def check_username_for_spam(self, user_profile): + # All users are spammy. 
+ return True + + spam_checker.spam_checker = BlockAll() + + # User1 now gets no search results for any of the other users. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + def test_legacy_spam_checker(self): + """ + A spam checker without the expected method should be ignored. + """ + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + u2 = self.register_user("user2", "pass") + u2_token = self.login(u2, "pass") + + # We do not add users to the directory until they join a room. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + + room = self.helper.create_room_as(u1, is_public=False, tok=u1_token) + self.helper.invite(room, src=u1, targ=u2, tok=u1_token) + self.helper.join(room, user=u2, tok=u2_token) + + # Check we have populated the database correctly. + shares_private = self.get_users_who_share_private_rooms() + public_users = self.get_users_in_public_rooms() + + self.assertEqual( + self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)]) + ) + self.assertEqual(public_users, []) + + # Configure a spam checker. + spam_checker = self.hs.get_spam_checker() + # The spam checker doesn't need any methods, so create a bare object. + spam_checker.spam_checker = object() + + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + def _compress_shared(self, shared): """ Compress a list of users who share rooms dicts to a list of tuples. From 02e89021f58f931068ab0337de039181cc7f6569 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 09:05:43 -0500 Subject: [PATCH 210/213] Convert the directory handler tests to use HomeserverTestCase (#6919) Convert directory handler tests to use HomeserverTestCase. --- changelog.d/6919.misc | 1 + tests/handlers/test_directory.py | 41 +++++++++++++------------------- 2 files changed, 18 insertions(+), 24 deletions(-) create mode 100644 changelog.d/6919.misc diff --git a/changelog.d/6919.misc b/changelog.d/6919.misc new file mode 100644 index 0000000000..aa2cd89998 --- /dev/null +++ b/changelog.d/6919.misc @@ -0,0 +1 @@ +Convert the directory handler tests to use HomeserverTestCase. diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 91c7a17070..ee88cf5a4b 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -19,24 +19,16 @@ from mock import Mock from twisted.internet import defer from synapse.config.room_directory import RoomDirectoryConfig -from synapse.handlers.directory import DirectoryHandler from synapse.rest.client.v1 import directory, room from synapse.types import RoomAlias from tests import unittest -from tests.utils import setup_test_homeserver -class DirectoryHandlers(object): - def __init__(self, hs): - self.directory_handler = DirectoryHandler(hs) - - -class DirectoryTestCase(unittest.TestCase): +class DirectoryTestCase(unittest.HomeserverTestCase): """ Tests the directory service. 
""" - @defer.inlineCallbacks - def setUp(self): + def make_homeserver(self, reactor, clock): self.mock_federation = Mock() self.mock_registry = Mock() @@ -47,14 +39,12 @@ class DirectoryTestCase(unittest.TestCase): self.mock_registry.register_query_handler = register_query_handler - hs = yield setup_test_homeserver( - self.addCleanup, + hs = self.setup_test_homeserver( http_client=None, resource_for_federation=Mock(), federation_client=self.mock_federation, federation_registry=self.mock_registry, ) - hs.handlers = DirectoryHandlers(hs) self.handler = hs.get_handlers().directory_handler @@ -64,23 +54,25 @@ class DirectoryTestCase(unittest.TestCase): self.your_room = RoomAlias.from_string("#your-room:test") self.remote_room = RoomAlias.from_string("#another:remote") - @defer.inlineCallbacks + return hs + def test_get_local_association(self): - yield self.store.create_room_alias_association( - self.my_room, "!8765qwer:test", ["test"] + self.get_success( + self.store.create_room_alias_association( + self.my_room, "!8765qwer:test", ["test"] + ) ) - result = yield self.handler.get_association(self.my_room) + result = self.get_success(self.handler.get_association(self.my_room)) self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result) - @defer.inlineCallbacks def test_get_remote_association(self): self.mock_federation.make_query.return_value = defer.succeed( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} ) - result = yield self.handler.get_association(self.remote_room) + result = self.get_success(self.handler.get_association(self.remote_room)) self.assertEquals( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}, result @@ -93,14 +85,15 @@ class DirectoryTestCase(unittest.TestCase): ignore_backoff=True, ) - @defer.inlineCallbacks def test_incoming_fed_query(self): - yield self.store.create_room_alias_association( - self.your_room, "!8765asdf:test", ["test"] + self.get_success( + self.store.create_room_alias_association( + self.your_room, "!8765asdf:test", ["test"] + ) ) - response = yield self.query_handlers["directory"]( - {"room_alias": "#your-room:test"} + response = self.get_success( + self.handler.on_directory_query({"room_alias": "#your-room:test"}) ) self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) From 97a42bbc3a4789620c48746f8e87291446f6f5ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 14 Feb 2020 16:22:30 +0000 Subject: [PATCH 211/213] Add a warning about indentation to generated config (#6920) Fixes #6916. --- changelog.d/6920.misc | 1 + docs/.sample_config_header.yaml | 4 +++- docs/sample_config.yaml | 12 +++++++++++- synapse/config/_base.py | 16 ++++++++++++++-- 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6920.misc diff --git a/changelog.d/6920.misc b/changelog.d/6920.misc new file mode 100644 index 0000000000..d333add990 --- /dev/null +++ b/changelog.d/6920.misc @@ -0,0 +1 @@ +Add a warning about indentation to generated configuration files. diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml index e001ef5983..35a591d042 100644 --- a/docs/.sample_config_header.yaml +++ b/docs/.sample_config_header.yaml @@ -1,4 +1,4 @@ -# The config is maintained as an up-to-date snapshot of the default +# This file is maintained as an up-to-date snapshot of the default # homeserver.yaml configuration generated by Synapse. 
# # It is intended to act as a reference for the default configuration, @@ -10,3 +10,5 @@ # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in INSTALL.md. +################################################################################ + diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8e8cf513b0..93236daddc 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1,4 +1,4 @@ -# The config is maintained as an up-to-date snapshot of the default +# This file is maintained as an up-to-date snapshot of the default # homeserver.yaml configuration generated by Synapse. # # It is intended to act as a reference for the default configuration, @@ -10,6 +10,16 @@ # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in INSTALL.md. +################################################################################ + +# Configuration file for Synapse. +# +# This is a YAML file: see [1] for a quick introduction. Note in particular +# that *indentation is important*: all the elements of a list or dictionary +# should have the same indentation. +# +# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html + ## Server ## # The domain name of the server, with optional explicit port. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 08619404bb..ba846042c4 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -53,6 +53,18 @@ Missing mandatory `server_name` config option. """ +CONFIG_FILE_HEADER = """\ +# Configuration file for Synapse. +# +# This is a YAML file: see [1] for a quick introduction. Note in particular +# that *indentation is important*: all the elements of a list or dictionary +# should have the same indentation. +# +# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html + +""" + + def path_exists(file_path): """Check if a file exists @@ -344,7 +356,7 @@ class RootConfig(object): str: the yaml config file """ - return "\n\n".join( + return CONFIG_FILE_HEADER + "\n\n".join( dedent(conf) for conf in self.invoke_all( "generate_config_section", @@ -574,8 +586,8 @@ class RootConfig(object): if not path_exists(config_dir_path): os.makedirs(config_dir_path) with open(config_path, "w") as config_file: - config_file.write("# vim:ft=yaml\n\n") config_file.write(config_str) + config_file.write("\n\n# vim:ft=yaml") config_dict = yaml.safe_load(config_str) obj.generate_missing_files(config_dict, config_dir_path) From 32873efa87055518b08d9b3b001b3bf9b60437f9 Mon Sep 17 00:00:00 2001 From: Fridtjof Mund <2780577+fridtjof@users.noreply.github.com> Date: Fri, 14 Feb 2020 17:27:29 +0100 Subject: [PATCH 212/213] contrib/docker: Ensure correct encoding and locale settings on DB creation (#6921) Signed-off-by: Fridtjof Mund --- changelog.d/6921.docker | 1 + contrib/docker/docker-compose.yml | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/6921.docker diff --git a/changelog.d/6921.docker b/changelog.d/6921.docker new file mode 100644 index 0000000000..152e723339 --- /dev/null +++ b/changelog.d/6921.docker @@ -0,0 +1 @@ +Databases created using the compose file in contrib/docker will now always have correct encoding and locale settings. Contributed by Fridtjof Mund. 
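To confirm that a database created with these settings ended up with the right encoding and locale, its `pg_database` row can be inspected from Python. This is a minimal sketch, not part of the patch: it assumes psycopg2 is installed, and the host and database name are guesses (the postgres image names the database after POSTGRES_USER); the user and password come from the compose file changed below.

import psycopg2

# Connect with the credentials from the compose environment; host and
# dbname are assumptions for this sketch.
conn = psycopg2.connect(
    host="localhost", user="synapse", password="changeme", dbname="synapse"
)
try:
    with conn.cursor() as cur:
        cur.execute(
            "SELECT pg_encoding_to_char(encoding), datcollate, datctype"
            " FROM pg_database WHERE datname = %s",
            ("synapse",),
        )
        # With the POSTGRES_INITDB_ARGS added in the diff below, this
        # should print ('UTF8', 'C', 'C').
        print(cur.fetchone())
finally:
    conn.close()
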
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 2b044baf78..5df29379c8 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -56,6 +56,9 @@ services: environment: - POSTGRES_USER=synapse - POSTGRES_PASSWORD=changeme + # ensure the database gets created correctly + # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database + - POSTGRES_INITDB_ARGS="--encoding=UTF-8 --lc-collate=C --lc-ctype=C" volumes: # You may store the database tables in a local folder.. - ./schemas:/var/lib/postgresql/data From 10027c80b031f1e62b47cd61c534420673f49a71 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 14 Feb 2020 12:49:40 -0500 Subject: [PATCH 213/213] Add type hints to the spam check module (#6915) Add typing information to the spam checker modules. --- changelog.d/6915.misc | 1 + synapse/events/spamcheck.py | 44 ++++++++++++++++------------ synapse/spam_checker_api/__init__.py | 12 +++++--- tox.ini | 1 + 4 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 changelog.d/6915.misc diff --git a/changelog.d/6915.misc b/changelog.d/6915.misc new file mode 100644 index 0000000000..3a181ef243 --- /dev/null +++ b/changelog.d/6915.misc @@ -0,0 +1 @@ +Add type hints to the spam checker module. diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 0a13fca9a4..a23b6b7b61 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -19,9 +19,13 @@ from typing import Dict from synapse.spam_checker_api import SpamCheckerApi +MYPY = False +if MYPY: + import synapse.server + class SpamChecker(object): - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.spam_checker = None module = None @@ -41,7 +45,7 @@ class SpamChecker(object): else: self.spam_checker = module(config=config) - def check_event_for_spam(self, event): + def check_event_for_spam(self, event: "synapse.events.EventBase") -> bool: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -49,26 +53,30 @@ class SpamChecker(object): users receive a blank event. Args: - event (synapse.events.EventBase): the event to be checked + event: the event to be checked Returns: - bool: True if the event is spammy. + True if the event is spammy. """ if self.spam_checker is None: return False return self.spam_checker.check_event_for_spam(event) - def user_may_invite(self, inviter_userid, invitee_userid, room_id): + def user_may_invite( + self, inviter_userid: str, invitee_userid: str, room_id: str + ) -> bool: """Checks if a given user may send an invite If this method returns false, the invite will be rejected. Args: - userid (string): The sender's user ID + inviter_userid: The user ID of the sender of the invitation + invitee_userid: The user ID targeted in the invitation + room_id: The room ID Returns: - bool: True if the user may send an invite, otherwise False + True if the user may send an invite, otherwise False """ if self.spam_checker is None: return True @@ -77,50 +85,50 @@ class SpamChecker(object): inviter_userid, invitee_userid, room_id ) - def user_may_create_room(self, userid): + def user_may_create_room(self, userid: str) -> bool: """Checks if a given user may create a room If this method returns false, the creation request will be rejected. 
Args: - userid (string): The sender's user ID + userid: The ID of the user attempting to create a room Returns: - bool: True if the user may create a room, otherwise False + True if the user may create a room, otherwise False """ if self.spam_checker is None: return True return self.spam_checker.user_may_create_room(userid) - def user_may_create_room_alias(self, userid, room_alias): + def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool: """Checks if a given user may create a room alias If this method returns false, the association request will be rejected. Args: - userid (string): The sender's user ID - room_alias (string): The alias to be created + userid: The ID of the user attempting to create a room alias + room_alias: The alias to be created Returns: - bool: True if the user may create a room alias, otherwise False + True if the user may create a room alias, otherwise False """ if self.spam_checker is None: return True return self.spam_checker.user_may_create_room_alias(userid, room_alias) - def user_may_publish_room(self, userid, room_id): + def user_may_publish_room(self, userid: str, room_id: str) -> bool: """Checks if a given user may publish a room to the directory If this method returns false, the publish request will be rejected. Args: - userid (string): The sender's user ID - room_id (string): The ID of the room that would be published + userid: The user ID attempting to publish the room + room_id: The ID of the room that would be published Returns: - bool: True if the user may publish the room, otherwise False + True if the user may publish the room, otherwise False """ if self.spam_checker is None: return True diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index efcc10f808..9b78924d96 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -18,6 +18,10 @@ from twisted.internet import defer from synapse.storage.state import StateFilter +MYPY = False +if MYPY: + import synapse.server + logger = logging.getLogger(__name__) @@ -26,18 +30,18 @@ class SpamCheckerApi(object): access to rooms and other relevant information. """ - def __init__(self, hs): + def __init__(self, hs: "synapse.server.HomeServer"): self.hs = hs self._store = hs.get_datastore() @defer.inlineCallbacks - def get_state_events_in_room(self, room_id, types): + def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred: """Gets state events for the given room. Args: - room_id (string): The room ID to get state events in. - types (tuple): The event type and state key (using None + room_id: The room ID to get state events in. + types: The event type and state key (using None to represent 'any') of the room state to acquire. Returns: diff --git a/tox.ini b/tox.ini index f8229eba88..b9132a3177 100644 --- a/tox.ini +++ b/tox.ini @@ -179,6 +179,7 @@ extras = all commands = mypy \ synapse/api \ synapse/config/ \ + synapse/events/spamcheck.py \ synapse/federation/sender \ synapse/federation/transport \ synapse/handlers/sync.py \
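
With the spam checker interface now fully typed, a third-party module only has to provide methods with matching signatures; Synapse instantiates it as `module(config=config)` in `SpamChecker.__init__` above. The following is a minimal, hypothetical sketch (the class name and the `blocked_words` config key are invented for illustration, and only `check_event_for_spam` does any real work):

class ExampleSpamChecker(object):
    """A toy spam checker matching the typed interface in spamcheck.py."""

    def __init__(self, config):
        # `config` is the dict from the homeserver's `spam_checker` section;
        # `blocked_words` is a hypothetical key used only in this sketch.
        self._blocked_words = config.get("blocked_words", [])

    def check_event_for_spam(self, event) -> bool:
        # Returning True marks the event as spammy: reject any event whose
        # body mentions a blocked word.
        body = event.content.get("body", "")
        return any(word in body for word in self._blocked_words)

    def user_may_invite(
        self, inviter_userid: str, invitee_userid: str, room_id: str
    ) -> bool:
        return True  # allow all invites

    def user_may_create_room(self, userid: str) -> bool:
        return True

    def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool:
        return True

    def user_may_publish_room(self, userid: str, room_id: str) -> bool:
        return True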