2022-02-17 17:11:59 +01:00
|
|
|
# Copyright 2015-2022 The Matrix.org Foundation C.I.C.
|
2021-06-09 20:39:51 +02:00
|
|
|
# Copyright 2020 Sorunome
|
2015-01-26 11:45:24 +01:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
|
2017-12-30 19:40:19 +01:00
|
|
|
import copy
|
|
|
|
import itertools
|
|
|
|
import logging
|
2020-02-03 21:59:10 +01:00
|
|
|
from typing import (
|
2021-01-20 13:59:18 +01:00
|
|
|
TYPE_CHECKING,
|
2020-02-03 21:59:10 +01:00
|
|
|
Awaitable,
|
|
|
|
Callable,
|
2021-06-08 12:07:46 +02:00
|
|
|
Collection,
|
2021-07-29 13:50:14 +02:00
|
|
|
Container,
|
2020-02-03 21:59:10 +01:00
|
|
|
Dict,
|
|
|
|
Iterable,
|
|
|
|
List,
|
2020-09-29 16:57:36 +02:00
|
|
|
Mapping,
|
2020-02-03 21:59:10 +01:00
|
|
|
Optional,
|
2021-03-24 13:45:39 +01:00
|
|
|
Sequence,
|
2020-02-03 21:59:10 +01:00
|
|
|
Tuple,
|
|
|
|
TypeVar,
|
2020-09-29 16:57:36 +02:00
|
|
|
Union,
|
2020-02-03 21:59:10 +01:00
|
|
|
)
|
2017-12-30 19:40:19 +01:00
|
|
|
|
2021-03-24 13:45:39 +01:00
|
|
|
import attr
|
2018-07-09 08:09:20 +02:00
|
|
|
from prometheus_client import Counter
|
|
|
|
|
2021-09-30 17:13:59 +02:00
|
|
|
from synapse.api.constants import EventContentFields, EventTypes, Membership
|
2015-03-05 17:08:02 +01:00
|
|
|
from synapse.api.errors import (
|
2018-07-09 08:09:20 +02:00
|
|
|
CodeMessageException,
|
2019-02-23 15:31:08 +01:00
|
|
|
Codes,
|
2018-07-09 08:09:20 +02:00
|
|
|
FederationDeniedError,
|
|
|
|
HttpResponseException,
|
2021-08-23 14:00:25 +02:00
|
|
|
RequestSendFailed,
|
2018-07-09 08:09:20 +02:00
|
|
|
SynapseError,
|
2020-01-27 15:30:57 +01:00
|
|
|
UnsupportedRoomVersionError,
|
2015-03-05 17:08:02 +01:00
|
|
|
)
|
2019-04-01 11:24:38 +02:00
|
|
|
from synapse.api.room_versions import (
|
|
|
|
KNOWN_ROOM_VERSIONS,
|
|
|
|
EventFormatVersions,
|
2020-02-03 21:51:26 +01:00
|
|
|
RoomVersion,
|
2019-04-01 11:24:38 +02:00
|
|
|
RoomVersions,
|
|
|
|
)
|
2022-07-20 22:58:51 +02:00
|
|
|
from synapse.events import EventBase, builder, make_event_from_dict
|
2022-06-01 00:32:56 +02:00
|
|
|
from synapse.federation.federation_base import (
|
|
|
|
FederationBase,
|
|
|
|
InvalidEventSignatureError,
|
|
|
|
event_from_pdu_json,
|
|
|
|
)
|
2021-05-20 17:11:48 +02:00
|
|
|
from synapse.federation.transport.client import SendJoinResponse
|
2022-04-08 14:06:51 +02:00
|
|
|
from synapse.http.types import QueryParams
|
2022-08-24 04:53:37 +02:00
|
|
|
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
|
2022-02-22 16:10:10 +01:00
|
|
|
from synapse.types import JsonDict, UserID, get_domain_from_id
|
2021-06-08 12:07:46 +02:00
|
|
|
from synapse.util.async_helpers import concurrently_execute
|
2017-12-30 19:40:19 +01:00
|
|
|
from synapse.util.caches.expiringcache import ExpiringCache
|
2017-03-23 01:12:21 +01:00
|
|
|
from synapse.util.retryutils import NotRetryingDestination
|
2015-02-17 18:20:56 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
if TYPE_CHECKING:
|
2021-03-23 12:12:48 +01:00
|
|
|
from synapse.server import HomeServer
|
2021-01-20 13:59:18 +01:00
|
|
|
|
2018-05-22 02:47:37 +02:00
|
|
|
logger = logging.getLogger(__name__)
|
2015-03-10 16:29:22 +01:00
|
|
|
|
2018-05-22 02:47:37 +02:00
|
|
|
sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])
|
2015-02-24 19:10:44 +01:00
|
|
|
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2016-08-10 12:31:46 +02:00
|
|
|
PDU_RETRY_TIME_MS = 1 * 60 * 1000
|
|
|
|
|
2020-02-03 21:59:10 +01:00
|
|
|
T = TypeVar("T")
|
|
|
|
|
2016-08-10 12:31:46 +02:00
|
|
|
|
2022-10-26 23:10:55 +02:00
|
|
|
@attr.s(frozen=True, slots=True, auto_attribs=True)
class PulledPduInfo:
    """
    A result object that stores the PDU and info about it like which homeserver we
    pulled it from (`pull_origin`)
    """

    # The event that was pulled over federation.
    pdu: EventBase
    # Which homeserver we pulled the PDU from
    pull_origin: str
|
|
|
|
|
|
|
|
|
2018-08-01 12:24:19 +02:00
|
|
|
class InvalidResponseError(RuntimeError):
    """Raised (by helpers such as ``_try_destination_list``) when a remote
    server returned a response that we were unable to parse.
    """
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2021-07-26 18:17:00 +02:00
|
|
|
|
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SendJoinResult:
    """The parsed result of a successful `send_join` request."""

    # The event to persist.
    event: EventBase
    # A string giving the server the event was sent to.
    origin: str
    # State events returned in the send_join response.
    state: List[EventBase]
    # Auth-chain events returned in the send_join response.
    auth_chain: List[EventBase]

    # True if 'state' elides non-critical membership events
    partial_state: bool

    # if 'partial_state' is set, a list of the servers in the room (otherwise empty)
    servers_in_room: List[str]
|
|
|
|
|
2018-08-01 12:24:19 +02:00
|
|
|
|
2015-02-03 15:58:30 +01:00
|
|
|
class FederationClient(FederationBase):
|
2021-01-20 13:59:18 +01:00
|
|
|
    def __init__(self, hs: "HomeServer"):
        """Set up the federation client.

        Args:
            hs: The homeserver instance, from which we pull the state handler,
                transport layer, clock, hostname and signing key.
        """
        super().__init__(hs)

        # Map of event_id -> {destination: time_msec of the last fetch attempt}.
        # Used by `get_pdu` to avoid hammering a destination for the same event;
        # pruned once a minute by `_clear_tried_cache`.
        self.pdu_destination_tried: Dict[str, Dict[str, int]] = {}
        self._clock.looping_call(self._clear_tried_cache, 60 * 1000)
        self.state = hs.get_state_handler()
        self.transport_layer = hs.get_federation_transport_client()

        self.hostname = hs.hostname
        self.signing_key = hs.signing_key

        # Cache mapping `event_id` to a tuple of the event itself and the `pull_origin`
        # (which server we pulled the event from)
        self._get_pdu_cache: ExpiringCache[str, Tuple[EventBase, str]] = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120 * 1000,
            reset_expiry_on_get=False,
        )

        # A cache for fetching the room hierarchy over federation.
        #
        # Some stale data over federation is OK, but must be refreshed
        # periodically since the local server is in the room.
        #
        # It is a map of (room ID, suggested-only) -> the response of
        # get_room_hierarchy.
        self._get_room_hierarchy_cache: ExpiringCache[
            Tuple[str, bool],
            Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]],
        ] = ExpiringCache(
            cache_name="get_room_hierarchy_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=5 * 60 * 1000,
            reset_expiry_on_get=False,
        )
|
|
|
|
|
2021-12-02 17:18:10 +01:00
|
|
|
def _clear_tried_cache(self) -> None:
|
2016-08-10 12:31:46 +02:00
|
|
|
"""Clear pdu_destination_tried cache"""
|
|
|
|
now = self._clock.time_msec()
|
|
|
|
|
|
|
|
old_dict = self.pdu_destination_tried
|
|
|
|
self.pdu_destination_tried = {}
|
|
|
|
|
|
|
|
for event_id, destination_dict in old_dict.items():
|
|
|
|
destination_dict = {
|
|
|
|
dest: time
|
|
|
|
for dest, time in destination_dict.items()
|
|
|
|
if time + PDU_RETRY_TIME_MS > now
|
|
|
|
}
|
|
|
|
if destination_dict:
|
|
|
|
self.pdu_destination_tried[event_id] = destination_dict
|
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def make_query(
|
2019-06-20 11:32:02 +02:00
|
|
|
self,
|
2021-01-20 13:59:18 +01:00
|
|
|
destination: str,
|
|
|
|
query_type: str,
|
2022-04-08 14:06:51 +02:00
|
|
|
args: QueryParams,
|
2021-01-20 13:59:18 +01:00
|
|
|
retry_on_dns_fail: bool = False,
|
|
|
|
ignore_backoff: bool = False,
|
|
|
|
) -> JsonDict:
|
2015-01-26 11:45:24 +01:00
|
|
|
"""Sends a federation Query to a remote homeserver of the given type
|
|
|
|
and arguments.
|
|
|
|
|
|
|
|
Args:
|
2021-01-20 13:59:18 +01:00
|
|
|
destination: Domain name of the remote homeserver
|
|
|
|
query_type: Category of the query type; should match the
|
2015-01-26 11:45:24 +01:00
|
|
|
handler name used in register_query_handler().
|
2021-01-20 13:59:18 +01:00
|
|
|
args: Mapping of strings to strings containing the details
|
2015-01-26 11:45:24 +01:00
|
|
|
of the query request.
|
2021-01-20 13:59:18 +01:00
|
|
|
ignore_backoff: true to ignore the historical backoff data
|
2017-03-23 12:10:36 +01:00
|
|
|
and try the request anyway.
|
2015-01-26 11:45:24 +01:00
|
|
|
|
|
|
|
Returns:
|
2021-01-20 13:59:18 +01:00
|
|
|
The JSON object from the response
|
2015-01-26 11:45:24 +01:00
|
|
|
"""
|
2018-05-22 02:47:37 +02:00
|
|
|
sent_queries_counter.labels(query_type).inc()
|
2015-03-10 16:29:22 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
return await self.transport_layer.make_query(
|
2019-06-20 11:32:02 +02:00
|
|
|
destination,
|
|
|
|
query_type,
|
|
|
|
args,
|
|
|
|
retry_on_dns_fail=retry_on_dns_fail,
|
2017-03-23 12:10:36 +01:00
|
|
|
ignore_backoff=ignore_backoff,
|
2015-01-26 11:45:24 +01:00
|
|
|
)
|
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def query_client_keys(
|
|
|
|
self, destination: str, content: JsonDict, timeout: int
|
|
|
|
) -> JsonDict:
|
2015-07-23 17:03:38 +02:00
|
|
|
"""Query device keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
2021-01-20 13:59:18 +01:00
|
|
|
destination: Domain name of the remote homeserver
|
|
|
|
content: The query content.
|
2015-07-23 17:03:38 +02:00
|
|
|
|
|
|
|
Returns:
|
2021-01-20 13:59:18 +01:00
|
|
|
The JSON object from the response
|
2015-07-23 17:03:38 +02:00
|
|
|
"""
|
2018-05-22 02:47:37 +02:00
|
|
|
sent_queries_counter.labels("client_device_keys").inc()
|
2021-01-20 13:59:18 +01:00
|
|
|
return await self.transport_layer.query_client_keys(
|
|
|
|
destination, content, timeout
|
|
|
|
)
|
2015-07-23 17:03:38 +02:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def query_user_devices(
|
|
|
|
self, destination: str, user_id: str, timeout: int = 30000
|
|
|
|
) -> JsonDict:
|
2017-01-26 17:06:54 +01:00
|
|
|
"""Query the device keys for a list of user ids hosted on a remote
|
|
|
|
server.
|
|
|
|
"""
|
2018-05-22 02:47:37 +02:00
|
|
|
sent_queries_counter.labels("user_devices").inc()
|
2021-01-20 13:59:18 +01:00
|
|
|
return await self.transport_layer.query_user_devices(
|
|
|
|
destination, user_id, timeout
|
|
|
|
)
|
2017-01-26 17:06:54 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def claim_client_keys(
|
2022-07-19 20:14:30 +02:00
|
|
|
self, destination: str, content: JsonDict, timeout: Optional[int]
|
2021-01-20 13:59:18 +01:00
|
|
|
) -> JsonDict:
|
2015-07-23 17:03:38 +02:00
|
|
|
"""Claims one-time keys for a device hosted on a remote server.
|
|
|
|
|
|
|
|
Args:
|
2021-01-20 13:59:18 +01:00
|
|
|
destination: Domain name of the remote homeserver
|
|
|
|
content: The query content.
|
2015-07-23 17:03:38 +02:00
|
|
|
|
|
|
|
Returns:
|
2021-01-20 13:59:18 +01:00
|
|
|
The JSON object from the response
|
2015-07-23 17:03:38 +02:00
|
|
|
"""
|
2018-05-22 02:47:37 +02:00
|
|
|
sent_queries_counter.labels("client_one_time_keys").inc()
|
2021-01-20 13:59:18 +01:00
|
|
|
return await self.transport_layer.claim_client_keys(
|
|
|
|
destination, content, timeout
|
|
|
|
)
|
2015-07-23 17:03:38 +02:00
|
|
|
|
2022-08-03 17:57:38 +02:00
|
|
|
    @trace
    @tag_args
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: Collection[str]
    ) -> Optional[List[EventBase]]:
        """Requests some more historic PDUs for the given room from the
        given destination server.

        Args:
            dest: The remote homeserver to ask.
            room_id: The room_id to backfill.
            limit: The maximum number of events to return.
            extremities: our current backwards extremities, to backfill from
                Must be a Collection that is falsy when empty.
                (Iterable is not enough here!)

        Returns:
            A list of events pulled from `dest` that passed the signature and
            hash checks, or None if `extremities` was empty.

        Raises:
            TypeError: if the remote response is not a dict containing a list
                of PDUs.
        """
        logger.debug("backfill extrem=%s", extremities)

        # If there are no extremities then we've (probably) reached the start.
        if not extremities:
            return None

        transaction_data = await self.transport_layer.backfill(
            dest, room_id, extremities, limit
        )

        logger.debug("backfill transaction_data=%r", transaction_data)

        # Validate the shape of the response before trusting its contents.
        if not isinstance(transaction_data, dict):
            # TODO we probably want an exception type specific to federation
            # client validation.
            raise TypeError("Backfill transaction_data is not a dict.")

        transaction_data_pdus = transaction_data.get("pdus")
        if not isinstance(transaction_data_pdus, list):
            # TODO we probably want an exception type specific to federation
            # client validation.
            raise TypeError("transaction_data.pdus is not a list.")

        room_version = await self.store.get_room_version(room_id)

        pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]

        # Check signatures and hash of pdus, removing any from the list that fail checks
        pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
            dest, pdus, room_version=room_version
        )

        return pdus
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2021-11-09 22:07:57 +01:00
|
|
|
    async def get_pdu_from_destination_raw(
        self,
        destination: str,
        event_id: str,
        room_version: RoomVersion,
        timeout: Optional[int] = None,
    ) -> Optional[EventBase]:
        """Requests the PDU with given origin and ID from the remote home
        server. Does not have any caching or rate limiting!

        Args:
            destination: Which homeserver to query
            event_id: event to fetch
            room_version: version of the room
            timeout: How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            A copy of the requested PDU that is safe to modify, or None if we
            were unable to find it.

        Raises:
            SynapseError, NotRetryingDestination, FederationDeniedError
        """
        transaction_data = await self.transport_layer.get_event(
            destination, event_id, timeout=timeout
        )

        logger.debug(
            "get_pdu_from_destination_raw: retrieved event id %s from %s: %r",
            event_id,
            destination,
            transaction_data,
        )

        pdu_list: List[EventBase] = [
            event_from_pdu_json(p, room_version) for p in transaction_data["pdus"]
        ]

        # We only care about the first PDU in the response (if any).
        if pdu_list and pdu_list[0]:
            pdu = pdu_list[0]

            # Check signatures are correct.
            try:

                # If the signature check fails, record the failed pull attempt
                # against the event so that future pulls can back off.
                async def _record_failure_callback(
                    event: EventBase, cause: str
                ) -> None:
                    await self.store.record_event_failed_pull_attempt(
                        event.room_id, event.event_id, cause
                    )

                signed_pdu = await self._check_sigs_and_hash(
                    room_version, pdu, _record_failure_callback
                )
            except InvalidEventSignatureError as e:
                # Translate the signature failure into a 403 for our caller.
                errmsg = f"event id {pdu.event_id}: {e}"
                logger.warning("%s", errmsg)
                raise SynapseError(403, errmsg, Codes.FORBIDDEN)

            return signed_pdu

        return None
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
    @trace
    @tag_args
    async def get_pdu(
        self,
        destinations: Collection[str],
        event_id: str,
        room_version: RoomVersion,
        timeout: Optional[int] = None,
    ) -> Optional[PulledPduInfo]:
        """Requests the PDU with given origin and ID from the remote home
        servers.

        Will attempt to get the PDU from each destination in the list until
        one succeeds.

        Args:
            destinations: Which homeservers to query
            event_id: event to fetch
            room_version: version of the room
            timeout: How long to try (in ms) each destination for before
                moving to the next destination. None indicates no timeout.

        Returns:
            The requested PDU wrapped in `PulledPduInfo`, or None if we were unable to find it.
        """

        logger.debug(
            "get_pdu(event_id=%s): from destinations=%s", event_id, destinations
        )

        # TODO: Rate limit the number of times we try and get the same event.

        # We might need the same event multiple times in quick succession (before
        # it gets persisted to the database), so we cache the results of the lookup.
        # Note that this is separate to the regular get_event cache which caches
        # events once they have been persisted.
        get_pdu_cache_entry = self._get_pdu_cache.get(event_id)

        event = None
        pull_origin = None
        if get_pdu_cache_entry:
            event, pull_origin = get_pdu_cache_entry
        # If we don't see the event in the cache, go try to fetch it from the
        # provided remote federated destinations
        else:
            # Per-destination timestamps of our recent attempts for this event,
            # used below to skip destinations we asked too recently.
            pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})

            # TODO: We can probably refactor this to use `_try_destination_list`
            for destination in destinations:
                now = self._clock.time_msec()
                last_attempt = pdu_attempts.get(destination, 0)
                if last_attempt + PDU_RETRY_TIME_MS > now:
                    logger.debug(
                        "get_pdu(event_id=%s): skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)",
                        event_id,
                        destination,
                        last_attempt,
                        PDU_RETRY_TIME_MS,
                        now,
                    )
                    continue

                try:
                    event = await self.get_pdu_from_destination_raw(
                        destination=destination,
                        event_id=event_id,
                        room_version=room_version,
                        timeout=timeout,
                    )
                    pull_origin = destination

                    pdu_attempts[destination] = now

                    if event:
                        # Prime the cache
                        self._get_pdu_cache[event.event_id] = (event, pull_origin)

                        # Now that we have an event, we can break out of this
                        # loop and stop asking other destinations.
                        break

                # Destination-level failures are logged and we move on to the
                # next destination rather than aborting the whole lookup.
                except NotRetryingDestination as e:
                    logger.info("get_pdu(event_id=%s): %s", event_id, e)
                    continue
                except FederationDeniedError:
                    logger.info(
                        "get_pdu(event_id=%s): Not attempting to fetch PDU from %s because the homeserver is not on our federation whitelist",
                        event_id,
                        destination,
                    )
                    continue
                except SynapseError as e:
                    logger.info(
                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                        event_id,
                        destination,
                        e,
                    )
                    continue
                except Exception as e:
                    # Unknown failure: still record the attempt so we back off
                    # from this destination for a while.
                    pdu_attempts[destination] = now

                    logger.info(
                        "get_pdu(event_id=%s): Failed to get PDU from %s because %s",
                        event_id,
                        destination,
                        e,
                    )
                    continue

        if not event or not pull_origin:
            return None

        # `event` now refers to an object stored in `get_pdu_cache`. Our
        # callers may need to modify the returned object (eg to set
        # `event.internal_metadata.outlier = true`), so we return a copy
        # rather than the original object.
        event_copy = make_event_from_dict(
            event.get_pdu_json(),
            event.room_version,
        )

        return PulledPduInfo(event_copy, pull_origin)
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
    @trace
    @tag_args
    async def get_room_state_ids(
        self, destination: str, room_id: str, event_id: str
    ) -> Tuple[List[str], List[str]]:
        """Calls the /state_ids endpoint to fetch the state at a particular point
        in the room, and the auth events for the given event

        Args:
            destination: Which homeserver to query.
            room_id: The room we want the state of.
            event_id: The event at which to evaluate the room state.

        Returns:
            a tuple of (state event_ids, auth event_ids)

        Raises:
            InvalidResponseError: if fields in the response have the wrong type.
        """
        result = await self.transport_layer.get_room_state_ids(
            destination, room_id, event_id=event_id
        )

        state_event_ids = result["pdu_ids"]
        auth_event_ids = result.get("auth_chain_ids", [])

        # Attach the (potentially large) ID lists to the current trace span.
        set_tag(
            SynapseTags.RESULT_PREFIX + "state_event_ids",
            str(state_event_ids),
        )
        set_tag(
            SynapseTags.RESULT_PREFIX + "state_event_ids.length",
            str(len(state_event_ids)),
        )
        set_tag(
            SynapseTags.RESULT_PREFIX + "auth_event_ids",
            str(auth_event_ids),
        )
        set_tag(
            SynapseTags.RESULT_PREFIX + "auth_event_ids.length",
            str(len(auth_event_ids)),
        )

        if not isinstance(state_event_ids, list) or not isinstance(
            auth_event_ids, list
        ):
            raise InvalidResponseError("invalid response from /state_ids")

        return state_event_ids, auth_event_ids
|
2016-08-03 15:47:37 +02:00
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
    @trace
    @tag_args
    async def get_room_state(
        self,
        destination: str,
        room_id: str,
        event_id: str,
        room_version: RoomVersion,
    ) -> Tuple[List[EventBase], List[EventBase]]:
        """Calls the /state endpoint to fetch the state at a particular point
        in the room.

        Any invalid events (those with incorrect or unverifiable signatures or hashes)
        are filtered out from the response, and any duplicate events are removed.

        (Size limits and other event-format checks are *not* performed.)

        Note that the result is not ordered, so callers must be careful to process
        the events in an order that handles dependencies.

        Args:
            destination: Which homeserver to query.
            room_id: The room we want the state of.
            event_id: The event at which to evaluate the room state.
            room_version: The version of the room.

        Returns:
            a tuple of (state events, auth events)
        """
        result = await self.transport_layer.get_room_state(
            room_version,
            destination,
            room_id,
            event_id,
        )
        state_events = result.state
        auth_events = result.auth_events

        # we may as well filter out any duplicates from the response, to save
        # processing them multiple times. (In particular, events may be present in
        # `auth_events` as well as `state`, which is redundant).
        #
        # We don't rely on the sort order of the events, so we can just stick them
        # in a dict.
        state_event_map = {event.event_id: event for event in state_events}
        auth_event_map = {
            event.event_id: event
            for event in auth_events
            if event.event_id not in state_event_map
        }

        logger.info(
            "Processing from /state: %d state events, %d auth events",
            len(state_event_map),
            len(auth_event_map),
        )

        # Drop anything that fails signature/hash checks before returning.
        valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
            destination, auth_event_map.values(), room_version
        )

        valid_state_events = (
            await self._check_sigs_and_hash_for_pulled_events_and_fetch(
                destination, state_event_map.values(), room_version
            )
        )

        return valid_state_events, valid_auth_events
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
    @trace
    async def _check_sigs_and_hash_for_pulled_events_and_fetch(
        self,
        origin: str,
        pdus: Collection[EventBase],
        room_version: RoomVersion,
    ) -> List[EventBase]:
        """
        Checks the signatures and hashes of a list of pulled events we got from
        federation and records any signature failures as failed pull attempts.

        If a PDU fails its signature check then we check if we have it in
        the database, and if not then request it from the sender's server (if that
        is different from `origin`). If that still fails, the event is omitted from
        the returned list.

        If a PDU fails its content hash check then it is redacted.

        Also runs each event through the spam checker; if it fails, redacts the event
        and flags it as soft-failed.

        The given list of PDUs are not modified; instead the function returns
        a new list.

        Note: the checks run concurrently, so the returned list is not
        guaranteed to preserve the order of `pdus`.

        Args:
            origin: The server that sent us these events
            pdus: The events to be checked
            room_version: the version of the room these events are in

        Returns:
            A list of PDUs that have valid signatures and hashes.
        """
        set_tag(
            SynapseTags.RESULT_PREFIX + "pdus.length",
            str(len(pdus)),
        )

        # We limit how many PDUs we check at once, as if we try to do hundreds
        # of thousands of PDUs at once we see large memory spikes.

        valid_pdus: List[EventBase] = []

        # Record the failure so that future attempts to pull the same event
        # can be backed off.
        async def _record_failure_callback(event: EventBase, cause: str) -> None:
            await self.store.record_event_failed_pull_attempt(
                event.room_id, event.event_id, cause
            )

        async def _execute(pdu: EventBase) -> None:
            valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
                pdu=pdu,
                origin=origin,
                room_version=room_version,
                record_failure_callback=_record_failure_callback,
            )

            if valid_pdu:
                valid_pdus.append(valid_pdu)

        # Check at most 10000 PDUs concurrently (see the memory note above).
        await concurrently_execute(_execute, pdus, 10000)

        return valid_pdus
|
2020-03-17 13:04:49 +01:00
|
|
|
|
2022-08-24 04:53:37 +02:00
|
|
|
    @trace
    @tag_args
    async def _check_sigs_and_hash_and_fetch_one(
        self,
        pdu: EventBase,
        origin: str,
        room_version: RoomVersion,
        record_failure_callback: Optional[
            Callable[[EventBase, str], Awaitable[None]]
        ] = None,
    ) -> Optional[EventBase]:
        """Takes a PDU and checks its signatures and hashes.

        If the PDU fails its signature check then we check if we have it in the
        database; if not, we then request it from sender's server (if that is not the
        same as `origin`). If that still fails, we return None.

        If the PDU fails its content hash check, it is redacted.

        Also runs the event through the spam checker; if it fails, redacts the event
        and flags it as soft-failed.

        Args:
            pdu: the event to be checked
            origin: the server we received this event from
            room_version: the version of the room the event belongs to
            record_failure_callback: A callback to run whenever the given event
                fails signature or hash checks. This includes exceptions
                that would normally be thrown/raised but also things like
                checking for event tampering where we just return the redacted
                event.

        Returns:
            The PDU (possibly redacted) if it has valid signatures and hashes.
            None if no valid copy could be found.
        """

        try:
            return await self._check_sigs_and_hash(
                room_version, pdu, record_failure_callback
            )
        except InvalidEventSignatureError as e:
            logger.warning(
                "Signature on retrieved event %s was invalid (%s). "
                "Checking local store/origin server",
                pdu.event_id,
                e,
            )
            log_kv(
                {
                    "message": "Signature on retrieved event was invalid. "
                    "Checking local store/origin server",
                    "event_id": pdu.event_id,
                    "InvalidEventSignatureError": e,
                }
            )

            # Check local db.
            res = await self.store.get_event(
                pdu.event_id, allow_rejected=True, allow_none=True
            )

            # If the PDU fails its signature check and we don't have it in our
            # database, we then request it from sender's server (if that is not the
            # same as `origin`).
            pdu_origin = get_domain_from_id(pdu.sender)
            if not res and pdu_origin != origin:
                try:
                    pulled_pdu_info = await self.get_pdu(
                        destinations=[pdu_origin],
                        event_id=pdu.event_id,
                        room_version=room_version,
                        timeout=10000,
                    )
                    if pulled_pdu_info is not None:
                        res = pulled_pdu_info.pdu
                except SynapseError:
                    # Best-effort: if re-fetching fails we simply fall through
                    # and return whatever (if anything) we found locally.
                    pass

            if not res:
                logger.warning(
                    "Failed to find copy of %s with valid signature", pdu.event_id
                )

            return res
|
2020-03-17 13:04:49 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def get_event_auth(
|
|
|
|
self, destination: str, room_id: str, event_id: str
|
|
|
|
) -> List[EventBase]:
|
2020-02-03 21:43:40 +01:00
|
|
|
res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2020-01-31 17:50:13 +01:00
|
|
|
room_version = await self.store.get_room_version(room_id)
|
2019-01-23 21:21:33 +01:00
|
|
|
|
Refactor the way we set `outlier` (#11634)
* `_auth_and_persist_outliers`: mark persisted events as outliers
Mark any events that get persisted via `_auth_and_persist_outliers` as, well,
outliers.
Currently this will be a no-op as everything will already be flagged as an
outlier, but I'm going to change that.
* `process_remote_join`: stop flagging as outlier
The events are now flagged as outliers later on, by `_auth_and_persist_outliers`.
* `send_join`: remove `outlier=True`
The events created here are returned in the result of `send_join` to
`FederationHandler.do_invite_join`. From there they are passed into
`FederationEventHandler.process_remote_join`, which passes them to
`_auth_and_persist_outliers`... which sets the `outlier` flag.
* `get_event_auth`: remove `outlier=True`
stop flagging the events returned by `get_event_auth` as outliers. This method
is only called by `_get_remote_auth_chain_for_event`, which passes the results
into `_auth_and_persist_outliers`, which will flag them as outliers.
* `_get_remote_auth_chain_for_event`: remove `outlier=True`
we pass all the events into `_auth_and_persist_outliers`, which will now flag
the events as outliers.
* `_check_sigs_and_hash_and_fetch`: remove unused `outlier` parameter
This param is now never set to True, so we can remove it.
* `_check_sigs_and_hash_and_fetch_one`: remove unused `outlier` param
This is no longer set anywhere, so we can remove it.
* `get_pdu`: remove unused `outlier` parameter
... and chase it down into `get_pdu_from_destination_raw`.
* `event_from_pdu_json`: remove redundant `outlier` param
This is never set to `True`, so can be removed.
* changelog
* update docstring
2022-01-05 13:26:11 +01:00
|
|
|
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2022-10-03 21:53:29 +02:00
|
|
|
signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
|
Refactor the way we set `outlier` (#11634)
* `_auth_and_persist_outliers`: mark persisted events as outliers
Mark any events that get persisted via `_auth_and_persist_outliers` as, well,
outliers.
Currently this will be a no-op as everything will already be flagged as an
outlier, but I'm going to change that.
* `process_remote_join`: stop flagging as outlier
The events are now flagged as outliers later on, by `_auth_and_persist_outliers`.
* `send_join`: remove `outlier=True`
The events created here are returned in the result of `send_join` to
`FederationHandler.do_invite_join`. From there they are passed into
`FederationEventHandler.process_remote_join`, which passes them to
`_auth_and_persist_outliers`... which sets the `outlier` flag.
* `get_event_auth`: remove `outlier=True`
stop flagging the events returned by `get_event_auth` as outliers. This method
is only called by `_get_remote_auth_chain_for_event`, which passes the results
into `_auth_and_persist_outliers`, which will flag them as outliers.
* `_get_remote_auth_chain_for_event`: remove `outlier=True`
we pass all the events into `_auth_and_persist_outliers`, which will now flag
the events as outliers.
* `_check_sigs_and_hash_and_fetch`: remove unused `outlier` parameter
This param is now never set to True, so we can remove it.
* `_check_sigs_and_hash_and_fetch_one`: remove unused `outlier` param
This is no longer set anywhere, so we can remove it.
* `get_pdu`: remove unused `outlier` parameter
... and chase it down into `get_pdu_from_destination_raw`.
* `event_from_pdu_json`: remove redundant `outlier` param
This is never set to `True`, so can be removed.
* changelog
* update docstring
2022-01-05 13:26:11 +01:00
|
|
|
destination, auth_chain, room_version=room_version
|
2015-02-02 17:56:01 +01:00
|
|
|
)
|
2015-01-26 15:33:11 +01:00
|
|
|
|
2019-07-23 15:00:55 +02:00
|
|
|
return signed_auth
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2021-04-27 13:30:34 +02:00
|
|
|
def _is_unknown_endpoint(
|
|
|
|
self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
|
|
|
|
) -> bool:
|
|
|
|
"""
|
|
|
|
Returns true if the response was due to an endpoint being unimplemented.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
e: The error response received from the remote server.
|
|
|
|
synapse_error: The above error converted to a SynapseError. This is
|
|
|
|
automatically generated if not provided.
|
|
|
|
|
|
|
|
"""
|
|
|
|
if synapse_error is None:
|
|
|
|
synapse_error = e.to_synapse_error()
|
2022-12-06 13:23:03 +01:00
|
|
|
# MSC3743 specifies that servers should return a 404 or 405 with an errcode
|
|
|
|
# of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
|
|
|
|
# to an unknown method, respectively.
|
2021-04-27 13:30:34 +02:00
|
|
|
#
|
2022-12-06 13:23:03 +01:00
|
|
|
# Older versions of servers don't properly handle this. This needs to be
|
|
|
|
# rather specific as some endpoints truly do return 404 errors.
|
2022-02-28 13:52:44 +01:00
|
|
|
return (
|
2022-12-06 13:23:03 +01:00
|
|
|
# 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
|
|
|
|
(e.code == 404 or e.code == 405)
|
|
|
|
and (
|
|
|
|
# Older Dendrites returned a text or empty body.
|
|
|
|
# Older Conduit returned an empty body.
|
|
|
|
not e.response
|
|
|
|
or e.response == b"404 page not found"
|
|
|
|
# The proper response JSON with M_UNRECOGNIZED errcode.
|
|
|
|
or synapse_error.errcode == Codes.UNRECOGNIZED
|
|
|
|
)
|
|
|
|
) or (
|
|
|
|
# Older Synapses returned a 400 error.
|
|
|
|
e.code == 400
|
|
|
|
and synapse_error.errcode == Codes.UNRECOGNIZED
|
|
|
|
)
|
2021-04-27 13:30:34 +02:00
|
|
|
|
2020-02-03 21:59:10 +01:00
|
|
|
    async def _try_destination_list(
        self,
        description: str,
        destinations: Iterable[str],
        callback: Callable[[str], Awaitable[T]],
        failover_errcodes: Optional[Container[str]] = None,
        failover_on_unknown_endpoint: bool = False,
    ) -> T:
        """Try an operation on a series of servers, until it succeeds

        Args:
            description: description of the operation we're doing, for logging

            destinations: list of server_names to try

            callback: Function to run for each server. Passed a single
                argument: the server_name to try.

                If the callback raises a CodeMessageException with a 300/400 code or
                an UnsupportedRoomVersionError, attempts to perform the operation
                stop immediately and the exception is reraised.

                Otherwise, if the callback raises an Exception the error is logged and the
                next server tried. Normally the stacktrace is logged but this is
                suppressed if the exception is an InvalidResponseError.

            failover_errcodes: Error codes (specific to this endpoint) which should
                cause a failover when received as part of an HTTP 400 error.

            failover_on_unknown_endpoint: if True, we will try other servers if it looks
                like a server doesn't support the endpoint. This is typically useful
                if the endpoint in question is new or experimental.

        Returns:
            The result of callback, if it succeeds

        Raises:
            SynapseError if the chosen remote server returns a 300/400 code, or
                no servers were reachable.
        """
        # Default: no endpoint-specific errcodes trigger failover.
        if failover_errcodes is None:
            failover_errcodes = ()

        if not destinations:
            # Give a bit of a clearer message if no servers were specified at all.
            raise SynapseError(
                502, f"Failed to {description} via any server: No servers specified."
            )

        for destination in destinations:
            # We don't want to ask our own server for information we don't have
            if destination == self.server_name:
                continue

            try:
                return await callback(destination)
            except (
                RequestSendFailed,
                InvalidResponseError,
            ) as e:
                logger.warning("Failed to %s via %s: %s", description, destination, e)
                # Skip to the next homeserver in the list to try.
                continue
            except NotRetryingDestination as e:
                # Destination is currently backed off; try the next one.
                logger.info("%s: %s", description, e)
                continue
            except FederationDeniedError:
                logger.info(
                    "%s: Not attempting to %s from %s because the homeserver is not on our federation whitelist",
                    description,
                    description,
                    destination,
                )
                continue
            except UnsupportedRoomVersionError:
                # Not a per-server problem: no other destination will help.
                raise
            except HttpResponseException as e:
                synapse_error = e.to_synapse_error()
                failover = False

                # Failover should occur:
                #
                # * On internal server errors.
                # * If the destination responds that it cannot complete the request.
                # * If the destination doesn't implemented the endpoint for some reason.
                if 500 <= e.code < 600:
                    failover = True

                elif e.code == 400 and synapse_error.errcode in failover_errcodes:
                    failover = True

                elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
                    e, synapse_error
                ):
                    failover = True

                if not failover:
                    raise synapse_error from e

                logger.warning(
                    "Failed to %s via %s: %i %s",
                    description,
                    destination,
                    e.code,
                    e.args[0],
                )
            except Exception:
                logger.warning(
                    "Failed to %s via %s", description, destination, exc_info=True
                )

        raise SynapseError(502, f"Failed to {description} via any server")
|
2018-08-01 12:24:19 +02:00
|
|
|
|
2020-02-03 21:51:26 +01:00
|
|
|
    async def make_membership_event(
        self,
        destinations: Iterable[str],
        room_id: str,
        user_id: str,
        membership: str,
        content: dict,
        params: Optional[Mapping[str, Union[str, Iterable[str]]]],
    ) -> Tuple[str, EventBase, RoomVersion]:
        """
        Creates an m.room.member event, with context, without participating in the room.

        Does so by asking one of the already participating servers to create an
        event with proper context.

        Returns a fully signed and hashed event.

        Note that this does not append any events to any graphs.

        Args:
            destinations: Candidate homeservers which are probably
                participating in the room.
            room_id: The room in which the event will happen.
            user_id: The user whose membership is being evented.
            membership: The "membership" property of the event. Must be one of
                "join", "leave" or "knock".
            content: Any additional data to put into the content field of the
                event.
            params: Query parameters to include in the request.

        Returns:
            `(origin, event, room_version)` where origin is the remote
            homeserver which generated the event, and room_version is the
            version of the room.

        Raises:
            UnsupportedRoomVersionError: if remote responds with
                a room version we don't understand.

            SynapseError: if the chosen remote server returns a 300/400 code, or
                no servers successfully handle the request.
        """
        valid_memberships = {Membership.JOIN, Membership.LEAVE, Membership.KNOCK}

        if membership not in valid_memberships:
            raise RuntimeError(
                "make_membership_event called with membership='%s', must be one of %s"
                % (membership, ",".join(valid_memberships))
            )

        async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
            ret = await self.transport_layer.make_membership_event(
                destination, room_id, user_id, membership, params
            )

            # Note: If not supplied, the room version may be either v1 or v2,
            # however either way the event format version will be v1.
            room_version_id = ret.get("room_version", RoomVersions.V1.identifier)
            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version:
                raise UnsupportedRoomVersionError()

            if not room_version.msc2403_knocking and membership == Membership.KNOCK:
                raise SynapseError(
                    400,
                    "This room version does not support knocking",
                    errcode=Codes.FORBIDDEN,
                )

            pdu_dict = ret.get("event", None)
            if not isinstance(pdu_dict, dict):
                raise InvalidResponseError("Bad 'event' field in response")

            logger.debug("Got response to make_%s: %s", membership, pdu_dict)

            # Merge the caller-supplied content into the remote's proto-event.
            pdu_dict["content"].update(content)

            # The protoevent received over the JSON wire may not have all
            # the required fields. Lets just gloss over that because
            # there's some we never care about
            ev = builder.create_local_event_from_event_dict(
                self._clock,
                self.hostname,
                self.signing_key,
                room_version=room_version,
                event_dict=pdu_dict,
            )

            return destination, ev, room_version

        # MSC3083 defines additional error codes for room joins. Unfortunately
        # we do not yet know the room version, assume these will only be returned
        # by valid room versions.
        failover_errcodes = (
            (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN)
            if membership == Membership.JOIN
            else None
        )

        return await self._try_destination_list(
            "make_" + membership,
            destinations,
            send_request,
            failover_errcodes=failover_errcodes,
        )
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2020-02-03 21:55:00 +01:00
|
|
|
    async def send_join(
        self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
    ) -> SendJoinResult:
        """Sends a join event to one of a list of homeservers.

        Doing so will cause the remote server to add the event to the graph,
        and send the event out to the rest of the federation.

        Args:
            destinations: Candidate homeservers which are probably
                participating in the room.
            pdu: event to be sent
            room_version: the version of the room (according to the server that
                did the make_join)

        Returns:
            The result of the send join request.

        Raises:
            SynapseError: if the chosen remote server returns a 300/400 code, or
                no servers successfully handle the request.
        """

        async def send_request(destination: str) -> SendJoinResult:
            response = await self._do_send_join(room_version, destination, pdu)

            # If an event was returned (and expected to be returned):
            #
            # * Ensure it has the same event ID (note that the event ID is a hash
            #   of the event fields for versions which support MSC3083).
            # * Ensure the signatures are good.
            #
            # Otherwise, fallback to the provided event.
            if room_version.msc3083_join_rules and response.event:
                event = response.event

                valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
                    pdu=event,
                    origin=destination,
                    room_version=room_version,
                )

                if valid_pdu is None or event.event_id != pdu.event_id:
                    raise InvalidResponseError("Returned an invalid join event")
            else:
                event = pdu

            state = response.state
            auth_chain = response.auth_events

            # The room's create event must be part of the returned state.
            create_event = None
            for e in state:
                if (e.type, e.state_key) == (EventTypes.Create, ""):
                    create_event = e
                    break

            if create_event is None:
                # If the state doesn't have a create event then the room is
                # invalid, and it would fail auth checks anyway.
                raise InvalidResponseError("No create event in state")

            # the room version should be sane.
            create_room_version = create_event.content.get(
                "room_version", RoomVersions.V1.identifier
            )
            if create_room_version != room_version.identifier:
                # either the server that fulfilled the make_join, or the server that is
                # handling the send_join, is lying.
                raise InvalidResponseError(
                    "Unexpected room version %s in create event"
                    % (create_room_version,)
                )

            logger.info(
                "Processing from send_join %d events", len(state) + len(auth_chain)
            )

            # We now go and check the signatures and hashes for the event. Note
            # that we limit how many events we process at a time to keep the
            # memory overhead from exploding.
            valid_pdus_map: Dict[str, EventBase] = {}

            async def _execute(pdu: EventBase) -> None:
                valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
                    pdu=pdu,
                    origin=destination,
                    room_version=room_version,
                )

                if valid_pdu:
                    valid_pdus_map[valid_pdu.event_id] = valid_pdu

            await concurrently_execute(
                _execute, itertools.chain(state, auth_chain), 10000
            )

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            signed_state = [
                copy.copy(valid_pdus_map[p.event_id])
                for p in state
                if p.event_id in valid_pdus_map
            ]

            signed_auth = [
                valid_pdus_map[p.event_id]
                for p in auth_chain
                if p.event_id in valid_pdus_map
            ]

            # NB: We *need* to copy to ensure that we don't have multiple
            # references being passed on, as that causes... issues.
            for s in signed_state:
                s.internal_metadata = copy.deepcopy(s.internal_metadata)

            # double-check that the auth chain doesn't include a different create event
            auth_chain_create_events = [
                e.event_id
                for e in signed_auth
                if (e.type, e.state_key) == (EventTypes.Create, "")
            ]
            if auth_chain_create_events and auth_chain_create_events != [
                create_event.event_id
            ]:
                raise InvalidResponseError(
                    "Unexpected create event(s) in auth chain: %s"
                    % (auth_chain_create_events,)
                )

            # A partial-state response is only usable if we know which servers
            # to ask for the rest of the state.
            if response.partial_state and not response.servers_in_room:
                raise InvalidResponseError(
                    "partial_state was set, but no servers were listed in the room"
                )

            return SendJoinResult(
                event=event,
                state=signed_state,
                auth_chain=signed_auth,
                origin=destination,
                partial_state=response.partial_state,
                servers_in_room=response.servers_in_room or [],
            )

        # MSC3083 defines additional error codes for room joins.
        failover_errcodes = None
        if room_version.msc3083_join_rules:
            failover_errcodes = (
                Codes.UNABLE_AUTHORISE_JOIN,
                Codes.UNABLE_TO_GRANT_JOIN,
            )

            # If the join is being authorised via allow rules, we need to send
            # the /send_join back to the same server that was originally used
            # with /make_join.
            if EventContentFields.AUTHORISING_USER in pdu.content:
                destinations = [
                    get_domain_from_id(pdu.content[EventContentFields.AUTHORISING_USER])
                ]

        return await self._try_destination_list(
            "send_join", destinations, send_request, failover_errcodes=failover_errcodes
        )
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2021-05-20 17:11:48 +02:00
|
|
|
async def _do_send_join(
|
|
|
|
self, room_version: RoomVersion, destination: str, pdu: EventBase
|
|
|
|
) -> SendJoinResponse:
|
2019-11-11 16:47:47 +01:00
|
|
|
time_now = self._clock.time_msec()
|
|
|
|
|
|
|
|
try:
|
2021-01-20 13:59:18 +01:00
|
|
|
return await self.transport_layer.send_join_v2(
|
2021-05-20 17:11:48 +02:00
|
|
|
room_version=room_version,
|
2019-11-11 16:47:47 +01:00
|
|
|
destination=destination,
|
|
|
|
room_id=pdu.room_id,
|
|
|
|
event_id=pdu.event_id,
|
|
|
|
content=pdu.get_pdu_json(time_now),
|
|
|
|
)
|
|
|
|
except HttpResponseException as e:
|
2021-04-27 13:30:34 +02:00
|
|
|
# If an error is received that is due to an unrecognised endpoint,
|
2022-02-28 13:52:44 +01:00
|
|
|
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
|
2021-04-27 13:30:34 +02:00
|
|
|
# and raise.
|
|
|
|
if not self._is_unknown_endpoint(e):
|
|
|
|
raise
|
2019-11-11 16:47:47 +01:00
|
|
|
|
|
|
|
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
|
|
|
|
|
2021-05-20 17:11:48 +02:00
|
|
|
return await self.transport_layer.send_join_v1(
|
|
|
|
room_version=room_version,
|
2019-11-11 16:47:47 +01:00
|
|
|
destination=destination,
|
|
|
|
room_id=pdu.room_id,
|
|
|
|
event_id=pdu.event_id,
|
|
|
|
content=pdu.get_pdu_json(time_now),
|
|
|
|
)
|
|
|
|
|
2020-02-05 16:47:00 +01:00
|
|
|
async def send_invite(
    self,
    destination: str,
    room_id: str,
    event_id: str,
    pdu: EventBase,
) -> EventBase:
    """Send an invite event to a remote homeserver and return the signed
    event the remote server responds with.

    Args:
        destination: the homeserver to send the invite to
        room_id: the room the invite is for
        event_id: the ID of the invite event
        pdu: the invite event itself

    Returns:
        The invite event as returned by the remote server, after its
        signatures and hashes have been checked.

    Raises:
        SynapseError: with a 403 code if the returned event fails the
            signature/hash checks.
    """
    room_version = await self.store.get_room_version(room_id)

    content = await self._do_send_invite(destination, pdu, room_version)

    pdu_dict = content["event"]

    logger.debug("Got response to send_invite: %s", pdu_dict)

    pdu = event_from_pdu_json(pdu_dict, room_version)

    # Check signatures are correct.
    try:
        pdu = await self._check_sigs_and_hash(room_version, pdu)
    except InvalidEventSignatureError as e:
        errmsg = f"event id {pdu.event_id}: {e}"
        logger.warning("%s", errmsg)
        raise SynapseError(403, errmsg, Codes.FORBIDDEN)

    # FIXME: We should handle signature failures more gracefully.

    return pdu
|
2015-01-26 11:45:24 +01:00
|
|
|
|
2020-02-05 16:49:42 +01:00
|
|
|
async def _do_send_invite(
    self, destination: str, pdu: EventBase, room_version: RoomVersion
) -> JsonDict:
    """Actually sends the invite, first trying v2 API and falling back to
    v1 API if necessary.

    Args:
        destination: the remote homeserver to send the invite to
        pdu: the invite event
        room_version: the version of the room the invite is for

    Returns:
        The event as a dict as returned by the remote server

    Raises:
        SynapseError: if the remote server returns an error or if the server
            only supports the v1 endpoint and a room version other than "1"
            or "2" is requested.
    """
    time_now = self._clock.time_msec()

    try:
        return await self.transport_layer.send_invite_v2(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content={
                "event": pdu.get_pdu_json(time_now),
                "room_version": room_version.identifier,
                "invite_room_state": pdu.unsigned.get("invite_room_state", []),
            },
        )
    except HttpResponseException as e:
        # If an error is received that is due to an unrecognised endpoint,
        # fallback to the v1 endpoint if the room uses old-style event IDs.
        # Otherwise, consider it a legitimate error and raise.
        err = e.to_synapse_error()
        if self._is_unknown_endpoint(e, err):
            if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
                # Unlike v2, the v1 request below does not carry the room
                # version, so falling back is only safe for rooms using the
                # old (v1/v2) event format.
                raise SynapseError(
                    400,
                    "User's homeserver does not support this room version",
                    Codes.UNSUPPORTED_ROOM_VERSION,
                )
        else:
            raise err

    # Didn't work, try v1 API.
    # Note the v1 API returns a tuple of `(200, content)`

    _, content = await self.transport_layer.send_invite_v1(
        destination=destination,
        room_id=pdu.room_id,
        event_id=pdu.event_id,
        content=pdu.get_pdu_json(time_now),
    )
    return content
|
2019-01-28 15:55:53 +01:00
|
|
|
|
2020-02-03 21:55:11 +01:00
|
|
|
async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
    """Send a leave event to one homeserver out of a list of candidates.

    The receiving server will add the event to the room graph and relay it
    out to the rest of the federation. This is mostly useful for rejecting
    invites we have received.

    Args:
        destinations: Candidate homeservers which are probably
            participating in the room.
        pdu: event to be sent

    Raises:
        SynapseError: if the chosen remote server returns a 300/400 code, or
            no servers successfully handle the request.
    """

    async def _send_leave_to(destination: str) -> None:
        # Per-destination attempt; _try_destination_list drives the failover.
        response_content = await self._do_send_leave(destination, pdu)
        logger.debug("Got content: %s", response_content)

    return await self._try_destination_list(
        "send_leave", destinations, _send_leave_to
    )
|
2019-11-11 17:26:53 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict:
    """Attempt to send a leave event to a single remote homeserver.

    Tries the v2 `/send_leave` endpoint first and falls back to the v1
    endpoint if the remote server does not recognise the v2 one.

    Args:
        destination: the remote homeserver to send the request to
        pdu: the leave event to send

    Returns:
        The content of the remote server's response.
    """
    time_now = self._clock.time_msec()

    try:
        return await self.transport_layer.send_leave_v2(
            destination=destination,
            room_id=pdu.room_id,
            event_id=pdu.event_id,
            content=pdu.get_pdu_json(time_now),
        )
    except HttpResponseException as e:
        # If an error is received that is due to an unrecognised endpoint,
        # fallback to the v1 endpoint. Otherwise, consider it a legitimate error
        # and raise.
        if not self._is_unknown_endpoint(e):
            raise

    logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")

    resp = await self.transport_layer.send_leave_v1(
        destination=destination,
        room_id=pdu.room_id,
        event_id=pdu.event_id,
        content=pdu.get_pdu_json(time_now),
    )

    # We expect the v1 API to respond with [200, content], so we only return the
    # content.
    return resp[1]
|
2015-10-20 12:58:58 +02:00
|
|
|
|
2021-06-09 20:39:51 +02:00
|
|
|
async def send_knock(self, destinations: List[str], pdu: EventBase) -> JsonDict:
    """Try to send a knock event via each of the given servers in turn,
    stopping at the first attempt that succeeds.

    Doing so will cause the remote server to add the event to the graph,
    and send the event out to the rest of the federation.

    Args:
        destinations: A list of candidate homeservers which are likely to be
            participating in the room.
        pdu: The event to be sent.

    Returns:
        The remote homeserver return some state from the room. The response
        dictionary is in the form:

        {"knock_state_events": [<state event dict>, ...]}

        The list of state events may be empty.

    Raises:
        SynapseError: If the chosen remote server returns a 3xx/4xx code.
        RuntimeError: If no servers were reachable.
    """

    async def _knock_via(destination: str) -> JsonDict:
        # Single-destination attempt; failover handled by the caller below.
        return await self._do_send_knock(destination, pdu)

    return await self._try_destination_list("send_knock", destinations, _knock_via)
|
|
|
|
|
|
|
|
async def _do_send_knock(self, destination: str, pdu: EventBase) -> JsonDict:
    """Send a knock event to a remote homeserver.

    Args:
        destination: The homeserver to send to.
        pdu: The event to send.

    Returns:
        The remote homeserver can optionally return some state from the room. The response
        dictionary is in the form:

        {"knock_state_events": [<state event dict>, ...]}

        The list of state events may be empty.
    """
    time_now = self._clock.time_msec()

    # Note: unlike send_join/send_leave there is no v2 fallback here; only
    # the v1 knock endpoint is attempted.
    return await self.transport_layer.send_knock_v1(
        destination=destination,
        room_id=pdu.room_id,
        event_id=pdu.event_id,
        content=pdu.get_pdu_json(time_now),
    )
|
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def get_public_rooms(
    self,
    remote_server: str,
    limit: Optional[int] = None,
    since_token: Optional[str] = None,
    search_filter: Optional[Dict] = None,
    include_all_networks: bool = False,
    third_party_instance_id: Optional[str] = None,
) -> JsonDict:
    """Get the list of public rooms from a remote homeserver

    Args:
        remote_server: The name of the remote server
        limit: Maximum amount of rooms to return
        since_token: Used for result pagination
        search_filter: A filter dictionary to send the remote homeserver
            and filter the result set
        include_all_networks: Whether to include results from all third party instances
        third_party_instance_id: Whether to only include results from a specific third
            party instance

    Returns:
        The response from the remote server.

    Raises:
        HttpResponseException / RequestSendFailed: There was an exception
            returned from the remote server
        SynapseException: M_FORBIDDEN when the remote server has disallowed publicRoom
            requests over federation

    """
    # Thin wrapper: the transport layer performs the actual federation request.
    return await self.transport_layer.get_public_rooms(
        remote_server,
        limit,
        since_token,
        search_filter,
        include_all_networks=include_all_networks,
        third_party_instance_id=third_party_instance_id,
    )
|
2016-05-31 18:20:07 +02:00
|
|
|
|
2020-02-03 22:14:30 +01:00
|
|
|
async def get_missing_events(
    self,
    destination: str,
    room_id: str,
    earliest_events_ids: Iterable[str],
    latest_events: Iterable[EventBase],
    limit: int,
    min_depth: int,
    timeout: int,
) -> List[EventBase]:
    """Tries to fetch events we are missing. This is called when we receive
    an event without having received all of its ancestors.

    Args:
        destination
        room_id
        earliest_events_ids: List of event ids. Effectively the
            events we expected to receive, but haven't. `get_missing_events`
            should only return events that didn't happen before these.
        latest_events: List of events we have received that we don't
            have all previous events for.
        limit: Maximum number of events to return.
        min_depth: Minimum depth of events to return.
        timeout: Max time to wait in ms

    Returns:
        The missing events (after signature/hash checks), or an empty list if
        the remote server responded with a 400 (e.g. it is too old to support
        the endpoint).
    """
    try:
        content = await self.transport_layer.get_missing_events(
            destination=destination,
            room_id=room_id,
            earliest_events=earliest_events_ids,
            latest_events=[e.event_id for e in latest_events],
            limit=limit,
            min_depth=min_depth,
            timeout=timeout,
        )

        room_version = await self.store.get_room_version(room_id)

        events = [
            event_from_pdu_json(e, room_version) for e in content.get("events", [])
        ]

        signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
            destination, events, room_version=room_version
        )
    except HttpResponseException as e:
        # Anything other than a 400 is a real failure which we propagate.
        if e.code != 400:
            raise

        # We are probably hitting an old server that doesn't support
        # get_missing_events
        signed_events = []

    return signed_events
|
2015-02-23 14:58:02 +01:00
|
|
|
|
2021-01-20 13:59:18 +01:00
|
|
|
async def forward_third_party_invite(
    self, destinations: Iterable[str], room_id: str, event_dict: JsonDict
) -> None:
    """Ask each remote homeserver in `destinations` in turn to exchange a
    third party invite, stopping at the first success.

    Our own server name is skipped if it appears in the list.

    Args:
        destinations: homeservers to try
        room_id: the room the invite is for
        event_dict: the third party invite event to exchange

    Raises:
        CodeMessageException: if a remote server responded with an error code
        RuntimeError: if no server could be reached successfully
    """
    for destination in destinations:
        if destination == self.server_name:
            continue

        try:
            await self.transport_layer.exchange_third_party_invite(
                destination=destination, room_id=room_id, event_dict=event_dict
            )
            return
        except CodeMessageException:
            # An HTTP-level error response is authoritative; don't try
            # any further servers.
            raise
        except Exception as e:
            # Other failures (e.g. connection problems) are logged and we
            # move on to the next candidate.
            logger.exception(
                "Failed to send_third_party_invite via %s: %s", destination, str(e)
            )

    raise RuntimeError("Failed to send to any server.")
|
2019-07-29 18:47:27 +02:00
|
|
|
|
2020-07-17 13:08:56 +02:00
|
|
|
async def get_room_complexity(
    self, destination: str, room_id: str
) -> Optional[JsonDict]:
    """
    Fetch the complexity of a remote room from another server.

    Args:
        destination: The remote server
        room_id: The room ID to ask about.

    Returns:
        Dict contains the complexity metric versions, while None means we
        could not fetch the complexity.
    """
    try:
        return await self.transport_layer.get_room_complexity(
            destination=destination, room_id=room_id
        )
    except CodeMessageException as e:
        # We didn't manage to get it -- probably a 404. We are okay if other
        # servers don't give it to us.
        logger.debug(
            "Failed to fetch room complexity via %s for %s, got a %d",
            destination,
            room_id,
            e.code,
        )
    except Exception:
        # Deliberately broad: this lookup is best-effort, so any other
        # failure is logged and treated as "complexity unavailable".
        logger.exception(
            "Failed to fetch room complexity via %s for %s", destination, room_id
        )

    # If we don't manage to find it, return None. It's not an error if a
    # server doesn't give it to us.
    return None
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2021-08-16 14:06:17 +02:00
|
|
|
async def get_room_hierarchy(
    self,
    destinations: Iterable[str],
    room_id: str,
    suggested_only: bool,
) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]:
    """
    Call other servers to get a hierarchy of the given room.

    Performs simple data validates and parsing of the response.

    Args:
        destinations: The remote servers. We will try them in turn, omitting any
            that have been blacklisted.
        room_id: ID of the space to be queried
        suggested_only: If true, ask the remote server to only return children
            with the "suggested" flag set

    Returns:
        A tuple of:
            The room as a JSON dictionary, without a "children_state" key.
            A list of `m.space.child` state events.
            A list of children rooms, as JSON dictionaries.
            A list of inaccessible children room IDs.

    Raises:
        SynapseError if we were unable to get a valid summary from any of the
            remote servers
    """
    # Serve from the local cache if we have recently fetched this hierarchy.
    cached_result = self._get_room_hierarchy_cache.get((room_id, suggested_only))
    if cached_result:
        return cached_result

    async def send_request(
        destination: str,
    ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]:
        try:
            res = await self.transport_layer.get_room_hierarchy(
                destination=destination,
                room_id=room_id,
                suggested_only=suggested_only,
            )
        except HttpResponseException as e:
            # If an error is received that is due to an unrecognised endpoint,
            # fallback to the unstable endpoint. Otherwise, consider it a
            # legitimate error and raise.
            if not self._is_unknown_endpoint(e):
                raise

            logger.debug(
                "Couldn't fetch room hierarchy with the v1 API, falling back to the unstable API"
            )

            res = await self.transport_layer.get_room_hierarchy_unstable(
                destination=destination,
                room_id=room_id,
                suggested_only=suggested_only,
            )

        # Basic shape validation of the remote response before trusting it.
        room = res.get("room")
        if not isinstance(room, dict):
            raise InvalidResponseError("'room' must be a dict")
        if room.get("room_id") != room_id:
            raise InvalidResponseError("wrong room returned in hierarchy response")

        # Validate children_state of the room.
        children_state = room.pop("children_state", [])
        if not isinstance(children_state, list):
            raise InvalidResponseError("'room.children_state' must be a list")
        if any(not isinstance(e, dict) for e in children_state):
            raise InvalidResponseError("Invalid event in 'children_state' list")
        try:
            for child_state in children_state:
                _validate_hierarchy_event(child_state)
        except ValueError as e:
            raise InvalidResponseError(str(e))

        # Validate the children rooms.
        children = res.get("children", [])
        if not isinstance(children, list):
            raise InvalidResponseError("'children' must be a list")
        if any(not isinstance(r, dict) for r in children):
            raise InvalidResponseError("Invalid room in 'children' list")

        # Validate the inaccessible children.
        inaccessible_children = res.get("inaccessible_children", [])
        if not isinstance(inaccessible_children, list):
            raise InvalidResponseError("'inaccessible_children' must be a list")
        if any(not isinstance(r, str) for r in inaccessible_children):
            raise InvalidResponseError(
                "Invalid room ID in 'inaccessible_children' list"
            )

        return room, children_state, children, inaccessible_children

    result = await self._try_destination_list(
        "fetch room hierarchy",
        destinations,
        send_request,
        failover_on_unknown_endpoint=True,
    )

    # Cache the result to avoid fetching data over federation every time.
    self._get_room_hierarchy_cache[(room_id, suggested_only)] = result
    return result
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2021-12-02 08:02:20 +01:00
|
|
|
async def timestamp_to_event(
    self, *, destinations: List[str], room_id: str, timestamp: int, direction: str
) -> Optional["TimestampToEventResponse"]:
    """
    Calls each remote federating server from `destinations` asking for their closest
    event to the given timestamp in the given direction until we get a response.
    Also validates the response to always return the expected keys or raises an
    error.

    Args:
        destinations: The domains of homeservers to try fetching from
        room_id: Room to fetch the event from
        timestamp: The point in time (inclusive) we should navigate from in
            the given direction to find the closest event.
        direction: ["f"|"b"] to indicate whether we should navigate forward
            or backward from the given timestamp to find the closest event.

    Returns:
        A parsed TimestampToEventResponse including the closest event_id
        and origin_server_ts or None if no destination has a response.
    """

    async def _timestamp_to_event_from_destination(
        destination: str,
    ) -> TimestampToEventResponse:
        return await self._timestamp_to_event_from_destination(
            destination, room_id, timestamp, direction
        )

    try:
        # Loop through each homeserver candidate until we get a successful response
        timestamp_to_event_response = await self._try_destination_list(
            "timestamp_to_event",
            destinations,
            # TODO: The requested timestamp may lie in a part of the
            #   event graph that the remote server *also* didn't have,
            #   in which case they will have returned another event
            #   which may be nowhere near the requested timestamp. In
            #   the future, we may need to reconcile that gap and ask
            #   other homeservers, and/or extend `/timestamp_to_event`
            #   to return events on *both* sides of the timestamp to
            #   help reconcile the gap faster.
            _timestamp_to_event_from_destination,
            # Since this endpoint is new, we should try other servers before giving up.
            # We can safely remove this in a year (remove after 2023-11-16).
            failover_on_unknown_endpoint=True,
        )
        return timestamp_to_event_response
    except SynapseError as e:
        # `logger.warn` is a deprecated alias of `logger.warning`.
        logger.warning(
            "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
            room_id,
            timestamp,
            direction,
            e,
        )
        return None
|
|
|
|
|
|
|
|
async def _timestamp_to_event_from_destination(
    self, destination: str, room_id: str, timestamp: int, direction: str
) -> "TimestampToEventResponse":
    """
    Calls a remote federating server at `destination` asking for their
    closest event to the given timestamp in the given direction. Also
    validates the response to always return the expected keys or raises an
    error.

    Args:
        destination: Domain name of the remote homeserver
        room_id: Room to fetch the event from
        timestamp: The point in time (inclusive) we should navigate from in
            the given direction to find the closest event.
        direction: ["f"|"b"] to indicate whether we should navigate forward
            or backward from the given timestamp to find the closest event.

    Returns:
        A parsed TimestampToEventResponse including the closest event_id
        and origin_server_ts

    Raises:
        Various exceptions when the request fails
        InvalidResponseError when the response does not have the correct
            keys or wrong types
    """
    remote_response = await self.transport_layer.timestamp_to_event(
        destination, room_id, timestamp, direction
    )

    # Guard against a remote server returning a non-object body.
    if not isinstance(remote_response, dict):
        raise InvalidResponseError(
            "Response must be a JSON dictionary but received %r" % remote_response
        )

    # from_json_dict performs the key/type validation and raises ValueError,
    # which we translate to InvalidResponseError for callers.
    try:
        return TimestampToEventResponse.from_json_dict(remote_response)
    except ValueError as e:
        raise InvalidResponseError(str(e))
|
|
|
|
|
2022-02-22 16:10:10 +01:00
|
|
|
async def get_account_status(
    self, destination: str, user_ids: List[str]
) -> Tuple[JsonDict, List[str]]:
    """Retrieves account statuses for a given list of users on a given remote
    homeserver.

    If the request fails for any reason, all user IDs for this destination are marked
    as failed.

    Args:
        destination: the destination to contact
        user_ids: the user ID(s) for which to request account status(es)

    Returns:
        The account statuses, as well as the list of user IDs for which it was not
        possible to retrieve a status.
    """
    try:
        res = await self.transport_layer.get_account_status(destination, user_ids)
    except Exception:
        # If the query failed for any reason, mark all the users as failed.
        return {}, user_ids

    statuses = res.get("account_statuses", {})
    failures = res.get("failures", [])

    if not isinstance(statuses, dict) or not isinstance(failures, list):
        # Make sure we're not feeding back malformed data back to the caller.
        logger.warning(
            "Destination %s responded with malformed data to account_status query",
            destination,
        )
        return {}, user_ids

    for user_id in user_ids:
        # Any account whose status is missing is a user we failed to receive the
        # status of.
        if user_id not in statuses and user_id not in failures:
            failures.append(user_id)

    # Filter out any user ID that doesn't belong to the remote server that sent its
    # status (or failure).
    def filter_user_id(user_id: str) -> bool:
        try:
            return UserID.from_string(user_id).domain == destination
        except SynapseError:
            # If the user ID doesn't parse, ignore it.
            return False

    # Comprehensions are clearer than dict(filter(lambda ...)) / list(filter(...)).
    filtered_statuses = {
        user_id: status
        for user_id, status in statuses.items()
        if filter_user_id(user_id)
    }

    filtered_failures = [user_id for user_id in failures if filter_user_id(user_id)]

    return filtered_statuses, filtered_failures
|
|
|
|
|
2021-12-02 08:02:20 +01:00
|
|
|
|
|
|
|
@attr.s(frozen=True, slots=True, auto_attribs=True)
class TimestampToEventResponse:
    """Typed response dictionary for the federation /timestamp_to_event endpoint"""

    # ID of the event closest to the requested timestamp
    event_id: str
    # origin_server_ts of that event
    origin_server_ts: int

    # the raw data, including the above keys
    data: JsonDict

    @classmethod
    def from_json_dict(cls, d: JsonDict) -> "TimestampToEventResponse":
        """Parse a response from the federation /timestamp_to_event endpoint.

        Args:
            d: JSON object response to be parsed

        Raises:
            ValueError if d does not have the correct keys or they are the wrong types
        """

        event_id = d.get("event_id")
        if not isinstance(event_id, str):
            raise ValueError(
                "Invalid response: 'event_id' must be a str but received %r" % event_id
            )

        origin_server_ts = d.get("origin_server_ts")
        if not isinstance(origin_server_ts, int):
            raise ValueError(
                "Invalid response: 'origin_server_ts' must be a int but received %r"
                % origin_server_ts
            )

        # Keep the full raw dict alongside the validated fields.
        return cls(event_id, origin_server_ts, d)
|
|
|
|
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
def _validate_hierarchy_event(d: JsonDict) -> None:
|
|
|
|
"""Validate an event within the result of a /hierarchy request
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
Args:
|
|
|
|
d: json object to be parsed
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
Raises:
|
|
|
|
ValueError if d is not a valid event
|
|
|
|
"""
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
event_type = d.get("type")
|
|
|
|
if not isinstance(event_type, str):
|
|
|
|
raise ValueError("Invalid event: 'event_type' must be a str")
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
state_key = d.get("state_key")
|
|
|
|
if not isinstance(state_key, str):
|
|
|
|
raise ValueError("Invalid event: 'state_key' must be a str")
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
content = d.get("content")
|
|
|
|
if not isinstance(content, dict):
|
|
|
|
raise ValueError("Invalid event: 'content' must be a dict")
|
2021-03-24 13:45:39 +01:00
|
|
|
|
2022-02-28 19:33:00 +01:00
|
|
|
via = content.get("via")
|
2022-03-02 14:18:51 +01:00
|
|
|
if not isinstance(via, list):
|
2022-02-28 19:33:00 +01:00
|
|
|
raise ValueError("Invalid event: 'via' must be a list")
|
|
|
|
if any(not isinstance(v, str) for v in via):
|
|
|
|
raise ValueError("Invalid event: 'via' must be a list of strings")
|