# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import itertools
import logging
from http import HTTPStatus
from typing import (
    TYPE_CHECKING,
    Collection,
    Container,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
)

from prometheus_client import Counter, Histogram

from synapse import event_auth
from synapse.api.constants import (
    EventContentFields,
    EventTypes,
    GuestAccess,
    Membership,
    RejectedReason,
    RoomEncryptionAlgorithms,
)
from synapse.api.errors import (
    AuthError,
    Codes,
    EventSizeError,
    FederationError,
    FederationPullAttemptBackoffError,
    HttpResponseException,
    PartialStateConflictError,
    RequestSendFailed,
    SynapseError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.event_auth import (
    auth_types_for_event,
    check_state_dependent_auth_rules,
    check_state_independent_auth_rules,
    validate_event_for_room_version,
)
from synapse.events import EventBase
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo
from synapse.logging.context import nested_logging_context
from synapse.logging.opentracing import (
    SynapseTags,
    set_tag,
    start_active_span,
    tag_args,
    trace,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.devices import (
    ReplicationMultiUserDevicesResyncRestServlet,
)
from synapse.replication.http.federation import (
    ReplicationFederationSendEventsRestServlet,
)
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
    PersistedEventPosition,
    RoomStreamToken,
    StateMap,
    StrCollection,
    UserID,
    get_domain_from_id,
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr

if TYPE_CHECKING:
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)
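
# Soft-failed events are ones that pass the auth checks for their position in the
# DAG but would not be allowed by the *current* room state; they are persisted but
# are not sent on to clients.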
soft_failed_event_counter = Counter(
    "synapse_federation_soft_failed_events_total",
    "Events received over federation that we marked as soft_failed",
)


# Added to debug performance and track progress on optimizations
backfill_processing_after_timer = Histogram(
    "synapse_federation_backfill_processing_after_time_seconds",
    "sec",
    [],
    buckets=(
        0.1,
        0.25,
        0.5,
        1.0,
        2.5,
        5.0,
        7.5,
        10.0,
        15.0,
        20.0,
        25.0,
        30.0,
        40.0,
        50.0,
        60.0,
        80.0,
        100.0,
        120.0,
        150.0,
        180.0,
        "+Inf",
    ),
)
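# (This histogram records the wall-clock seconds spent processing a batch of
# backfilled events after they have been received from the remote server; it is
# used in `backfill` below.)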


class FederationEventHandler:
    """Handles events that originated from federation.

    Responsible for handling incoming events and passing them on to the rest
    of the homeserver (including auth and state conflict resolution).
    """

    def __init__(self, hs: "HomeServer"):
        self._clock = hs.get_clock()
        self._store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()
        self._state_storage_controller = self._storage_controllers.state

        self._state_handler = hs.get_state_handler()
        self._event_creation_handler = hs.get_event_creation_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
        self._message_handler = hs.get_message_handler()
        self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
        self._state_resolution_handler = hs.get_state_resolution_handler()
        # avoid a circular dependency by deferring execution here
        self._get_room_member_handler = hs.get_room_member_handler

        self._federation_client = hs.get_federation_client()
        self._third_party_event_rules = (
            hs.get_module_api_callbacks().third_party_event_rules
        )
        self._notifier = hs.get_notifier()

        self._is_mine_id = hs.is_mine_id
        self._is_mine_server_name = hs.is_mine_server_name
        self._server_name = hs.hostname
        self._instance_name = hs.get_instance_name()

        self._config = hs.config
        self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages

        self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
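        # Device-list resyncs are done via a replication call when this process is a
        # worker, and directly via the device handler on the main process.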
        if hs.config.worker.worker_app:
            self._multi_user_device_resync = (
                ReplicationMultiUserDevicesResyncRestServlet.make_client(hs)
            )
        else:
            self._device_list_updater = hs.get_device_handler().device_list_updater

        # When joining a room we need to queue any events for that room up.
        # For each room, a list of (pdu, origin) tuples.
        # TODO: replace this with something more elegant, probably based around the
        # federation event staging area.
        self.room_queues: Dict[str, List[Tuple[EventBase, str]]] = {}

        self._room_pdu_linearizer = Linearizer("fed_room_pdu")
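        # Used to ensure that, for any given room, we only fetch missing prev_events
        # for one incoming PDU at a time (see on_receive_pdu below).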

    async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None:
        """Process a PDU received via a federation /send/ transaction

        Args:
            origin: server which initiated the /send/ transaction. Will
                be used to fetch missing events or state.
            pdu: received PDU
        """

        # We should never see any outliers here.
        assert not pdu.internal_metadata.outlier

        room_id = pdu.room_id
        event_id = pdu.event_id

        # We reprocess pdus when we have seen them only as outliers
        existing = await self._store.get_event(
            event_id, allow_none=True, allow_rejected=True
        )

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.
        if existing:
            if not existing.internal_metadata.is_outlier():
                logger.info(
                    "Ignoring received event %s which we have already seen", event_id
                )
                return
            if pdu.internal_metadata.is_outlier():
                logger.info(
                    "Ignoring received outlier %s which we already have as an outlier",
                    event_id,
                )
                return
            logger.info("De-outliering event %s", event_id)

        # do some initial sanity-checking of the event. In particular, make
        # sure it doesn't have hundreds of prev_events or auth_events, which
        # could cause a huge state resolution or cascade of event fetches.
        try:
            self._sanity_check_event(pdu)
        except SynapseError as err:
            logger.warning("Received event failed sanity checks")
            raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)

        # If we are currently in the process of joining this room, then we
        # queue up events for later processing.
        if room_id in self.room_queues:
            logger.info(
                "Queuing PDU from %s for now: join in progress",
                origin,
            )
            self.room_queues[room_id].append((pdu, origin))
            return

        # If we're not in the room just ditch the event entirely. This is
        # probably an old server that has come back and thinks we're still in
        # the room (or we've been rejoined to the room by a state reset).
        #
        # Note that if we were never in the room then we would have already
        # dropped the event, since we wouldn't know the room version.
        is_in_room = await self._event_auth_handler.is_host_in_room(
            room_id, self._server_name
        )
        if not is_in_room:
            logger.info(
                "Ignoring PDU from %s as we're not in the room",
                origin,
            )
            return None

        # Try to fetch any missing prev events to fill in gaps in the graph
        prevs = set(pdu.prev_event_ids())
        seen = await self._store.have_events_in_timeline(prevs)
        missing_prevs = prevs - seen

        if missing_prevs:
            # We only backfill backwards to the min depth.
            min_depth = await self._store.get_min_depth(pdu.room_id)
            logger.debug("min_depth: %d", min_depth)

            if min_depth is not None and pdu.depth > min_depth:
                # If we're missing stuff, ensure we only fetch stuff one
                # at a time.
                logger.info(
                    "Acquiring room lock to fetch %d missing prev_events: %s",
                    len(missing_prevs),
                    shortstr(missing_prevs),
                )
                async with self._room_pdu_linearizer.queue(pdu.room_id):
                    logger.info(
                        "Acquired room lock to fetch %d missing prev_events",
                        len(missing_prevs),
                    )

                    try:
                        await self._get_missing_events_for_pdu(
                            origin, pdu, prevs, min_depth
                        )
                    except Exception as e:
                        raise Exception(
                            "Error fetching missing prev_events for %s: %s"
                            % (event_id, e)
                        ) from e

                # Update the set of things we've seen after trying to
                # fetch the missing stuff
                seen = await self._store.have_events_in_timeline(prevs)
                missing_prevs = prevs - seen

                if not missing_prevs:
                    logger.info("Found all missing prev_events")

            if missing_prevs:
                # since this event was pushed to us, it is possible for it to
                # become the only forward-extremity in the room, and we would then
                # trust its state to be the state for the whole room. This is very
                # bad. Further, if the event was pushed to us, there is no excuse
                # for us not to have all the prev_events. (XXX: apart from
                # min_depth?)
                #
                # We therefore reject any such events.
                logger.warning(
                    "Rejecting: failed to fetch %d prev events: %s",
                    len(missing_prevs),
                    shortstr(missing_prevs),
                )
                raise FederationError(
                    "ERROR",
                    403,
                    (
                        "Your server isn't divulging details about prev_events "
                        "referenced in this event."
                    ),
                    affected=pdu.event_id,
                )

        try:
            context = await self._state_handler.compute_event_context(pdu)
            await self._process_received_pdu(origin, pdu, context)
        except PartialStateConflictError:
            # The room was un-partial stated while we were processing the PDU.
            # Try once more, with full state this time.
            logger.info(
                "Room %s was un-partial stated while processing the PDU, trying again.",
                room_id,
            )
            context = await self._state_handler.compute_event_context(pdu)
            await self._process_received_pdu(origin, pdu, context)

    async def on_send_membership_event(
        self, origin: str, event: EventBase
    ) -> Tuple[EventBase, EventContext]:
        """
        We have received a join/leave/knock event for a room via send_join/leave/knock.

        Verify that event and send it into the room on the remote homeserver's behalf.

        This is quite similar to on_receive_pdu, with the following principal
        differences:
          * only membership events are permitted (and only events with
            sender==state_key -- ie, no kicks or bans)
          * *We* send out the event on behalf of the remote server.
          * We enforce the membership restrictions of restricted rooms.
          * Rejected events result in an exception rather than being stored.

        There are also other differences, however it is not clear if these are by
        design or omission. In particular, we do not attempt to backfill any missing
        prev_events.

        Args:
            origin: The homeserver of the remote (joining/invited/knocking) user.
            event: The member event that has been signed by the remote homeserver.

        Returns:
            The event and context of the event after inserting it into the room graph.

        Raises:
            RuntimeError if any prev_events are missing
            SynapseError if the event is not accepted into the room
            PartialStateConflictError if the room was un-partial stated in between
                computing the state at the event and persisting it. The caller should
                retry exactly once in this case.
        """
        logger.debug(
            "on_send_membership_event: Got event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got send_membership request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        if event.sender != event.state_key:
            raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)

        assert not event.internal_metadata.outlier

        # Send this event on behalf of the other server.
        #
        # The remote server isn't a full participant in the room at this point, so
        # may not have an up-to-date list of the other homeservers participating in
        # the room, so we send it on their behalf.
        event.internal_metadata.send_on_behalf_of = origin

        context = await self._state_handler.compute_event_context(event)
        await self._check_event_auth(origin, event, context)
        if context.rejected:
            raise SynapseError(
                403, f"{event.membership} event was rejected", Codes.FORBIDDEN
            )

        # for joins, we need to check the restrictions of restricted rooms
        if event.membership == Membership.JOIN:
            await self.check_join_restrictions(context, event)

        # for knock events, we run the third-party event rules. It's not entirely clear
        # why we don't do this for other sorts of membership events.
        if event.membership == Membership.KNOCK:
            event_allowed, _ = await self._third_party_event_rules.check_event_allowed(
                event, context
            )
            if not event_allowed:
                logger.info("Sending of knock %s forbidden by third-party rules", event)
                raise SynapseError(
                    403, "This event is not allowed in this context", Codes.FORBIDDEN
                )

        # all looks good, we can persist the event.

        # First, precalculate the joined hosts so that the federation sender doesn't
        # need to.
        await self._event_creation_handler.cache_joined_hosts_for_events(
            [(event, context)]
        )

        await self._check_for_soft_fail(event, context=context, origin=origin)
        await self._run_push_actions_and_persist_event(event, context)
        return event, context

    async def check_join_restrictions(
        self,
        context: UnpersistedEventContextBase,
        event: EventBase,
    ) -> None:
        """Check that restrictions in restricted join rules are matched

        Called when we receive a join event via send_join.

        Raises an auth error if the restrictions are not matched.
        """
        prev_state_ids = await context.get_prev_state_ids()

        # Check if the user is already in the room or invited to the room.
        user_id = event.state_key
        prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
        prev_membership = None
        if prev_member_event_id:
            prev_member_event = await self._store.get_event(prev_member_event_id)
            prev_membership = prev_member_event.membership

        # Check if the member should be allowed access via membership in a space.
        await self._event_auth_handler.check_restricted_join_rules(
            prev_state_ids,
            event.room_version,
            user_id,
            prev_membership,
        )

    @trace
    async def process_remote_join(
        self,
        origin: str,
        room_id: str,
        auth_events: List[EventBase],
        state: List[EventBase],
        event: EventBase,
        room_version: RoomVersion,
        partial_state: bool,
    ) -> int:
        """Persists the events returned by a send_join

        Checks the auth chain is valid (and passes auth checks) for the
        state and event. Then persists all of the events.
        Notifies about the persisted events where appropriate.

        Args:
            origin: Where the events came from
            room_id: The room we are joining
            auth_events: The auth chain returned in the send_join response
            state: The room state returned in the send_join response
            event: The join event itself
            room_version: The room version we expect this room to have, and
                will raise if it doesn't match the version in the create event.
            partial_state: True if the state omits non-critical membership events

        Returns:
            The stream ID after which all events have been persisted.

        Raises:
            SynapseError if the response is in some way invalid.
            PartialStateConflictError if the homeserver is already in the room and it
                has been un-partial stated.
        """
        create_event = None
        for e in state:
            if (e.type, e.state_key) == (EventTypes.Create, ""):
                create_event = e
                break

        if create_event is None:
            # If the state doesn't have a create event then the room is
            # invalid, and it would fail auth checks anyway.
            raise SynapseError(400, "No create event in state")

        room_version_id = create_event.content.get(
            "room_version", RoomVersions.V1.identifier
        )

        if room_version.identifier != room_version_id:
            raise SynapseError(400, "Room version mismatch")

        # persist the auth chain and state events.
        #
        # any invalid events here will be marked as rejected, and we'll carry on.
        #
        # any events whose auth events are missing (ie, not in the send_join response,
        # and not already in our db) will just be ignored. This is correct behaviour,
        # because the reason that auth_events are missing might be due to us being
        # unable to validate their signatures. The fact that we can't validate their
        # signatures right now doesn't mean that we will *never* be able to, so it
        # is premature to reject them.
        #
        await self._auth_and_persist_outliers(
            room_id, itertools.chain(auth_events, state)
        )

        # and now persist the join event itself.
        logger.info(
            "Persisting join-via-remote %s (partial_state: %s)", event, partial_state
        )
        with nested_logging_context(suffix=event.event_id):
            if partial_state:
                # When handling a second partial state join into a partial state room,
                # the returned state will exclude the membership from the first join. To
                # preserve prior memberships, we try to compute the partial state before
                # the event ourselves if we know about any of the prev events.
                #
                # When we don't know about any of the prev events, it's fine to just use
                # the returned state, since the new join will create a new forward
                # extremity, and leave the forward extremity containing our prior
                # memberships alone.
                prev_event_ids = set(event.prev_event_ids())
                seen_event_ids = await self._store.have_events_in_timeline(
                    prev_event_ids
                )
                missing_event_ids = prev_event_ids - seen_event_ids

                state_maps_to_resolve: List[StateMap[str]] = []

                # Fetch the state after the prev events that we know about.
                state_maps_to_resolve.extend(
                    (
                        await self._state_storage_controller.get_state_groups_ids(
                            room_id, seen_event_ids, await_full_state=False
                        )
                    ).values()
                )

                # When there are prev events we do not have the state for, we state
                # resolve with the state returned by the remote homeserver.
                if missing_event_ids or len(state_maps_to_resolve) == 0:
                    state_maps_to_resolve.append(
                        {(e.type, e.state_key): e.event_id for e in state}
                    )

                state_ids_before_event = (
                    await self._state_resolution_handler.resolve_events_with_store(
                        event.room_id,
                        room_version.identifier,
                        state_maps_to_resolve,
                        event_map=None,
                        state_res_store=StateResolutionStore(self._store),
                    )
                )
            else:
                state_ids_before_event = {
                    (e.type, e.state_key): e.event_id for e in state
                }

            context = await self._state_handler.compute_event_context(
                event,
                state_ids_before_event=state_ids_before_event,
                partial_state=partial_state,
            )

            await self._check_event_auth(origin, event, context)
            if context.rejected:
                raise SynapseError(403, "Join event was rejected")

            # the remote server is responsible for sending our join event to the rest
            # of the federation. Indeed, attempting to do so will result in problems
            # when we try to look up the state before the join (to get the server list)
            # and discover that we do not have it.
            event.internal_metadata.proactively_send = False

            stream_id_after_persist = await self.persist_events_and_notify(
                room_id, [(event, context)]
            )

            # If we're joining the room again, check if there is new marker
            # state indicating that there is new history imported somewhere in
            # the DAG. Multiple markers can exist in the current state with
            # unique state_keys.
            #
            # Do this after the state from the remote join was persisted (via
            # `persist_events_and_notify`). Otherwise we can run into a
            # situation where the create event doesn't exist yet in the
            # `current_state_events`
            for e in state:
                await self._handle_marker_event(origin, e)

            return stream_id_after_persist

    async def update_state_for_partial_state_event(
        self, destination: str, event: EventBase
    ) -> None:
        """Recalculate the state at an event as part of a de-partial-stating process

        Args:
            destination: server to request full state from
            event: partial-state event to be de-partial-stated

        Raises:
            FederationPullAttemptBackoffError if we are deliberately not attempting
                to pull the given event over federation because we've already done so
                recently and are backing off.
            FederationError if we fail to request state from the remote server.
        """
        logger.info("Updating state for %s", event.event_id)
        with nested_logging_context(suffix=event.event_id):
            # if we have all the event's prev_events, then we can work out the
            # state based on their states. Otherwise, we request it from the destination
            # server.
            #
            # This is the same operation as we do when we receive a regular event
            # over federation.
            context = await self._compute_event_context_with_maybe_missing_prevs(
                destination, event
            )
            if context.partial_state:
                # this can happen if some or all of the event's prev_events still have
                # partial state. We were careful to only pick events from the db without
                # partial-state prev events, so that implies that a prev event has
                # been persisted (with partial state) since we did the query.
                #
                # So, let's just ignore `event` for now; when we re-run the db query
                # we should instead get its partial-state prev event, which we will
                # de-partial-state, and then come back to event.
                logger.warning(
                    "%s still has prev_events with partial state: can't de-partial-state it yet",
                    event.event_id,
                )
                return

            # since the state at this event has changed, we should now re-evaluate
            # whether it should have been rejected. We must already have all of the
            # auth events (from last time we went round this path), so there is no
            # need to pass the origin.
            await self._check_event_auth(None, event, context)

            await self._store.update_state_for_partial_state_event(event, context)
            self._state_storage_controller.notify_event_un_partial_stated(
                event.event_id
            )
            # Notify that there's a new row in the un_partial_stated_events stream.
            self._notifier.notify_replication()

    @trace
    async def backfill(
        self, dest: str, room_id: str, limit: int, extremities: StrCollection
    ) -> None:
        """Trigger a backfill request to `dest` for the given `room_id`

        This will attempt to get more events from the remote. If the other side
        has no new events to offer, this will return without doing anything.

        As the events are received, we check their signatures, and also do some
        sanity-checking on them. If any of the backfilled events are invalid,
        this method throws a SynapseError.

        We might also raise an InvalidResponseError if the response from the remote
        server is just bogus.

        TODO: make this more useful to distinguish failures of the remote
        server from invalid events (there is probably no point in trying to
        re-fetch invalid events from every other HS in the room.)
        """
        if self._is_mine_server_name(dest):
            raise SynapseError(400, "Can't backfill from self.")

        events = await self._federation_client.backfill(
            dest, room_id, limit=limit, extremities=extremities
        )

        if not events:
            return

        with backfill_processing_after_timer.time():
            # if there are any events in the wrong room, the remote server is buggy and
            # should not be trusted.
            for ev in events:
                if ev.room_id != room_id:
                    raise InvalidResponseError(
                        f"Remote server {dest} returned event {ev.event_id} which is in "
                        f"room {ev.room_id}, when we were backfilling in {room_id}"
                    )

            await self._process_pulled_events(
                dest,
                events,
                backfilled=True,
            )

    @trace
    async def _get_missing_events_for_pdu(
        self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
    ) -> None:
        """
        Args:
            origin: Origin of the pdu. Will be called to get the missing events
            pdu: received pdu
            prevs: Set of event ids which we are missing
            min_depth: Minimum depth of events to return.
        """

        room_id = pdu.room_id
        event_id = pdu.event_id

        seen = await self._store.have_events_in_timeline(prevs)

        if not prevs - seen:
            return

        latest_list = await self._store.get_latest_event_ids_in_room(room_id)

        # We add the prev events that we have seen to the latest
        # list to ensure the remote server doesn't give them to us
        latest = set(latest_list)
        latest |= seen

        logger.info(
            "Requesting missing events between %s and %s",
            shortstr(latest),
            event_id,
        )

        # XXX: we set timeout to 10s to help workaround
        # https://github.com/matrix-org/synapse/issues/1733.
        # The reason is to avoid holding the linearizer lock
        # whilst processing inbound /send transactions, causing
        # FDs to stack up and block other inbound transactions
        # which empirically can currently take up to 30 minutes.
        #
        # N.B. this explicitly disables retry attempts.
        #
        # N.B. this also increases our chances of falling back to
        # fetching fresh state for the room if the missing event
        # can't be found, which slightly reduces our security.
        # it may also increase our DAG extremity count for the room,
        # causing additional state resolution? See #1760.
        # However, fetching state doesn't hold the linearizer lock
        # apparently.
        #
        # see https://github.com/matrix-org/synapse/pull/1744
        #
        # ----
        #
        # Update richvdh 2018/09/18: There are a number of problems with timing this
        # request out aggressively on the client side:
        #
        # - it plays badly with the server-side rate-limiter, which starts tarpitting you
        #   if you send too many requests at once, so you end up with the server carefully
        #   working through the backlog of your requests, which you have already timed
        #   out.
        #
        # - for this request in particular, we now (as of
        #   https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
        #   server can't produce a plausible-looking set of prev_events - so we become
        #   much more likely to reject the event.
        #
        # - contrary to what it says above, we do *not* fall back to fetching fresh state
        #   for the room if get_missing_events times out. Rather, we give up processing
        #   the PDU whose prevs we are missing, which then makes it much more likely that
        #   we'll end up back here for the *next* PDU in the list, which exacerbates the
        #   problem.
        #
        # - the aggressive 10s timeout was introduced to deal with incoming federation
        #   requests taking 8 hours to process. It's not entirely clear why that was going
        #   on; certainly there were other issues causing traffic storms which are now
        #   resolved, and I think in any case we may be more sensible about our locking
        #   now. We're *certainly* more sensible about our logging.
        #
        # All that said: Let's try increasing the timeout to 60s and see what happens.

        try:
            missing_events = await self._federation_client.get_missing_events(
                origin,
                room_id,
                earliest_events_ids=list(latest),
                latest_events=[pdu],
                limit=10,
                min_depth=min_depth,
                timeout=60000,
            )
        except (RequestSendFailed, HttpResponseException, NotRetryingDestination) as e:
            # We failed to get the missing events, but since we need to handle
            # the case of `get_missing_events` not returning the necessary
            # events anyway, it is safe to simply log the error and continue.
            logger.warning("Failed to get prev_events: %s", e)
            return

        logger.info("Got %d prev_events", len(missing_events))
        await self._process_pulled_events(origin, missing_events, backfilled=False)
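        # (We pass backfilled=False above: these events fill a gap in the live
        # timeline rather than being part of a historical backfill.)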
|
|
|
|
|
|
2022-08-03 17:57:38 +02:00
|
|
|
|
@trace
|
2021-08-26 22:41:44 +02:00
|
|
|
|
async def _process_pulled_events(
|
2022-08-16 19:39:40 +02:00
|
|
|
|
self, origin: str, events: Collection[EventBase], backfilled: bool
|
2021-08-26 22:41:44 +02:00
|
|
|
|
) -> None:
|
|
|
|
|
"""Process a batch of events we have pulled from a remote server
|
|
|
|
|
|
|
|
|
|
Pulls in any events required to auth the events, persists the received events,
|
|
|
|
|
and notifies clients, if appropriate.
|
|
|
|
|
|
|
|
|
|
Assumes the events have already had their signatures and hashes checked.
|
|
|
|
|
|
|
|
|
|
Params:
|
|
|
|
|
origin: The server we received these events from
|
|
|
|
|
events: The received events.
|
|
|
|
|
backfilled: True if this is part of a historical batch of events (inhibits
|
|
|
|
|
notification to clients, and validation of device keys.)
|
|
|
|
|
"""
|
2022-08-16 19:39:40 +02:00
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.FUNC_ARG_PREFIX + "event_ids",
|
|
|
|
|
str([event.event_id for event in events]),
|
|
|
|
|
)
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
|
|
|
|
|
str(len(events)),
|
|
|
|
|
)
|
|
|
|
|
set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled))
|
Fix historical messages backfilling in random order on remote homeservers (MSC2716) (#11114)
Fix https://github.com/matrix-org/synapse/issues/11091
Fix https://github.com/matrix-org/synapse/issues/10764 (side-stepping the issue because we no longer have to deal with `fake_prev_event_id`)
1. Made the `/backfill` response return messages in `(depth, stream_ordering)` order (previously only sorted by `depth`)
- Technically, it shouldn't really matter how `/backfill` returns things but I'm just trying to make the `stream_ordering` a little more consistent from the origin to the remote homeservers in order to get the order of messages from `/messages` consistent ([sorted by `(topological_ordering, stream_ordering)`](https://github.com/matrix-org/synapse/blob/develop/docs/development/room-dag-concepts.md#depth-and-stream-ordering)).
- Even now that we return backfilled messages in order, it still doesn't guarantee the same `stream_ordering` (and more importantly the [`/messages` order](https://github.com/matrix-org/synapse/blob/develop/docs/development/room-dag-concepts.md#depth-and-stream-ordering)) on the other server. For example, if a room has a bunch of history imported and someone visits a permalink to a historical message back in time, their homeserver will skip over the historical messages in between and insert the permalink as the next message in the `stream_order` and totally throw off the sort.
- This will be even more the case when we add the [MSC3030 jump to date API endpoint](https://github.com/matrix-org/matrix-doc/pull/3030) so the static archives can navigate and jump to a certain date.
- We're solving this in the future by switching to [online topological ordering](https://github.com/matrix-org/gomatrixserverlib/issues/187) and [chunking](https://github.com/matrix-org/synapse/issues/3785) which by its nature will apply retroactively to fix any inconsistencies introduced by people permalinking
2. As we're navigating `prev_events` to return in `/backfill`, we order by `depth` first (newest -> oldest) and now also tie-break based on the `stream_ordering` (newest -> oldest). This is technically important because MSC2716 inserts a bunch of historical messages at the same `depth` so it's best to be prescriptive about which ones we should process first. In reality, I think the code already looped over the historical messages as expected because the database is already in order.
3. Making the historical state chain and historical event chain float on their own by having no `prev_events` instead of a fake `prev_event` which caused backfill to get clogged with an unresolvable event. Fixes https://github.com/matrix-org/synapse/issues/11091 and https://github.com/matrix-org/synapse/issues/10764
4. We no longer find connected insertion events by finding a potential `prev_event` connection to the current event we're iterating over. We now solely rely on marker events which when processed, add the insertion event as an extremity and the federating homeserver can ask about it when time calls.
- Related discussion, https://github.com/matrix-org/synapse/pull/11114#discussion_r741514793
Before | After
--- | ---
![](https://user-images.githubusercontent.com/558581/139218681-b465c862-5c49-4702-a59e-466733b0cf45.png) | ![](https://user-images.githubusercontent.com/558581/146453159-a1609e0a-8324-439d-ae44-e4bce43ac6d1.png)
#### Why aren't we sorting topologically when receiving backfill events?
> The main reason we're going to opt to not sort topologically when receiving backfill events is because it's probably best to do whatever is easiest to make it just work. People will probably have opinions once they look at [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) which could change whatever implementation anyway.
>
> As mentioned, ideally we would do this but code necessary to make the fake edges but it gets confusing and gives an impression of “just whyyyy” (feels icky). This problem also dissolves with online topological ordering.
>
> -- https://github.com/matrix-org/synapse/pull/11114#discussion_r741517138
See https://github.com/matrix-org/synapse/pull/11114#discussion_r739610091 for the technical difficulties
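A minimal, self-contained sketch of the `(depth, stream_ordering)` newest-first ordering with the tie-break described in point 2 above (illustrative only; the `FakeEvent` tuple and helper name are hypothetical and not part of Synapse):
```python
from typing import List, NamedTuple


class FakeEvent(NamedTuple):
    event_id: str
    depth: int
    stream_ordering: int


def order_newest_first(events: List[FakeEvent]) -> List[FakeEvent]:
    # Sort by depth (newest -> oldest), tie-breaking on stream_ordering
    # (also newest -> oldest), mirroring the /backfill navigation order.
    return sorted(events, key=lambda e: (e.depth, e.stream_ordering), reverse=True)


events = [
    FakeEvent("$a", depth=10, stream_ordering=3),
    FakeEvent("$b", depth=10, stream_ordering=7),
    FakeEvent("$c", depth=11, stream_ordering=1),
]
# -> ['$c', '$b', '$a']: $c is deepest; $b beats $a on the stream_ordering tie-break.
print([e.event_id for e in order_newest_first(events)])
```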
2022-02-07 22:54:13 +01:00
|
|
|
|
logger.debug(
|
|
|
|
|
"processing pulled backfilled=%s events=%s",
|
|
|
|
|
backfilled,
|
|
|
|
|
[
|
|
|
|
|
"event_id=%s,depth=%d,body=%s,prevs=%s\n"
|
|
|
|
|
% (
|
|
|
|
|
event.event_id,
|
|
|
|
|
event.depth,
|
|
|
|
|
event.content.get("body", event.type),
|
|
|
|
|
event.prev_event_ids(),
|
|
|
|
|
)
|
|
|
|
|
for event in events
|
|
|
|
|
],
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-10-18 11:33:35 +02:00
|
|
|
|
# Check if we already have any of these events.
|
|
|
|
|
# Note: we currently make a lookup in the database directly here rather than
|
|
|
|
|
# checking the event cache, due to:
|
|
|
|
|
# https://github.com/matrix-org/synapse/issues/13476
|
|
|
|
|
existing_events_map = await self._store._get_events_from_db(
|
|
|
|
|
[event.event_id for event in events]
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
new_events = []
|
|
|
|
|
for event in events:
|
|
|
|
|
event_id = event.event_id
|
|
|
|
|
|
|
|
|
|
# If we've already seen this event ID...
|
|
|
|
|
if event_id in existing_events_map:
|
|
|
|
|
existing_event = existing_events_map[event_id]
|
|
|
|
|
|
|
|
|
|
# ...and the event itself was not previously stored as an outlier...
|
|
|
|
|
if not existing_event.event.internal_metadata.is_outlier():
|
|
|
|
|
# ...then there's no need to persist it. We have it already.
|
|
|
|
|
logger.info(
|
|
|
|
|
"_process_pulled_event: Ignoring received event %s which we "
|
|
|
|
|
"have already seen",
|
|
|
|
|
event.event_id,
|
|
|
|
|
)
|
|
|
|
|
continue
|
|
|
|
|
|
|
|
|
|
# While we have seen this event before, it was stored as an outlier.
|
|
|
|
|
# We'll now persist it as a non-outlier.
|
|
|
|
|
logger.info("De-outliering event %s", event_id)
|
|
|
|
|
|
|
|
|
|
# Continue on with the events that are new to us.
|
|
|
|
|
new_events.append(event)
|
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
# We want to sort these by depth so we process them and
|
|
|
|
|
# tell clients about them in order.
|
2022-10-18 11:33:35 +02:00
|
|
|
|
sorted_events = sorted(new_events, key=lambda x: x.depth)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
for ev in sorted_events:
|
|
|
|
|
with nested_logging_context(ev.event_id):
|
|
|
|
|
await self._process_pulled_event(origin, ev, backfilled=backfilled)
|
|
|
|
|
|
2022-08-03 17:57:38 +02:00
|
|
|
|
@trace
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@tag_args
|
2021-08-26 22:41:44 +02:00
|
|
|
|
async def _process_pulled_event(
|
|
|
|
|
self, origin: str, event: EventBase, backfilled: bool
|
|
|
|
|
) -> None:
|
|
|
|
|
"""Process a single event that we have pulled from a remote server
|
|
|
|
|
|
|
|
|
|
Pulls in any events required to auth the event, persists the received event,
|
|
|
|
|
and notifies clients, if appropriate.
|
|
|
|
|
|
|
|
|
|
Assumes the event has already had its signatures and hashes checked.
|
|
|
|
|
|
|
|
|
|
This is somewhat equivalent to on_receive_pdu, but applies somewhat different
|
|
|
|
|
logic in the case that we are missing prev_events (in particular, it just
|
|
|
|
|
requests the state at that point, rather than triggering a get_missing_events) -
|
|
|
|
|
so is appropriate when we have pulled the event from a remote server, rather
|
|
|
|
|
than having it pushed to us.
|
|
|
|
|
|
|
|
|
|
Params:
|
|
|
|
|
origin: The server we received this event from
|
|
|
|
|
event: The received event
|
|
|
|
|
backfilled: True if this is part of a historical batch of events (inhibits
|
|
|
|
|
notification to clients, and validation of device keys.)
|
|
|
|
|
"""
|
|
|
|
|
logger.info("Processing pulled event %s", event)
|
|
|
|
|
|
2022-07-20 22:58:51 +02:00
|
|
|
|
# This function should not be used to persist outliers (use something
|
|
|
|
|
# else) because this does a bunch of operations that aren't necessary
|
|
|
|
|
# (extra work; in particular, it makes sure we have all the prev_events
|
|
|
|
|
# and resolves the state across those prev events). If you happen to run
|
|
|
|
|
# into a situation where the event you're trying to process/backfill is
|
|
|
|
|
# marked as an `outlier`, then you should update that spot to return an
|
|
|
|
|
# `EventBase` copy that doesn't have the `outlier` flag set.
|
|
|
|
|
#
|
|
|
|
|
# `EventBase` is used to represent both an event we have not yet
|
|
|
|
|
# persisted, and one that we have persisted and now keep in the cache.
|
|
|
|
|
# In an ideal world this method would only be called with the first type
|
|
|
|
|
# of event, but it turns out that's not actually the case and for
|
|
|
|
|
# example, you could get an event from cache that is marked as an
|
|
|
|
|
# `outlier` (fix up that spot though).
|
|
|
|
|
assert not event.internal_metadata.is_outlier(), (
|
|
|
|
|
"Outlier event passed to _process_pulled_event. "
|
|
|
|
|
"To persist an event as a non-outlier, make sure to pass in a copy without `event.internal_metadata.outlier = true`."
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
event_id = event.event_id
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
self._sanity_check_event(event)
|
|
|
|
|
except SynapseError as err:
|
|
|
|
|
logger.warning("Event %s failed sanity check: %s", event_id, err)
|
2022-09-14 20:57:50 +02:00
|
|
|
|
await self._store.record_event_failed_pull_attempt(
|
|
|
|
|
event.room_id, event_id, str(err)
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
try:
|
2022-07-26 13:39:23 +02:00
|
|
|
|
try:
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context = await self._compute_event_context_with_maybe_missing_prevs(
|
2022-07-26 13:39:23 +02:00
|
|
|
|
origin, event
|
|
|
|
|
)
|
|
|
|
|
await self._process_received_pdu(
|
|
|
|
|
origin,
|
|
|
|
|
event,
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context,
|
2022-07-26 13:39:23 +02:00
|
|
|
|
backfilled=backfilled,
|
|
|
|
|
)
|
|
|
|
|
except PartialStateConflictError:
|
|
|
|
|
# The room was un-partial stated while we were processing the event.
|
|
|
|
|
# Try once more, with full state this time.
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context = await self._compute_event_context_with_maybe_missing_prevs(
|
2022-07-26 13:39:23 +02:00
|
|
|
|
origin, event
|
|
|
|
|
)
|
2022-05-26 11:48:12 +02:00
|
|
|
|
|
2022-07-26 13:39:23 +02:00
|
|
|
|
# We ought to have full state now, barring some unlikely race where we left and
|
|
|
|
|
# rejoined the room in the background.
|
2022-08-01 14:53:56 +02:00
|
|
|
|
if context.partial_state:
|
2022-07-26 13:39:23 +02:00
|
|
|
|
raise AssertionError(
|
|
|
|
|
f"Event {event.event_id} still has a partial resolved state "
|
|
|
|
|
f"after room {event.room_id} was un-partial stated"
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
await self._process_received_pdu(
|
|
|
|
|
origin,
|
|
|
|
|
event,
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context,
|
2022-07-26 13:39:23 +02:00
|
|
|
|
backfilled=backfilled,
|
|
|
|
|
)
|
2022-10-15 07:36:49 +02:00
|
|
|
|
except FederationPullAttemptBackoffError as exc:
|
|
|
|
|
# Log a warning about why we failed to process the event (the error message
|
|
|
|
|
# for `FederationPullAttemptBackoffError` is pretty good)
|
|
|
|
|
logger.warning("_process_pulled_event: %s", exc)
|
|
|
|
|
# We do not record a failed pull attempt when we backoff fetching a missing
|
|
|
|
|
# `prev_event` because not being able to fetch the `prev_events` just means
|
|
|
|
|
# we won't be able to de-outlier the pulled event. But we can still use an
|
|
|
|
|
# `outlier` in the state/auth chain for another event. So we shouldn't stop
|
|
|
|
|
# a downstream event from trying to pull it.
|
|
|
|
|
#
|
|
|
|
|
# This avoids a cascade of backoff for all events in the DAG downstream from
|
|
|
|
|
# a single event backing off upstream.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
except FederationError as e:
|
2022-09-14 20:57:50 +02:00
|
|
|
|
await self._store.record_event_failed_pull_attempt(
|
|
|
|
|
event.room_id, event_id, str(e)
|
|
|
|
|
)
|
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
if e.code == 403:
|
|
|
|
|
logger.warning("Pulled event %s failed history check.", event_id)
|
|
|
|
|
else:
|
|
|
|
|
raise
|
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@trace
|
2022-08-01 14:53:56 +02:00
|
|
|
|
async def _compute_event_context_with_maybe_missing_prevs(
|
2021-08-26 22:41:44 +02:00
|
|
|
|
self, dest: str, event: EventBase
|
2022-08-01 14:53:56 +02:00
|
|
|
|
) -> EventContext:
|
|
|
|
|
"""Build an EventContext structure for a non-outlier event whose prev_events may
|
|
|
|
|
be missing.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-01 14:53:56 +02:00
|
|
|
|
This is used when we have pulled a batch of events from a remote server, and may
|
|
|
|
|
not have all the prev_events.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-01 14:53:56 +02:00
|
|
|
|
To build an EventContext, we need to calculate the state before the event. If we
|
|
|
|
|
already have all the prev_events for `event`, we can simply use the state after
|
|
|
|
|
the prev_events to calculate the state before `event`.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
Otherwise, the missing prevs become new backwards extremities, and we fall back
|
|
|
|
|
to asking the remote server for the state after each missing `prev_event`,
|
|
|
|
|
and resolving across them.
|
|
|
|
|
|
|
|
|
|
That's ok provided we then resolve the state against other bits of the DAG
|
|
|
|
|
before using it - in other words, that the received event `event` is not going
|
|
|
|
|
to become the only forwards_extremity in the room (which will ensure that you
|
|
|
|
|
can't just take over a room by sending an event, withholding its prev_events,
|
|
|
|
|
and declaring yourself to be an admin in the subsequent state request).
|
|
|
|
|
|
|
|
|
|
In other words: we should only call this method if `event` has been *pulled*
|
|
|
|
|
as part of a batch of missing prev events, or similar.
|
|
|
|
|
|
|
|
|
|
Params:
|
|
|
|
|
dest: the remote server to ask for state at the missing prevs. Typically,
|
|
|
|
|
this will be the server we got `event` from.
|
|
|
|
|
event: an event to check for missing prevs.
|
|
|
|
|
|
|
|
|
|
Returns:
|
2022-08-01 14:53:56 +02:00
|
|
|
|
The event context.
|
2022-05-31 16:50:29 +02:00
|
|
|
|
|
|
|
|
|
Raises:
|
2022-10-15 07:36:49 +02:00
|
|
|
|
FederationPullAttemptBackoffError if we are deliberately not attempting
|
2023-03-30 14:36:41 +02:00
|
|
|
|
to pull one of the given event's `prev_event`s over federation because
|
|
|
|
|
we've already done so recently and are backing off.
|
2022-05-31 16:50:29 +02:00
|
|
|
|
FederationError if we fail to get the state from the remote server after any
|
|
|
|
|
missing `prev_event`s.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""
|
|
|
|
|
room_id = event.room_id
|
|
|
|
|
event_id = event.event_id
|
|
|
|
|
|
|
|
|
|
prevs = set(event.prev_event_ids())
|
2021-09-07 12:15:51 +02:00
|
|
|
|
seen = await self._store.have_events_in_timeline(prevs)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
missing_prevs = prevs - seen
|
|
|
|
|
|
2022-10-15 07:36:49 +02:00
|
|
|
|
# If we've already recently attempted to pull this missing event, don't
|
|
|
|
|
# try it again so soon. Since we have to fetch all of the prev_events, we can
|
|
|
|
|
# bail early here if we find any to ignore.
|
2023-03-30 14:36:41 +02:00
|
|
|
|
prevs_with_pull_backoff = (
|
|
|
|
|
await self._store.get_event_ids_to_not_pull_from_backoff(
|
|
|
|
|
room_id, missing_prevs
|
|
|
|
|
)
|
2022-10-15 07:36:49 +02:00
|
|
|
|
)
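# Note (descriptive comment): `prevs_with_pull_backoff` maps each backed-off
# prev event ID to the timestamp (in ms) at which we may next try pulling it,
# which is why `retry_after_ms` below is derived from the latest of those values.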
|
2023-03-30 14:36:41 +02:00
|
|
|
|
if len(prevs_with_pull_backoff) > 0:
|
2022-10-15 07:36:49 +02:00
|
|
|
|
raise FederationPullAttemptBackoffError(
|
2023-03-30 14:36:41 +02:00
|
|
|
|
event_ids=prevs_with_pull_backoff.keys(),
|
|
|
|
|
message=(
|
|
|
|
|
f"While computing context for event={event_id}, not attempting to "
|
|
|
|
|
f"pull missing prev_events={list(prevs_with_pull_backoff.keys())} "
|
|
|
|
|
"because we already tried to pull recently (backing off)."
|
|
|
|
|
),
|
|
|
|
|
retry_after_ms=(
|
|
|
|
|
max(prevs_with_pull_backoff.values()) - self._clock.time_msec()
|
|
|
|
|
),
|
2022-10-15 07:36:49 +02:00
|
|
|
|
)
|
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
if not missing_prevs:
|
2022-08-01 14:53:56 +02:00
|
|
|
|
return await self._state_handler.compute_event_context(event)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
logger.info(
|
|
|
|
|
"Event %s is missing prev_events %s: calculating state for a "
|
|
|
|
|
"backwards extremity",
|
|
|
|
|
event_id,
|
|
|
|
|
shortstr(missing_prevs),
|
|
|
|
|
)
|
|
|
|
|
# Calculate the state after each of the previous events, and
|
|
|
|
|
# resolve them to find the correct state at the current event.
|
2022-05-26 11:48:12 +02:00
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
try:
|
2022-07-26 13:39:23 +02:00
|
|
|
|
# Determine whether we may be about to retrieve partial state
|
|
|
|
|
# Events may be un-partial stated right after we compute the partial state
|
|
|
|
|
# flag, but that's okay, as long as the flag errs on the conservative side.
|
|
|
|
|
partial_state_flags = await self._store.get_partial_state_events(seen)
|
|
|
|
|
partial_state = any(partial_state_flags.values())
|
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
# Get the state of the events we know about
|
2022-05-31 14:17:50 +02:00
|
|
|
|
ours = await self._state_storage_controller.get_state_groups_ids(
|
2022-07-26 13:39:23 +02:00
|
|
|
|
room_id, seen, await_full_state=False
|
2022-05-31 14:17:50 +02:00
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
# state_maps is a list of mappings from (type, state_key) to event_id
|
|
|
|
|
state_maps: List[StateMap[str]] = list(ours.values())
|
|
|
|
|
|
|
|
|
|
# we don't need this any more, let's delete it.
|
|
|
|
|
del ours
|
|
|
|
|
|
|
|
|
|
# Ask the remote server for the states we don't
|
|
|
|
|
# know about
|
|
|
|
|
for p in missing_prevs:
|
|
|
|
|
logger.info("Requesting state after missing prev_event %s", p)
|
|
|
|
|
|
|
|
|
|
with nested_logging_context(p):
|
|
|
|
|
# note that if any of the missing prevs share missing state or
|
|
|
|
|
# auth events, the requests to fetch those events are deduped
|
|
|
|
|
# by the get_pdu_cache in federation_client.
|
2022-05-26 11:48:12 +02:00
|
|
|
|
remote_state_map = (
|
|
|
|
|
await self._get_state_ids_after_missing_prev_event(
|
|
|
|
|
dest, room_id, p
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
state_maps.append(remote_state_map)
|
|
|
|
|
|
2021-09-07 12:15:51 +02:00
|
|
|
|
room_version = await self._store.get_room_version_id(room_id)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
state_map = await self._state_resolution_handler.resolve_events_with_store(
|
|
|
|
|
room_id,
|
|
|
|
|
room_version,
|
|
|
|
|
state_maps,
|
2022-05-26 11:48:12 +02:00
|
|
|
|
event_map={event_id: event},
|
2021-09-07 12:15:51 +02:00
|
|
|
|
state_res_store=StateResolutionStore(self._store),
|
2021-08-26 22:41:44 +02:00
|
|
|
|
)
|
|
|
|
|
|
2022-11-10 13:17:46 +01:00
|
|
|
|
except Exception as e:
|
2021-08-26 22:41:44 +02:00
|
|
|
|
logger.warning(
|
2022-11-10 13:17:46 +01:00
|
|
|
|
"Error attempting to resolve state at missing prev_events: %s", e
|
2021-08-26 22:41:44 +02:00
|
|
|
|
)
|
|
|
|
|
raise FederationError(
|
|
|
|
|
"ERROR",
|
|
|
|
|
403,
|
|
|
|
|
"We can't get valid state history.",
|
|
|
|
|
affected=event_id,
|
|
|
|
|
)
|
2022-08-01 14:53:56 +02:00
|
|
|
|
return await self._state_handler.compute_event_context(
|
|
|
|
|
event, state_ids_before_event=state_map, partial_state=partial_state
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@trace
|
|
|
|
|
@tag_args
|
2022-05-26 11:48:12 +02:00
|
|
|
|
async def _get_state_ids_after_missing_prev_event(
|
2021-08-26 22:41:44 +02:00
|
|
|
|
self,
|
|
|
|
|
destination: str,
|
|
|
|
|
room_id: str,
|
|
|
|
|
event_id: str,
|
2022-05-26 11:48:12 +02:00
|
|
|
|
) -> StateMap[str]:
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""Requests all of the room state at a given event from a remote homeserver.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
destination: The remote homeserver to query for the state.
|
|
|
|
|
room_id: The id of the room we're interested in.
|
|
|
|
|
event_id: The id of the event we want the state at.
|
|
|
|
|
|
|
|
|
|
Returns:
|
2022-05-26 11:48:12 +02:00
|
|
|
|
The event ids of the state *after* the given event.
|
2022-05-31 16:50:29 +02:00
|
|
|
|
|
|
|
|
|
Raises:
|
|
|
|
|
InvalidResponseError: if the remote homeserver's response contains fields
|
|
|
|
|
of the wrong type.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""
|
2022-08-25 01:59:27 +02:00
|
|
|
|
|
|
|
|
|
# It would be better if we could query the difference from our known
|
|
|
|
|
# state to the given `event_id` so the sending server doesn't have to
|
|
|
|
|
# send as much and we don't have to process as many events. For example
|
|
|
|
|
# in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k
|
|
|
|
|
# auth_events) from this call.
|
|
|
|
|
#
|
|
|
|
|
# Tracked by https://github.com/matrix-org/synapse/issues/13618
|
2021-08-26 22:41:44 +02:00
|
|
|
|
(
|
|
|
|
|
state_event_ids,
|
|
|
|
|
auth_event_ids,
|
2021-09-07 12:15:51 +02:00
|
|
|
|
) = await self._federation_client.get_room_state_ids(
|
2021-08-26 22:41:44 +02:00
|
|
|
|
destination, room_id, event_id=event_id
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
logger.debug(
|
|
|
|
|
"state_ids returned %i state events, %i auth events",
|
|
|
|
|
len(state_event_ids),
|
|
|
|
|
len(auth_event_ids),
|
|
|
|
|
)
|
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
# Start by checking events we already have in the DB
|
2021-08-26 22:41:44 +02:00
|
|
|
|
desired_events = set(state_event_ids)
|
|
|
|
|
desired_events.add(event_id)
|
|
|
|
|
logger.debug("Fetching %i events from cache/store", len(desired_events))
|
2022-05-26 11:48:12 +02:00
|
|
|
|
have_events = await self._store.have_seen_events(room_id, desired_events)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
missing_desired_event_ids = desired_events - have_events
|
2021-08-26 22:41:44 +02:00
|
|
|
|
logger.debug(
|
|
|
|
|
"We are missing %i events (got %i)",
|
2022-08-16 19:39:40 +02:00
|
|
|
|
len(missing_desired_event_ids),
|
2022-05-26 11:48:12 +02:00
|
|
|
|
len(have_events),
|
2021-08-26 22:41:44 +02:00
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# We probably won't need most of the auth events, so let's just check which
|
|
|
|
|
# we have for now, rather than thrashing the event cache with them all
|
|
|
|
|
# unnecessarily.
|
|
|
|
|
|
|
|
|
|
# TODO: we probably won't actually need all of the auth events, since we
|
|
|
|
|
# already have a bunch of the state events. It would be nice if the
|
|
|
|
|
# federation api gave us a way of finding out which we actually need.
|
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
missing_auth_event_ids = set(auth_event_ids) - have_events
|
|
|
|
|
missing_auth_event_ids.difference_update(
|
|
|
|
|
await self._store.have_seen_events(room_id, missing_auth_event_ids)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
)
|
2022-08-16 19:39:40 +02:00
|
|
|
|
logger.debug("We are also missing %i auth events", len(missing_auth_event_ids))
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
missing_event_ids = missing_desired_event_ids | missing_auth_event_ids
|
|
|
|
|
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "missing_auth_event_ids",
|
|
|
|
|
str(missing_auth_event_ids),
|
|
|
|
|
)
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "missing_auth_event_ids.length",
|
|
|
|
|
str(len(missing_auth_event_ids)),
|
|
|
|
|
)
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "missing_desired_event_ids",
|
|
|
|
|
str(missing_desired_event_ids),
|
|
|
|
|
)
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "missing_desired_event_ids.length",
|
|
|
|
|
str(len(missing_desired_event_ids)),
|
|
|
|
|
)
|
2022-04-01 13:53:42 +02:00
|
|
|
|
|
|
|
|
|
# Making an individual request for each of 1000s of events has a lot of
|
|
|
|
|
# overhead. On the other hand, we don't really want to fetch all of the events
|
|
|
|
|
# if we already have most of them.
|
|
|
|
|
#
|
|
|
|
|
# As an arbitrary heuristic, if we are missing more than 10% of the events, then
|
|
|
|
|
# we fetch the whole state.
|
|
|
|
|
#
|
|
|
|
|
# TODO: might it be better to have an API which lets us do an aggregate event
|
|
|
|
|
# request
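# Worked example of the heuristic below (numbers are hypothetical): with 800
# state events and 200 auth events the right-hand side is 1000, so we fall
# back to fetching the complete state once 100 or more events are missing
# (100 * 10 >= 1000).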
|
2022-08-16 19:39:40 +02:00
|
|
|
|
if (len(missing_event_ids) * 10) >= len(auth_event_ids) + len(state_event_ids):
|
2022-04-01 13:53:42 +02:00
|
|
|
|
logger.debug("Requesting complete state from remote")
|
|
|
|
|
await self._get_state_and_persist(destination, room_id, event_id)
|
|
|
|
|
else:
|
2022-08-16 19:39:40 +02:00
|
|
|
|
logger.debug("Fetching %i events from remote", len(missing_event_ids))
|
2022-04-01 13:53:42 +02:00
|
|
|
|
await self._get_events_and_persist(
|
2022-08-16 19:39:40 +02:00
|
|
|
|
destination=destination, room_id=room_id, event_ids=missing_event_ids
|
2022-04-01 13:53:42 +02:00
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
# We now need to fill out the state map, which involves fetching the
|
|
|
|
|
# type and state key for each event ID in the state.
|
|
|
|
|
state_map = {}
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
event_metadata = await self._store.get_metadata_for_events(state_event_ids)
|
|
|
|
|
for state_event_id, metadata in event_metadata.items():
|
|
|
|
|
if metadata.room_id != room_id:
|
|
|
|
|
# This is a bogus situation, but since we may only discover it a long time
|
|
|
|
|
# after it happened, we try our best to carry on, by just omitting the
|
|
|
|
|
# bad events from the returned state set.
|
|
|
|
|
#
|
|
|
|
|
# This can happen if a remote server claims that the state or
|
|
|
|
|
# auth_events at an event in room A are actually events in room B
|
|
|
|
|
logger.warning(
|
|
|
|
|
"Remote server %s claims event %s in room %s is an auth/state "
|
|
|
|
|
"event in room %s",
|
|
|
|
|
destination,
|
|
|
|
|
state_event_id,
|
|
|
|
|
metadata.room_id,
|
|
|
|
|
room_id,
|
|
|
|
|
)
|
|
|
|
|
continue
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
if metadata.state_key is None:
|
|
|
|
|
logger.warning(
|
|
|
|
|
"Remote server gave us non-state event in state: %s", state_event_id
|
|
|
|
|
)
|
|
|
|
|
continue
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
state_map[(metadata.event_type, metadata.state_key)] = state_event_id
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
# if we couldn't get the prev event in question, that's a problem.
|
2022-05-26 11:48:12 +02:00
|
|
|
|
remote_event = await self._store.get_event(
|
|
|
|
|
event_id,
|
|
|
|
|
allow_none=True,
|
|
|
|
|
allow_rejected=True,
|
|
|
|
|
redact_behaviour=EventRedactBehaviour.as_is,
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
if not remote_event:
|
|
|
|
|
raise Exception("Unable to get missing prev_event %s" % (event_id,))
|
|
|
|
|
|
|
|
|
|
# missing state at that event is a warning, not a blocker
|
|
|
|
|
# XXX: this doesn't sound right? it means that we'll end up with incomplete
|
|
|
|
|
# state.
|
2022-05-26 11:48:12 +02:00
|
|
|
|
failed_to_fetch = desired_events - event_metadata.keys()
|
2022-07-19 20:15:54 +02:00
|
|
|
|
# `event_id` could be missing from `event_metadata` because it's not necessarily
|
|
|
|
|
# a state event. We've already checked that we've fetched it above.
|
|
|
|
|
failed_to_fetch.discard(event_id)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
if failed_to_fetch:
|
|
|
|
|
logger.warning(
|
|
|
|
|
"Failed to fetch missing state events for %s %s",
|
|
|
|
|
event_id,
|
|
|
|
|
failed_to_fetch,
|
|
|
|
|
)
|
2022-08-16 19:39:40 +02:00
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "failed_to_fetch",
|
|
|
|
|
str(failed_to_fetch),
|
|
|
|
|
)
|
|
|
|
|
set_tag(
|
|
|
|
|
SynapseTags.RESULT_PREFIX + "failed_to_fetch.length",
|
|
|
|
|
str(len(failed_to_fetch)),
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
if remote_event.is_state() and remote_event.rejected_reason is None:
|
2022-05-26 11:48:12 +02:00
|
|
|
|
state_map[
|
|
|
|
|
(remote_event.type, remote_event.state_key)
|
|
|
|
|
] = remote_event.event_id
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-05-26 11:48:12 +02:00
|
|
|
|
return state_map
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@trace
|
|
|
|
|
@tag_args
|
2022-04-01 13:53:42 +02:00
|
|
|
|
async def _get_state_and_persist(
|
|
|
|
|
self, destination: str, room_id: str, event_id: str
|
|
|
|
|
) -> None:
|
|
|
|
|
"""Get the complete room state at a given event, and persist any new events
|
|
|
|
|
as outliers"""
|
|
|
|
|
room_version = await self._store.get_room_version(room_id)
|
|
|
|
|
auth_events, state_events = await self._federation_client.get_room_state(
|
|
|
|
|
destination, room_id, event_id=event_id, room_version=room_version
|
|
|
|
|
)
|
|
|
|
|
logger.info("/state returned %i events", len(auth_events) + len(state_events))
|
|
|
|
|
|
|
|
|
|
await self._auth_and_persist_outliers(
|
|
|
|
|
room_id, itertools.chain(auth_events, state_events)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# we also need the event itself.
|
|
|
|
|
if not await self._store.have_seen_event(room_id, event_id):
|
|
|
|
|
await self._get_events_and_persist(
|
|
|
|
|
destination=destination, room_id=room_id, event_ids=(event_id,)
|
|
|
|
|
)
|
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@trace
|
2021-08-26 22:41:44 +02:00
|
|
|
|
async def _process_received_pdu(
|
|
|
|
|
self,
|
|
|
|
|
origin: str,
|
|
|
|
|
event: EventBase,
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context: EventContext,
|
2021-08-26 22:41:44 +02:00
|
|
|
|
backfilled: bool = False,
|
|
|
|
|
) -> None:
|
2021-09-08 11:41:13 +02:00
|
|
|
|
"""Called when we have a new non-outlier event.
|
|
|
|
|
|
2021-10-25 16:21:09 +02:00
|
|
|
|
This is called when we have a new event to add to the room DAG. This can be
|
|
|
|
|
due to:
|
|
|
|
|
* events received directly via a /send request
|
|
|
|
|
* events retrieved via get_missing_events after a /send request
|
|
|
|
|
* events backfilled after a client request.
|
|
|
|
|
|
|
|
|
|
It's not currently used for events received from incoming send_{join,knock,leave}
|
|
|
|
|
requests (which go via on_send_membership_event), nor for joins created by a
|
|
|
|
|
remote join dance (which go via process_remote_join).
|
2021-09-08 11:41:13 +02:00
|
|
|
|
|
|
|
|
|
We need to do auth checks and put it through the StateHandler.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
origin: server sending the event
|
|
|
|
|
|
|
|
|
|
event: event to be persisted
|
|
|
|
|
|
2022-08-01 14:53:56 +02:00
|
|
|
|
context: The `EventContext` to persist the event with.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
backfilled: True if this is part of a historical batch of events (inhibits
|
|
|
|
|
notification to clients, and validation of device keys.)
|
Handle race between persisting an event and un-partial stating a room (#13100)
Whenever we want to persist an event, we first compute an event context,
which includes the state at the event and a flag indicating whether the
state is partial. After a lot of processing, we finally try to store the
event in the database, which can fail for partial state events when the
containing room has been un-partial stated in the meantime.
We detect the race as a foreign key constraint failure in the data store
layer and turn it into a special `PartialStateConflictError` exception,
which makes its way up to the method in which we computed the event
context.
To make things difficult, the exception needs to cross a replication
request: `/fed_send_events` for events coming over federation and
`/send_event` for events from clients. We transport the
`PartialStateConflictError` as a `409 Conflict` over replication and
turn `409`s back into `PartialStateConflictError`s on the worker making
the request.
All client events go through
`EventCreationHandler.handle_new_client_event`, which is called in
*a lot* of places. Instead of trying to update all the code which
creates client events, we turn the `PartialStateConflictError` into a
`429 Too Many Requests` in
`EventCreationHandler.handle_new_client_event` and hope that clients
take it as a hint to retry their request.
On the federation event side, there are 7 places which compute event
contexts. 4 of them use outlier event contexts:
`FederationEventHandler._auth_and_persist_outliers_inner`,
`FederationHandler.do_knock`, `FederationHandler.on_invite_request` and
`FederationHandler.do_remotely_reject_invite`. These events won't have
the partial state flag, so we do not need to do anything for then.
The remaining 3 paths which create events are
`FederationEventHandler.process_remote_join`,
`FederationEventHandler.on_send_membership_event` and
`FederationEventHandler._process_received_pdu`.
We can't experience the race in `process_remote_join`, unless we're
handling an additional join into a partial state room, which currently
blocks, so we make no attempt to handle it correctly.
`on_send_membership_event` is only called by
`FederationServer._on_send_membership_event`, so we catch the
`PartialStateConflictError` there and retry just once.
`_process_received_pdu` is called by `on_receive_pdu` for incoming
events and `_process_pulled_event` for backfill. The latter should never
try to persist partial state events, so we ignore it. We catch the
`PartialStateConflictError` in `on_receive_pdu` and retry just once.
Refering to the graph of code paths in
https://github.com/matrix-org/synapse/issues/12988#issuecomment-1156857648
may make the above make more sense.
Signed-off-by: Sean Quah <seanq@matrix.org>
2022-07-05 17:12:52 +02:00
|
|
|
|
|
|
|
|
|
PartialStateConflictError: if the room was un-partial stated in between
|
2022-08-01 14:53:56 +02:00
|
|
|
|
computing the state at the event and persisting it. The caller should
|
|
|
|
|
recompute `context` and retry exactly once when this happens.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""
|
|
|
|
|
logger.debug("Processing event: %s", event)
|
2021-09-08 11:41:13 +02:00
|
|
|
|
assert not event.internal_metadata.outlier
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
try:
|
2022-07-14 23:52:26 +02:00
|
|
|
|
await self._check_event_auth(origin, event, context)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
except AuthError as e:
|
2022-07-01 11:52:10 +02:00
|
|
|
|
# This happens only if we couldn't find the auth events. We'll already have
|
|
|
|
|
# logged a warning, so now we just convert to a FederationError.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
|
|
|
|
|
|
2021-10-25 16:21:09 +02:00
|
|
|
|
if not backfilled and not context.rejected:
|
|
|
|
|
# For new (non-backfilled and non-outlier) events we check if the event
|
|
|
|
|
# passes auth based on the current state. If it doesn't then we
|
|
|
|
|
# "soft-fail" the event.
|
2022-08-01 14:53:56 +02:00
|
|
|
|
await self._check_for_soft_fail(event, context=context, origin=origin)
|
2021-10-25 16:21:09 +02:00
|
|
|
|
|
2021-09-08 20:03:08 +02:00
|
|
|
|
await self._run_push_actions_and_persist_event(event, context, backfilled)
|
|
|
|
|
|
Fix historical messages backfilling in random order on remote homeservers (MSC2716) (#11114)
Fix https://github.com/matrix-org/synapse/issues/11091
2022-02-07 22:54:13 +01:00
|
|
|
|
await self._handle_marker_event(origin, event)
|
|
|
|
|
|
2021-10-25 16:21:09 +02:00
|
|
|
|
if backfilled or context.rejected:
|
2021-08-26 22:41:44 +02:00
|
|
|
|
return
|
|
|
|
|
|
2021-10-25 16:21:09 +02:00
|
|
|
|
await self._maybe_kick_guest_users(event)
|
|
|
|
|
|
2021-08-26 22:41:44 +02:00
|
|
|
|
# For encrypted messages we check that we know about the sending device,
|
|
|
|
|
# if we don't then we mark the device cache for that user as stale.
|
|
|
|
|
if event.type == EventTypes.Encrypted:
|
|
|
|
|
device_id = event.content.get("device_id")
|
|
|
|
|
sender_key = event.content.get("sender_key")
|
|
|
|
|
|
2021-09-07 12:15:51 +02:00
|
|
|
|
cached_devices = await self._store.get_cached_devices_for_user(event.sender)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
resync = False # Whether we should resync device lists.
|
|
|
|
|
|
|
|
|
|
device = None
|
|
|
|
|
if device_id is not None:
|
|
|
|
|
device = cached_devices.get(device_id)
|
|
|
|
|
if device is None:
|
|
|
|
|
logger.info(
|
|
|
|
|
"Received event from remote device not in our cache: %s %s",
|
|
|
|
|
event.sender,
|
|
|
|
|
device_id,
|
|
|
|
|
)
|
|
|
|
|
resync = True
|
|
|
|
|
|
|
|
|
|
# We also check if the `sender_key` matches what we expect.
|
|
|
|
|
if sender_key is not None:
|
|
|
|
|
# Figure out what sender key we're expecting. If we know the
|
|
|
|
|
# device and recognize the algorithm then we can work out the
|
|
|
|
|
# exact key to expect. Otherwise check it matches any key we
|
|
|
|
|
# have for that device.
|
|
|
|
|
|
|
|
|
|
current_keys: Container[str] = []
|
|
|
|
|
|
|
|
|
|
if device:
|
|
|
|
|
keys = device.get("keys", {}).get("keys", {})
|
|
|
|
|
|
|
|
|
|
if (
|
|
|
|
|
event.content.get("algorithm")
|
|
|
|
|
== RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2
|
|
|
|
|
):
|
|
|
|
|
# For this algorithm we expect a curve25519 key.
|
|
|
|
|
key_name = "curve25519:%s" % (device_id,)
|
|
|
|
|
current_keys = [keys.get(key_name)]
|
|
|
|
|
else:
|
|
|
|
|
# We don't understand the algorithm, so we just
|
|
|
|
|
# check it matches a key for the device.
|
|
|
|
|
current_keys = keys.values()
|
|
|
|
|
elif device_id:
|
|
|
|
|
# We don't have any keys for the device ID.
|
|
|
|
|
pass
|
|
|
|
|
else:
|
|
|
|
|
# The event didn't include a device ID, so we just look for
|
|
|
|
|
# keys across all devices.
|
|
|
|
|
current_keys = [
|
|
|
|
|
key
|
|
|
|
|
for device in cached_devices.values()
|
|
|
|
|
for key in device.get("keys", {}).get("keys", {}).values()
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
# We now check that the sender key matches (one of) the expected
|
|
|
|
|
# keys.
|
|
|
|
|
if sender_key not in current_keys:
|
|
|
|
|
logger.info(
|
|
|
|
|
"Received event from remote device with unexpected sender key: %s %s: %s",
|
|
|
|
|
event.sender,
|
|
|
|
|
device_id or "<no device_id>",
|
|
|
|
|
sender_key,
|
|
|
|
|
)
|
|
|
|
|
resync = True
|
|
|
|
|
|
|
|
|
|
if resync:
|
|
|
|
|
run_as_background_process(
|
|
|
|
|
"resync_device_due_to_pdu",
|
|
|
|
|
self._resync_device,
|
|
|
|
|
event.sender,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
async def _resync_device(self, sender: str) -> None:
|
|
|
|
|
"""We have detected that the device list for the given user may be out
|
|
|
|
|
of sync, so we try and resync them.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
try:
|
2023-01-10 12:17:59 +01:00
|
|
|
|
await self._store.mark_remote_users_device_caches_as_stale((sender,))
|
2021-08-26 22:41:44 +02:00
|
|
|
|
|
|
|
|
|
# Immediately attempt a resync in the background
|
2021-09-13 19:07:12 +02:00
|
|
|
|
if self._config.worker.worker_app:
|
2023-04-21 13:06:39 +02:00
|
|
|
|
await self._multi_user_device_resync(user_ids=[sender])
|
2021-08-26 22:41:44 +02:00
|
|
|
|
else:
|
2023-04-21 13:06:39 +02:00
|
|
|
|
await self._device_list_updater.multi_user_device_resync(
|
|
|
|
|
user_ids=[sender]
|
|
|
|
|
)
|
2021-08-26 22:41:44 +02:00
|
|
|
|
except Exception:
|
|
|
|
|
logger.exception("Failed to resync device for %s", sender)
|
|
|
|
|
|
2022-08-16 19:39:40 +02:00
|
|
|
|
@trace
|
2021-09-20 14:56:23 +02:00
|
|
|
|
async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""Handles backfilling the insertion event when we receive a marker
|
|
|
|
|
event that points to one.
|
|
|
|
|
|
|
|
|
|
Args:
|
|
|
|
|
origin: Origin of the event. This server will be asked for the insertion event
|
|
|
|
|
marker_event: The event to process
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
if marker_event.type != EventTypes.MSC2716_MARKER:
|
|
|
|
|
# Not a marker event
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
if marker_event.rejected_reason is not None:
|
|
|
|
|
# Rejected event
|
|
|
|
|
return
|
|
|
|
|
|
|
|
|
|
# Skip processing a marker event if the room version doesn't
|
Allow room creator to send MSC2716 related events in existing room versions (#10566)
* Allow room creator to send MSC2716 related events in existing room versions
Discussed at https://github.com/matrix-org/matrix-doc/pull/2716/#discussion_r682474869
Restoring `get_create_event_for_room_txn` from,
https://github.com/matrix-org/synapse/pull/10245/commits/44bb3f0cf5cb365ef9281554daceeecfb17cc94d
* Add changelog
* Stop people from trying to redact MSC2716 events in unsupported room versions
* Populate rooms.creator column for easy lookup
> From some [out of band discussion](https://matrix.to/#/!UytJQHLQYfvYWsGrGY:jki.re/$p2fKESoFst038x6pOOmsY0C49S2gLKMr0jhNMz_JJz0?via=jki.re&via=matrix.org), my plan is to use `rooms.creator`. But currently, we don't fill in `creator` for remote rooms when a user is invited to a room for example. So we need to add some code to fill in `creator` wherever we add to the `rooms` table. And also add a background update to fill in the rows missing `creator` (we can use the same logic that `get_create_event_for_room_txn` is doing by looking in the state events to get the `creator`).
>
> https://github.com/matrix-org/synapse/pull/10566#issuecomment-901616642
* Remove and switch away from get_create_event_for_room_txn
* Fix no create event being found because no state events persisted yet
* Fix and add tests for rooms creator bg update
* Populate rooms.creator field for easy lookup
Part of https://github.com/matrix-org/synapse/pull/10566
- Fill in creator whenever we insert into the rooms table
- Add background update to backfill any missing creator values
* Add changelog
* Fix usage
* Remove extra delta already included in #10697
* Don't worry about setting creator for invite
* Only iterate over rows missing the creator
See https://github.com/matrix-org/synapse/pull/10697#discussion_r695940898
* Use constant to fetch room creator field
See https://github.com/matrix-org/synapse/pull/10697#discussion_r696803029
* More protection from other random types
See https://github.com/matrix-org/synapse/pull/10697#discussion_r696806853
* Move new background update to end of list
See https://github.com/matrix-org/synapse/pull/10697#discussion_r696814181
* Fix query casing
* Fix ambiguity iterating over cursor instead of list
Fix `psycopg2.ProgrammingError: no results to fetch` error
when tests run with Postgres.
```
SYNAPSE_POSTGRES=1 SYNAPSE_TEST_LOG_LEVEL=INFO python -m twisted.trial tests.storage.databases.main.test_room
```
---
We use `txn.fetchall` because it will return the results as a
list or an empty list when there are no results.
Docs:
> `cursor` objects are iterable, so, instead of calling explicitly fetchone() in a loop, the object itself can be used:
>
> https://www.psycopg.org/docs/cursor.html#cursor-iterable
And I'm guessing iterating over a raw cursor does something weird when there are no results.
---
Test CI failure: https://github.com/matrix-org/synapse/pull/10697/checks?check_run_id=3468916530
```
tests.test_visibility.FilterEventsForServerTestCase.test_large_room
===============================================================================
[FAIL]
Traceback (most recent call last):
File "/home/runner/work/synapse/synapse/tests/storage/databases/main/test_room.py", line 85, in test_background_populate_rooms_creator_column
self.get_success(
File "/home/runner/work/synapse/synapse/tests/unittest.py", line 500, in get_success
return self.successResultOf(d)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/trial/_synctest.py", line 700, in successResultOf
self.fail(
twisted.trial.unittest.FailTest: Success result expected on <Deferred at 0x7f4022f3eb50 current result: None>, found failure result instead:
Traceback (most recent call last):
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 701, in errback
self._startRunCallbacks(fail)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 764, in _startRunCallbacks
self._runCallbacks()
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 858, in _runCallbacks
current.result = callback( # type: ignore[misc]
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 1751, in gotResult
current_context.run(_inlineCallbacks, r, gen, status)
--- <exception caught here> ---
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 1657, in _inlineCallbacks
result = current_context.run(
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/python/failure.py", line 500, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "/home/runner/work/synapse/synapse/synapse/storage/background_updates.py", line 224, in do_next_background_update
await self._do_background_update(desired_duration_ms)
File "/home/runner/work/synapse/synapse/synapse/storage/background_updates.py", line 261, in _do_background_update
items_updated = await update_handler(progress, batch_size)
File "/home/runner/work/synapse/synapse/synapse/storage/databases/main/room.py", line 1399, in _background_populate_rooms_creator_column
end = await self.db_pool.runInteraction(
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 686, in runInteraction
result = await self.runWithConnection(
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 791, in runWithConnection
return await make_deferred_yieldable(
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/internet/defer.py", line 858, in _runCallbacks
current.result = callback( # type: ignore[misc]
File "/home/runner/work/synapse/synapse/tests/server.py", line 425, in <lambda>
d.addCallback(lambda x: function(*args, **kwargs))
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/enterprise/adbapi.py", line 293, in _runWithConnection
compat.reraise(excValue, excTraceback)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/python/deprecate.py", line 298, in deprecatedFunction
return function(*args, **kwargs)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/python/compat.py", line 404, in reraise
raise exception.with_traceback(traceback)
File "/home/runner/work/synapse/synapse/.tox/py/lib/python3.9/site-packages/twisted/enterprise/adbapi.py", line 284, in _runWithConnection
result = func(conn, *args, **kw)
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 786, in inner_func
return func(db_conn, *args, **kwargs)
File "/home/runner/work/synapse/synapse/synapse/storage/database.py", line 554, in new_transaction
r = func(cursor, *args, **kwargs)
File "/home/runner/work/synapse/synapse/synapse/storage/databases/main/room.py", line 1375, in _background_populate_rooms_creator_column_txn
for room_id, event_json in txn:
psycopg2.ProgrammingError: no results to fetch
```
* Move code not under the MSC2716 room version underneath an experimental config option
See https://github.com/matrix-org/synapse/pull/10566#issuecomment-906437909
* Add ordering to rooms creator background update
See https://github.com/matrix-org/synapse/pull/10697#discussion_r696815277
* Add comment to better document constant
See https://github.com/matrix-org/synapse/pull/10697#discussion_r699674458
* Use constant field
2021-09-04 07:58:49 +02:00
|
|
|
|
# support it or the event is not from the room creator.
|
2021-09-07 12:15:51 +02:00
|
|
|
|
room_version = await self._store.get_room_version(marker_event.room_id)
|
|
|
|
|
create_event = await self._store.get_create_event_for_room(marker_event.room_id)
|
2023-04-06 22:26:28 +02:00
|
|
|
|
if not room_version.msc2175_implicit_room_creator:
|
|
|
|
|
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
|
|
|
|
|
else:
|
|
|
|
|
room_creator = create_event.sender
|
2021-10-05 18:51:57 +02:00
|
|
|
|
if not room_version.msc2716_historical and (
|
|
|
|
|
not self._config.experimental.msc2716_enabled
|
Allow room creator to send MSC2716 related events in existing room versions (#10566)
2021-09-04 07:58:49 +02:00
|
|
|
|
            or marker_event.sender != room_creator
        ):
            return

        logger.debug("_handle_marker_event: received %s", marker_event)

        insertion_event_id = marker_event.content.get(
            EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE
        )

        if insertion_event_id is None:
            # Nothing to retrieve then (invalid marker)
            return

        already_seen_insertion_event = await self._store.have_seen_event(
            marker_event.room_id, insertion_event_id
        )
        if already_seen_insertion_event:
            # No need to process a marker again if we have already seen the
            # insertion event that it was pointing to
            return

        logger.debug(
            "_handle_marker_event: backfilling insertion event %s", insertion_event_id
        )

        await self._get_events_and_persist(
            origin,
            marker_event.room_id,
            [insertion_event_id],
        )

        insertion_event = await self._store.get_event(
            insertion_event_id, allow_none=True
        )
        if insertion_event is None:
            logger.warning(
                "_handle_marker_event: server %s didn't return insertion event %s for marker %s",
                origin,
                insertion_event_id,
                marker_event.event_id,
            )
            return

        logger.debug(
            "_handle_marker_event: successfully backfilled insertion event %s from marker event %s",
            insertion_event,
            marker_event,
        )

        await self._store.insert_insertion_extremity(
            insertion_event_id, marker_event.room_id
        )

        logger.debug(
            "_handle_marker_event: insertion extremity added for %s from marker event %s",
            insertion_event,
            marker_event,
        )
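
    # Note (illustrative): the marker event handled above is an MSC2716 construct;
    # its content is expected to carry, under the key named by
    # `EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE`, the event ID of the
    # insertion event to backfill, roughly `{<that key>: "$insertion_event_id"}`.
    # The exact key string is defined in synapse.api.constants.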

    async def backfill_event_id(
        self, destinations: List[str], room_id: str, event_id: str
    ) -> PulledPduInfo:
        """Backfill a single event and persist it as a non-outlier which means
        we also pull in all of the state and auth events necessary for it.

        Args:
            destinations: The remote homeservers to try pulling the given event_id from.
            room_id: The room where the event is from.
            event_id: The event ID to backfill.

        Raises:
            FederationError if we are unable to find the event from the destination
        """
        logger.info("backfill_event_id: event_id=%s", event_id)

        room_version = await self._store.get_room_version(room_id)

        pulled_pdu_info = await self._federation_client.get_pdu(
            destinations,
            event_id,
            room_version,
        )

        if not pulled_pdu_info:
            raise FederationError(
                "ERROR",
                404,
                f"Unable to find event_id={event_id} from remote servers to backfill.",
                affected=event_id,
            )

        # Persist the event we just fetched, including pulling all of the state
        # and auth events to de-outlier it. This also sets up the necessary
        # `state_groups` for the event.
        await self._process_pulled_events(
            pulled_pdu_info.pull_origin,
            [pulled_pdu_info.pdu],
            # Prevent notifications going to clients
            backfilled=True,
        )

        return pulled_pdu_info
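
    # Usage sketch (hypothetical caller, shown for illustration only):
    #
    #     pulled_pdu_info = await federation_event_handler.backfill_event_id(
    #         likely_domains, room_id, event_id
    #     )
    #     remote_event = pulled_pdu_info.pdu
    #
    # where `likely_domains` is an ordered list of servers thought to have the
    # event; `get_pdu` above tries them in turn and records which one answered
    # in `pulled_pdu_info.pull_origin`.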

    @trace
    @tag_args
    async def _get_events_and_persist(
        self, destination: str, room_id: str, event_ids: StrCollection
    ) -> None:
        """Fetch the given events from a server, and persist them as outliers.

        This function *does not* recursively get missing auth events of the
        newly fetched events. Callers must include in the `event_ids` argument
        any missing events from the auth chain.

        Logs a warning if we can't find the given event.
        """

        room_version = await self._store.get_room_version(room_id)

        events: List[EventBase] = []

        async def get_event(event_id: str) -> None:
            with nested_logging_context(event_id):
                try:
                    pulled_pdu_info = await self._federation_client.get_pdu(
                        [destination],
                        event_id,
                        room_version,
                    )
                    if pulled_pdu_info is None:
                        logger.warning(
                            "Server %s didn't return event %s",
                            destination,
                            event_id,
                        )
                        return
                    events.append(pulled_pdu_info.pdu)

                except Exception as e:
                    logger.warning(
                        "Error fetching missing state/auth event %s: %s %s",
                        event_id,
                        type(e),
                        e,
                    )

        await concurrently_execute(get_event, event_ids, 5)
        logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
        await self._auth_and_persist_outliers(room_id, events)
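
    # Note: the `5` passed to `concurrently_execute` above caps how many `get_event`
    # calls run concurrently; individual fetch failures are logged and skipped, so a
    # partial result set is still persisted as outliers.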

    @trace
    async def _auth_and_persist_outliers(
        self, room_id: str, events: Iterable[EventBase]
    ) -> None:
        """Persist a batch of outlier events fetched from remote servers.

        We first sort the events to make sure that we process each event's auth_events
        before the event itself.

        We then mark the events as outliers, persist them to the database, and, where
        appropriate (eg, an invite), awake the notifier.

        Params:
            room_id: the room that the events are meant to be in (though this has
                not yet been checked)
            events: the events that have been fetched
        """
        event_map = {event.event_id: event for event in events}

        event_ids = event_map.keys()
        set_tag(
            SynapseTags.FUNC_ARG_PREFIX + "event_ids",
            str(event_ids),
        )
        set_tag(
            SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
            str(len(event_ids)),
        )

        # filter out any events we have already seen. This might happen because
        # the events were eagerly pushed to us (eg, during a room join), or because
        # another thread has raced against us since we decided to request the event.
        #
        # This is just an optimisation, so it doesn't need to be watertight - the event
        # persister does another round of deduplication.
        seen_remotes = await self._store.have_seen_events(room_id, event_map.keys())
        for s in seen_remotes:
            event_map.pop(s, None)

        # XXX: it might be possible to kick this process off in parallel with fetching
        # the events.
        while event_map:
            # build a list of events whose auth events are not in the queue.
            roots = tuple(
                ev
                for ev in event_map.values()
                if not any(aid in event_map for aid in ev.auth_event_ids())
            )

            if not roots:
                # if *none* of the remaining events are ready, that means
                # we have a loop. This either means a bug in our logic, or that
                # somebody has managed to create a loop (which requires finding a
                # hash collision in room v2 and later).
                logger.warning(
                    "Loop found in auth events while fetching missing state/auth "
                    "events: %s",
                    shortstr(event_map.keys()),
                )
                return

            logger.info(
                "Persisting %i of %i remaining outliers: %s",
                len(roots),
                len(event_map),
                shortstr(e.event_id for e in roots),
            )

            await self._auth_and_persist_outliers_inner(room_id, roots)

            for ev in roots:
                del event_map[ev.event_id]
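
    # Worked example (illustrative): given fetched events A, B and C where B cites A
    # in its auth_events and C cites B, the loop above persists them in "root"
    # batches {A}, then {B}, then {C}, so that every event's auth events are already
    # in the database by the time it is auth-checked in
    # `_auth_and_persist_outliers_inner`.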

    async def _auth_and_persist_outliers_inner(
        self, room_id: str, fetched_events: Collection[EventBase]
    ) -> None:
        """Helper for _auth_and_persist_outliers

        Persists a batch of events where we have (theoretically) already persisted all
        of their auth events.

        Marks the events as outliers, auths them, persists them to the database, and,
        where appropriate (eg, an invite), awakes the notifier.

        Params:
            room_id: the room that the events are meant to be in (though this has
                not yet been checked)
            fetched_events: the events to persist
        """
        # get all the auth events for all the events in this batch. By now, they should
        # have been persisted.
        auth_events = {
            aid for event in fetched_events for aid in event.auth_event_ids()
        }
        persisted_events = await self._store.get_events(
            auth_events,
            allow_rejected=True,
        )

        events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = []

        async def prep(event: EventBase) -> None:
            with nested_logging_context(suffix=event.event_id):
                auth = []
                for auth_event_id in event.auth_event_ids():
                    ae = persisted_events.get(auth_event_id)
                    if not ae:
                        # the fact we can't find the auth event doesn't mean it doesn't
                        # exist, which means it is premature to reject `event`. Instead we
                        # just ignore it for now.
                        logger.warning(
                            "Dropping event %s, which relies on auth_event %s, which could not be found",
                            event,
                            auth_event_id,
                        )
                        return
                    auth.append(ae)

                # we're not bothering about room state, so flag the event as an outlier.
                event.internal_metadata.outlier = True

                context = EventContext.for_outlier(self._storage_controllers)
                try:
                    validate_event_for_room_version(event)
                    await check_state_independent_auth_rules(self._store, event)
                    check_state_dependent_auth_rules(event, auth)
                except AuthError as e:
                    logger.warning("Rejecting %r because %s", event, e)
                    context.rejected = RejectedReason.AUTH_ERROR
                except EventSizeError as e:
                    if e.unpersistable:
                        # This event is completely unpersistable.
                        raise e
                    # Otherwise, we are somewhat lenient and just persist the event
                    # as rejected, for moderate compatibility with older Synapse
                    # versions.
                    logger.warning("While validating received event %r: %s", event, e)
                    context.rejected = RejectedReason.OVERSIZED_EVENT

                events_and_contexts_to_persist.append((event, context))

        for event in fetched_events:
            await prep(event)
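
        # Note: `prep` above only *drops* an event when one of its auth events cannot
        # be found at all; events that fail auth or are oversized are still appended
        # with `context.rejected` set, so they are persisted below as rejected
        # outliers rather than silently discarded.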

        await self.persist_events_and_notify(
            room_id,
            events_and_contexts_to_persist,
            # Mark these events backfilled as they're historic events that will
            # eventually be backfilled. For example, missing events we fetch
            # during backfill should be marked as backfilled as well.
            backfilled=True,
        )

    @trace
    async def _check_event_auth(
        self, origin: Optional[str], event: EventBase, context: EventContext
    ) -> None:
        """
        Checks whether an event should be rejected (for failing auth checks).

        Args:
            origin: The host the event originates from. This is used to fetch
                any missing auth events. It can be set to None, but only if we are
                sure that we already have all the auth events.
            event: The event itself.
            context:
                The event context.

        Raises:
            AuthError if we were unable to find copies of the event's auth events.
                (Most other failures just cause us to set `context.rejected`.)
        """
        # This method should only be used for non-outliers
        assert not event.internal_metadata.outlier

        # first of all, check that the event itself is valid.
        try:
            validate_event_for_room_version(event)
        except AuthError as e:
            logger.warning("While validating received event %r: %s", event, e)
            # TODO: use a different rejected reason here?
            context.rejected = RejectedReason.AUTH_ERROR
            return
        except EventSizeError as e:
            if e.unpersistable:
                # This event is completely unpersistable.
                raise e
            # Otherwise, we are somewhat lenient and just persist the event
            # as rejected, for moderate compatibility with older Synapse
            # versions.
            logger.warning("While validating received event %r: %s", event, e)
            context.rejected = RejectedReason.OVERSIZED_EVENT
            return

        # next, check that we have all of the event's auth events.
        #
        # Note that this can raise AuthError, which we want to propagate to the
        # caller rather than swallow with `context.rejected` (since we cannot be
        # certain that there is a permanent problem with the event).
        claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
            origin, event
        )
        set_tag(
            SynapseTags.RESULT_PREFIX + "claimed_auth_events",
            str([ev.event_id for ev in claimed_auth_events]),
        )
        set_tag(
            SynapseTags.RESULT_PREFIX + "claimed_auth_events.length",
            str(len(claimed_auth_events)),
        )

        # ... and check that the event passes auth at those auth events.
        # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
        # 4. Passes authorization rules based on the event's auth events,
        #    otherwise it is rejected.
        try:
            await check_state_independent_auth_rules(self._store, event)
            check_state_dependent_auth_rules(event, claimed_auth_events)
        except AuthError as e:
            logger.warning(
                "While checking auth of %r against auth_events: %s", event, e
            )
            context.rejected = RejectedReason.AUTH_ERROR
            return

        # now check the auth rules pass against the room state before the event
        # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu:
        # 5. Passes authorization rules based on the state before the event,
        #    otherwise it is rejected.
        #
        # ... however, if we only have partial state for the room, then there is a good
        # chance that we'll be missing some of the state needed to auth the new event.
        # So, we state-resolve the auth events that we are given against the state that
        # we know about, which ensures things like bans are applied. (Note that we'll
        # already have checked we have all the auth events, in
        # _load_or_fetch_auth_events_for_event above)
        if context.partial_state:
            room_version = await self._store.get_room_version_id(event.room_id)

            local_state_id_map = await context.get_prev_state_ids()
            claimed_auth_events_id_map = {
                (ev.type, ev.state_key): ev.event_id for ev in claimed_auth_events
            }

            state_for_auth_id_map = (
                await self._state_resolution_handler.resolve_events_with_store(
                    event.room_id,
                    room_version,
                    [local_state_id_map, claimed_auth_events_id_map],
                    event_map=None,
                    state_res_store=StateResolutionStore(self._store),
                )
            )
        else:
            event_types = event_auth.auth_types_for_event(event.room_version, event)
            state_for_auth_id_map = await context.get_prev_state_ids(
                StateFilter.from_types(event_types)
            )

        calculated_auth_event_ids = self._event_auth_handler.compute_auth_events(
            event, state_for_auth_id_map, for_verification=True
        )
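
        # Note (sketch of intent): `state_for_auth_id_map` is our best view of the
        # state before the event - either the locally-known state resolved against
        # the claimed auth events (partial-state rooms) or the prev-state from the
        # context - and `compute_auth_events(..., for_verification=True)` derives
        # from it the auth event IDs *we* would expect the event to cite.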

        # if those are the same, we're done here.
        if collections.Counter(event.auth_event_ids()) == collections.Counter(
            calculated_auth_event_ids
        ):
            return

        # otherwise, re-run the auth checks based on what we calculated.
        calculated_auth_events = await self._store.get_events_as_list(
            calculated_auth_event_ids
        )

        # log the differences
        claimed_auth_event_map = {(e.type, e.state_key): e for e in claimed_auth_events}
        calculated_auth_event_map = {
            (e.type, e.state_key): e for e in calculated_auth_events
        }
        logger.info(
            "event's auth_events are different to our calculated auth_events. "
            "Claimed but not calculated: %s. Calculated but not claimed: %s",
            [
                ev
                for k, ev in claimed_auth_event_map.items()
                if k not in calculated_auth_event_map
                or calculated_auth_event_map[k].event_id != ev.event_id
            ],
            [
                ev
                for k, ev in calculated_auth_event_map.items()
                if k not in claimed_auth_event_map
                or claimed_auth_event_map[k].event_id != ev.event_id
            ],
        )

        try:
            check_state_dependent_auth_rules(event, calculated_auth_events)
        except AuthError as e:
            logger.warning(
                "While checking auth of %r against room state before the event: %s",
                event,
                e,
            )
            context.rejected = RejectedReason.AUTH_ERROR
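
    # In summary, `_check_event_auth` applies the spec's receive-time checks in
    # order: event validity, then auth against the event's claimed auth_events,
    # then auth against (our calculation of) the state before the event; the first
    # failing check marks `context.rejected` and the remaining checks are skipped.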

    @trace
    async def _maybe_kick_guest_users(self, event: EventBase) -> None:
        if event.type != EventTypes.GuestAccess:
            return

        guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
        if guest_access == GuestAccess.CAN_JOIN:
            return

        current_state = await self._storage_controllers.state.get_current_state(
            event.room_id
        )
        current_state_list = list(current_state.values())
        await self._get_room_member_handler().kick_guest_users(current_state_list)

    async def _check_for_soft_fail(
        self,
        event: EventBase,
        context: EventContext,
        origin: str,
    ) -> None:
        """Checks if we should soft fail the event; if so, marks the event as
        such.

        Does nothing for events in rooms with partial state, since we may not have an
        accurate membership event for the sender in the current state.

        Args:
            event
            context: The `EventContext` which we are about to persist the event with.
            origin: The host the event originates from.
        """
        if await self._store.is_partial_state_room(event.room_id):
            # We might not know the sender's membership in the current state, so don't
            # soft fail anything. Even if we do have a membership for the sender in the
            # current state, it may have been derived from state resolution between
            # partial and full state and may not be accurate.
            return

        extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        prev_event_ids = set(event.prev_event_ids())

        if extrem_ids == prev_event_ids:
            # If they're the same then the current state is the same as the
            # state at the event, so no point rechecking auth for soft fail.
            return

        room_version = await self._store.get_room_version_id(event.room_id)
        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]

        # The event types we want to pull from the "current" state.
        auth_types = auth_types_for_event(room_version_obj, event)

        # Calculate the "current state".
        seen_event_ids = await self._store.have_events_in_timeline(prev_event_ids)
        has_missing_prevs = bool(prev_event_ids - seen_event_ids)
        if has_missing_prevs:
            # We don't have all the prev_events of this event, which means we have a
            # gap in the graph, and the new event is going to become a new backwards
            # extremity.
            #
            # In this case we want to be a little careful as we might have been
            # down for a while and have an incorrect view of the current state,
            # however we still want to do checks as gaps are easy to
            # maliciously manufacture.
            #
            # So we use a "current state" that is actually a state
            # resolution across the current forward extremities and the
            # given state at the event. This should correctly handle cases
            # like bans, especially with state res v2.

            state_sets_d = await self._state_storage_controller.get_state_groups_ids(
                event.room_id, extrem_ids
            )
            state_sets: List[StateMap[str]] = list(state_sets_d.values())
            state_ids = await context.get_prev_state_ids()
            state_sets.append(state_ids)
            current_state_ids = (
                await self._state_resolution_handler.resolve_events_with_store(
                    event.room_id,
                    room_version,
                    state_sets,
                    event_map=None,
                    state_res_store=StateResolutionStore(self._store),
                )
            )
        else:
            current_state_ids = (
                await self._state_storage_controller.get_current_state_ids(
                    event.room_id, StateFilter.from_types(auth_types)
                )
            )

        logger.debug(
            "Doing soft-fail check for %s: state %s",
            event.event_id,
            current_state_ids,
        )

        # Now check if the event passes auth against said current state
        current_state_ids_list = [
            e for k, e in current_state_ids.items() if k in auth_types
        ]
        current_auth_events = await self._store.get_events_as_list(
            current_state_ids_list
        )

        try:
            check_state_dependent_auth_rules(event, current_auth_events)
        except AuthError as e:
            logger.warning(
                "Soft-failing %r (from %s) because %s",
                event,
                origin,
                e,
                extra={
                    "room_id": event.room_id,
                    "mxid": event.sender,
                    "hs": origin,
                },
            )
            soft_failed_event_counter.inc()
            event.internal_metadata.soft_failed = True
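
    # Note (background): a soft-failed event is still persisted and can be served
    # over federation, but it is hidden from clients and is not used as a forward
    # extremity; see the Matrix server-server spec and the Synapse documentation on
    # soft failure for the details.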

    async def _load_or_fetch_auth_events_for_event(
        self, destination: Optional[str], event: EventBase
    ) -> Collection[EventBase]:
        """Fetch this event's auth_events, from database or remote

        Loads any of the auth_events that we already have from the database/cache. If
        there are any that are missing, calls /event_auth to get the complete auth
        chain for the event (and then attempts to load the auth_events again).

        If any of the auth_events cannot be found, raises an AuthError. This can happen
        for a number of reasons; eg: the events don't exist, or we were unable to talk
        to `destination`, or we couldn't validate the signature on the event (which
        in turn has multiple potential causes).

        Args:
            destination: where to send the /event_auth request. Typically the server
                that sent us `event` in the first place.

                If this is None, no attempt is made to load any missing auth events:
                rather, an AssertionError is raised if there are any missing events.

            event: the event whose auth_events we want

        Returns:
            all of the events listed in `event.auth_event_ids`, after deduplication

        Raises:
            AssertionError if some auth events were missing and no `destination` was
                supplied.

            AuthError if we were unable to fetch the auth_events for any reason.
        """
        event_auth_event_ids = set(event.auth_event_ids())
        event_auth_events = await self._store.get_events(
            event_auth_event_ids, allow_rejected=True
        )
        missing_auth_event_ids = event_auth_event_ids.difference(
            event_auth_events.keys()
        )
        if not missing_auth_event_ids:
            return event_auth_events.values()
        if destination is None:
            # this shouldn't happen: destination must be set unless we know we have already
            # persisted the auth events.
            raise AssertionError(
                "_load_or_fetch_auth_events_for_event() called with no destination for "
                "an event with missing auth_events"
            )

        logger.info(
            "Event %s refers to unknown auth events %s: fetching auth chain",
            event,
            missing_auth_event_ids,
        )
        try:
            await self._get_remote_auth_chain_for_event(
                destination, event.room_id, event.event_id
            )
        except Exception as e:
            logger.warning("Failed to get auth chain for %s: %s", event, e)
            # in this case, it's very likely we still won't have all the auth
            # events - but we pick that up below.

        # try to fetch the auth events we missed last time.
        extra_auth_events = await self._store.get_events(
            missing_auth_event_ids, allow_rejected=True
        )
        missing_auth_event_ids.difference_update(extra_auth_events.keys())
        event_auth_events.update(extra_auth_events)
        if not missing_auth_event_ids:
            return event_auth_events.values()

        # we still don't have all the auth events.
        logger.warning(
            "Missing auth events for %s: %s",
            event,
            shortstr(missing_auth_event_ids),
        )
        # the fact we can't find the auth event doesn't mean it doesn't
        # exist, which means it is premature to store `event` as rejected.
        # instead we raise an AuthError, which will make the caller ignore it.
        raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")

    @trace
    @tag_args
    async def _get_remote_auth_chain_for_event(
        self, destination: str, room_id: str, event_id: str
    ) -> None:
        """If we are missing some of an event's auth events, attempt to request them

        Args:
            destination: where to fetch the auth tree from
            room_id: the room in which we are lacking auth events
            event_id: the event for which we are lacking auth events
        """
        try:
            remote_events = await self._federation_client.get_event_auth(
                destination, room_id, event_id
            )

        except RequestSendFailed as e1:
            # The other side isn't around or doesn't implement the
            # endpoint, so lets just bail out.
            logger.info("Failed to get event auth from remote: %s", e1)
            return

        logger.info("/event_auth returned %i events", len(remote_events))

        # `event` may be returned, but we should not yet process it.
        remote_auth_events = (e for e in remote_events if e.event_id != event_id)

        await self._auth_and_persist_outliers(room_id, remote_auth_events)
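
    # Note: `remote_auth_events` above is a generator that simply filters out the
    # requested event itself; `_auth_and_persist_outliers` iterates it once to build
    # its event map, so passing a generator (rather than a list) is sufficient here.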

    @trace
    async def _run_push_actions_and_persist_event(
        self, event: EventBase, context: EventContext, backfilled: bool = False
    ) -> None:
        """Run the push actions for a received event, and persist it.

        Args:
            event: The event itself.
            context: The event context.
            backfilled: True if the event was backfilled.

        Raises:
            PartialStateConflictError: if attempting to persist a partial state event in
                a room that has been un-partial stated.
        """
        # this method should not be called on outliers (those code paths call
        # persist_events_and_notify directly.)
        assert not event.internal_metadata.outlier

        if not backfilled and not context.rejected:
            min_depth = await self._store.get_min_depth(event.room_id)
            if min_depth is None or min_depth > event.depth:
                # XXX richvdh 2021/10/07: I don't really understand what this
                # condition is doing. I think it's trying not to send pushes
                # for events that predate our join - but that's not really what
                # min_depth means, and anyway ancient events are a more general
                # problem.
                #
                # for now I'm just going to log about it.
                logger.info(
                    "Skipping push actions for old event with depth %s < %s",
                    event.depth,
                    min_depth,
                )
            else:
                await self._bulk_push_rule_evaluator.action_for_events_by_user(
                    [(event, context)]
                )

        try:
            await self.persist_events_and_notify(
                event.room_id, [(event, context)], backfilled=backfilled
            )
        except Exception:
            await self._store.remove_push_actions_from_staging(event.event_id)
            raise
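
    # Note: `action_for_events_by_user` stages push actions for the event before it
    # is persisted; the `except Exception` above removes those staged actions again
    # if persistence fails, so that no notifications are left behind for an event
    # that never made it into the database.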

    async def persist_events_and_notify(
        self,
        room_id: str,
        event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
        backfilled: bool = False,
    ) -> int:
        """Persists events and tells the notifier/pushers about them, if
        necessary.

        Args:
            room_id: The room ID of events being persisted.
            event_and_contexts: Sequence of events with their associated
                context that should be persisted. All events must belong to
                the same room.
            backfilled: Whether these events are a result of
                backfilling or not

        Returns:
            The stream ID after which all events have been persisted.
|
|
|
|
|
|
|
|
|
Raises:
|
|
|
|
|
PartialStateConflictError: if attempting to persist a partial state event in
|
|
|
|
|
a room that has been un-partial stated.
|
2021-08-26 22:41:44 +02:00
|
|
|
|
"""
        if not event_and_contexts:
            return self._store.get_room_max_stream_ordering()

        instance = self._config.worker.events_shard_config.get_instance(room_id)
        if instance != self._instance_name:
            # Limit the number of events sent over replication. We choose 200
            # here as that is what we default to in `max_request_body_size(..)`
            result = {}
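            # If the remote event persister hits the race with un-partial stating
            # the room, the replication request fails with a 409 Conflict; we turn
            # that back into a PartialStateConflictError here so callers see the
            # same exception regardless of which worker did the persisting.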
            try:
                for batch in batch_iter(event_and_contexts, 200):
                    result = await self._send_events(
                        instance_name=instance,
                        store=self._store,
                        room_id=room_id,
                        event_and_contexts=batch,
                        backfilled=backfilled,
                    )
            except SynapseError as e:
                if e.code == HTTPStatus.CONFLICT:
                    raise PartialStateConflictError()
                raise

            return result["max_stream_id"]
        else:
            assert self._storage_controllers.persistence

            # Note that this returns the events that were persisted, which may not be
            # the same as were passed in if some were deduplicated due to transaction IDs.
            (
                events,
                max_stream_token,
            ) = await self._storage_controllers.persistence.persist_events(
                event_and_contexts, backfilled=backfilled
            )

            # After persistence we always need to notify replication there may
            # be new data.
            self._notifier.notify_replication()

            if self._ephemeral_messages_enabled:
                for event in events:
                    # If there's an expiry timestamp on the event, schedule its expiry.
                    self._message_handler.maybe_schedule_expiry(event)

            if not backfilled:  # Never notify for backfilled events
                with start_active_span("notify_persisted_events"):
                    set_tag(
                        SynapseTags.RESULT_PREFIX + "event_ids",
                        str([ev.event_id for ev in events]),
                    )
                    set_tag(
                        SynapseTags.RESULT_PREFIX + "event_ids.length",
                        str(len(events)),
                    )
                    for event in events:
                        await self._notify_persisted_event(event, max_stream_token)

            return max_stream_token.stream
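
    # A minimal usage sketch (hypothetical caller; the event/context pair and the
    # backfilled flag are illustrative, not taken from a real call site):
    #
    #     stream_id = await self.persist_events_and_notify(
    #         room_id, [(event, context)], backfilled=False
    #     )
    #
    # The returned stream ID marks the point after which all of the supplied
    # events have been persisted.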

    async def _notify_persisted_event(
        self, event: EventBase, max_stream_token: RoomStreamToken
    ) -> None:
        """Checks to see if notifier/pushers should be notified about the
        event or not.

        Args:
            event: The persisted event to (potentially) notify about.
            max_stream_token: The stream token returned by `persist_events`,
                marking the point up to which events have been persisted.
        """

        extra_users = []
        if event.type == EventTypes.Member:
            target_user_id = event.state_key

            # We notify for memberships if it's an invite for one of our
            # users
            if event.internal_metadata.is_outlier():
                if event.membership != Membership.INVITE:
                    if not self._is_mine_id(target_user_id):
                        return

            target_user = UserID.from_string(target_user_id)
            extra_users.append(target_user)
        elif event.internal_metadata.is_outlier():
            return

        # the event has been persisted so it should have a stream ordering.
        assert event.internal_metadata.stream_ordering
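
        # event_pos pairs the name of this worker with the event's stream
        # ordering, so the notifier can tell where the event was persisted even
        # when multiple event-persister instances are in use.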
        event_pos = PersistedEventPosition(
            self._instance_name, event.internal_metadata.stream_ordering
        )
        await self._notifier.on_new_room_events(
            [(event, event_pos)], max_stream_token, extra_users=extra_users
        )
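
        # Joins get an extra, dedicated notification below (on top of the generic
        # room-event wake-up above) so that interested components can react to a
        # user joining without inspecting every new event.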
        if event.type == EventTypes.Member and event.membership == Membership.JOIN:
            # TODO retrieve the previous state, and exclude join -> join transitions
            self._notifier.notify_user_joined_room(event.event_id, event.room_id)

    def _sanity_check_event(self, ev: EventBase) -> None:
        """
        Do some early sanity checks of a received event

        In particular, checks it doesn't have an excessive number of
        prev_events or auth_events, which could cause a huge state resolution
        or cascade of event fetches.

        Args:
            ev: event to be checked

        Raises:
            SynapseError if the event does not pass muster
        """
        if len(ev.prev_event_ids()) > 20:
            logger.warning(
                "Rejecting event %s which has %i prev_events",
                ev.event_id,
                len(ev.prev_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many prev_events")

        if len(ev.auth_event_ids()) > 10:
            logger.warning(
                "Rejecting event %s which has %i auth_events",
                ev.event_id,
                len(ev.auth_event_ids()),
            )
            raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
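
    # For example (illustrative numbers only): an incoming event listing 25
    # prev_events or 12 auth_events is rejected by `_sanity_check_event` with a
    # 400 before we go on to fetch its ancestors or resolve state across them.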