# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A federation sender that forwards things to be sent across replication to
a worker process.

It assumes there is a single worker process feeding off of it.

Each row in the replication stream consists of a type and some JSON, where the
types indicate whether they are presence, or edus, etc.

Ephemeral or non-event data are queued up in-memory. When the worker requests
updates since a particular point, all in-memory data from before that point is
dropped. We also expire things in the queue after 5 minutes, to ensure that a
dead worker doesn't cause the queues to grow limitlessly.

Events are replicated via a separate events stream.
"""

import logging
from typing import (
    TYPE_CHECKING,
    Dict,
    Hashable,
    Iterable,
    List,
    Optional,
    Sized,
    Tuple,
    Type,
)

import attr
from sortedcontainers import SortedDict

from synapse.api.presence import UserPresenceState
from synapse.federation.sender import AbstractFederationSender, FederationSender
from synapse.metrics import LaterGauge
from synapse.replication.tcp.streams.federation import FederationStream
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util.metrics import Measure

from .units import Edu

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class FederationRemoteSendQueue(AbstractFederationSender):
    """A drop-in replacement for FederationSender"""

    def __init__(self, hs: "HomeServer"):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.is_mine_server_name = hs.is_mine_server_name

        # We may have multiple federation sender instances, so we need to track
        # their positions separately.
        self._sender_instances = hs.config.worker.federation_shard_config.instances
        self._sender_positions: Dict[str, int] = {}

        # Pending presence map user_id -> UserPresenceState
        self.presence_map: Dict[str, UserPresenceState] = {}

        # Stores the destinations we need to explicitly send presence to about a
        # given user.
        # Stream position -> (user_id, destinations)
        self.presence_destinations: SortedDict[
            int, Tuple[str, Iterable[str]]
        ] = SortedDict()

        # (destination, key) -> EDU
        self.keyed_edu: Dict[Tuple[str, tuple], Edu] = {}

        # stream position -> (destination, key)
        self.keyed_edu_changed: SortedDict[int, Tuple[str, tuple]] = SortedDict()

        self.edus: SortedDict[int, Edu] = SortedDict()

        # stream ID for the next entry into keyed_edu_changed/edus.
        self.pos = 1

        # map from the time (ms) a stream entry was generated to its stream ID,
        # so that we can clear out entries after a while
        self.pos_time: SortedDict[int, int] = SortedDict()

        # EVERYTHING IS SAD. In particular, python only makes new scopes when
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
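        # (Illustrative sketch of the pitfall, with made-up names:
        #      gauges = [lambda: len(getattr(self, n)) for n in queue_names]
        #  would leave every lambda late-binding `n`, so they would all report
        #  the length of whichever queue name came last.)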
        def register(name: str, queue: Sized) -> None:
            LaterGauge(
                "synapse_federation_send_queue_%s_size" % (name,),
                "",
                [],
                lambda: len(queue),
            )

        for queue_name in [
            "presence_map",
            "keyed_edu",
            "keyed_edu_changed",
            "edus",
            "pos_time",
            "presence_destinations",
        ]:
            register(queue_name, getattr(self, queue_name))

        self.clock.looping_call(self._clear_queue, 30 * 1000)

    def _next_pos(self) -> int:
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self) -> None:
        """Clear the queues for anything older than N minutes"""

        FIVE_MINUTES_AGO = 5 * 60 * 1000
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

        # `keys` are timestamps; look up the newest stream position generated
        # before the cutoff, then drop the expired timestamp entries.
        position_to_delete = self.pos_time[max(keys[:time])]
        for key in keys[:time]:
            del self.pos_time[key]

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete: int) -> None:
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_destinations.keys()
            i = self.presence_destinations.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_destinations[key]

            user_ids = {user_id for user_id, _ in self.presence_destinations.values()}

            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = self.keyed_edu_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

            live_keys = set(self.keyed_edu_changed.values())

            keys_to_del = [
                edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
            ]
            for edu_key in keys_to_del:
                del self.keyed_edu[edu_key]

            # Delete things out of edu map
            keys = self.edus.keys()
            i = self.edus.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]

    def notify_new_events(self, max_token: RoomStreamToken) -> None:
        """As per FederationSender"""
        # This should never get called.
        raise NotImplementedError()

    def build_and_send_edu(
        self,
        destination: str,
        edu_type: str,
        content: JsonDict,
        key: Optional[Hashable] = None,
    ) -> None:
        """As per FederationSender"""
        if self.is_mine_server_name(destination):
            logger.info("Not sending EDU to ourselves")
            return

        pos = self._next_pos()

        edu = Edu(
            origin=self.server_name,
            destination=destination,
            edu_type=edu_type,
            content=content,
        )

        if key:
            assert isinstance(key, tuple)
            self.keyed_edu[(destination, key)] = edu
            self.keyed_edu_changed[pos] = (destination, key)
        else:
            self.edus[pos] = edu

        self.notifier.on_new_replication_data()

    async def send_read_receipt(self, receipt: ReadReceipt) -> None:
        """As per FederationSender"""
        # nothing to do here: the replication listener will handle it.

    def send_presence_to_destinations(
        self, states: Iterable[UserPresenceState], destinations: Iterable[str]
    ) -> None:
        """As per FederationSender

        Args:
            states: the presence states to send
            destinations: the servers to send them to
        """
        for state in states:
            pos = self._next_pos()
            self.presence_map[state.user_id] = state
            self.presence_destinations[pos] = (state.user_id, destinations)

        self.notifier.on_new_replication_data()

    def send_device_messages(self, destination: str, immediate: bool = True) -> None:
        """As per FederationSender"""
        # We don't need to replicate this as it gets sent down a different
        # stream.

    def wake_destination(self, server: str) -> None:
        pass

    def get_current_token(self) -> int:
        return self.pos - 1

    def federation_ack(self, instance_name: str, token: int) -> None:
        if self._sender_instances:
            # If we have configured multiple federation sender instances we need
            # to track their positions separately, and only clear the queue up
            # to the token all instances have acked.
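            # (For example, with acked positions {"sender-1": 10, "sender-2": 7}
            # (illustrative instance names), the min() below only lets us clear
            # up to position 7.)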
            self._sender_positions[instance_name] = token
            token = min(self._sender_positions.values())

        self._clear_queue_before_pos(token)

    async def get_replication_rows(
        self, instance_name: str, from_token: int, to_token: int, target_row_count: int
    ) -> Tuple[List[Tuple[int, Tuple]], int, bool]:
        """Get rows to be sent over federation between the two tokens

        Args:
            instance_name: the name of the current process
            from_token: the previous stream token: the starting point for fetching the
                updates
            to_token: the new stream token: the point to get updates up to
            target_row_count: a target for the number of rows to be returned.

        Returns: a triplet `(updates, new_last_token, limited)`, where:
            * `updates` is a list of `(token, row)` entries.
            * `new_last_token` is the new position in the stream.
            * `limited` is whether there are more updates to fetch.
        """
        # TODO: Handle target_row_count.

        # To handle restarts where we wrap around
        if from_token > self.pos:
            from_token = -1

        # list of tuple(int, BaseFederationRow), where the first is the position
        # of the federation stream.
        rows: List[Tuple[int, BaseFederationRow]] = []

        # Fetch presence to send to destinations
        i = self.presence_destinations.bisect_right(from_token)
        j = self.presence_destinations.bisect_right(to_token) + 1

        for pos, (user_id, dests) in self.presence_destinations.items()[i:j]:
            rows.append(
                (
                    pos,
                    PresenceDestinationsRow(
                        state=self.presence_map[user_id], destinations=list(dests)
                    ),
                )
            )

        # Fetch changed keyed edus
        i = self.keyed_edu_changed.bisect_right(from_token)
        j = self.keyed_edu_changed.bisect_right(to_token) + 1
        # We purposefully clobber based on the key here: python dict
        # comprehensions always use the last value, so this will correctly
        # point to the last stream position.
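        # (Illustrative: items [(3, ("dest", key)), (7, ("dest", key))] collapse
        # to {("dest", key): 7}, keeping only the newest stream position.)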
        keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}

        for (destination, edu_key), pos in keyed_edus.items():
            rows.append(
                (
                    pos,
                    KeyedEduRow(
                        key=edu_key, edu=self.keyed_edu[(destination, edu_key)]
                    ),
                )
            )

        # Fetch changed edus
        i = self.edus.bisect_right(from_token)
        j = self.edus.bisect_right(to_token) + 1
        edus = self.edus.items()[i:j]

        for pos, edu in edus:
            rows.append((pos, EduRow(edu)))

        # Sort rows based on pos
        rows.sort()

        return (
            [(pos, (row.TypeId, row.to_data())) for pos, row in rows],
            to_token,
            False,
        )


class BaseFederationRow:
    """Base class for rows to be sent in the federation stream.

    Specifies how to identify, serialize and deserialize the different types.
    """

    TypeId = ""  # Unique string that ids the type. Must be overridden in subclasses.

    @staticmethod
    def from_data(data: JsonDict) -> "BaseFederationRow":
        """Parse the data from the federation stream into a row.

        Args:
            data: The value of ``data`` from FederationStreamRow.data, type
                depends on the type of stream
        """
        raise NotImplementedError()

    def to_data(self) -> JsonDict:
        """Serialize this row to be sent over the federation stream.

        Returns:
            The value to be sent in FederationStreamRow.data. The type depends
            on the type of stream.
        """
        raise NotImplementedError()

    def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None:
        """Add this row to the appropriate field in the buffer ready for this
        to be sent over federation.

        We use a buffer so that we can batch up events that have come in at
        the same time and send them all at once.

        Args:
            buff (ParsedFederationStreamData)
        """
        raise NotImplementedError()


@attr.s(slots=True, frozen=True, auto_attribs=True)
class PresenceDestinationsRow(BaseFederationRow):
    state: UserPresenceState
    destinations: List[str]

    TypeId = "pd"

    @staticmethod
    def from_data(data: JsonDict) -> "PresenceDestinationsRow":
        return PresenceDestinationsRow(
            state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"]
        )

    def to_data(self) -> JsonDict:
        return {"state": self.state.as_dict(), "dests": self.destinations}

    def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None:
        buff.presence_destinations.append((self.state, self.destinations))


@attr.s(slots=True, frozen=True, auto_attribs=True)
class KeyedEduRow(BaseFederationRow):
    """Streams EDUs that have an associated key that is used to clobber. For example,
    typing EDUs clobber based on room_id.
    """

    key: Tuple[str, ...]  # the edu key passed to send_edu
    edu: Edu

    TypeId = "k"

    @staticmethod
    def from_data(data: JsonDict) -> "KeyedEduRow":
        return KeyedEduRow(key=tuple(data["key"]), edu=Edu(**data["edu"]))

    def to_data(self) -> JsonDict:
        return {"key": self.key, "edu": self.edu.get_internal_dict()}

    def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None:
        buff.keyed_edus.setdefault(self.edu.destination, {})[self.key] = self.edu


@attr.s(slots=True, frozen=True, auto_attribs=True)
class EduRow(BaseFederationRow):
    """Streams EDUs that don't have keys. See KeyedEduRow"""

    edu: Edu

    TypeId = "e"

    @staticmethod
    def from_data(data: JsonDict) -> "EduRow":
        return EduRow(Edu(**data))

    def to_data(self) -> JsonDict:
        return self.edu.get_internal_dict()

    def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None:
        buff.edus.setdefault(self.edu.destination, []).append(self.edu)


_rowtypes: Tuple[Type[BaseFederationRow], ...] = (
    PresenceDestinationsRow,
    KeyedEduRow,
    EduRow,
)

TypeToRow = {Row.TypeId: Row for Row in _rowtypes}
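# (TypeToRow maps the short wire identifiers to their row classes, e.g.
# TypeToRow["k"] is KeyedEduRow.)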


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ParsedFederationStreamData:
    # list of tuples of UserPresenceState and destinations
    presence_destinations: List[Tuple[UserPresenceState, List[str]]]
    # dict of destination -> { key -> Edu }
    keyed_edus: Dict[str, Dict[Tuple[str, ...], Edu]]
    # dict of destination -> [Edu]
    edus: Dict[str, List[Edu]]


def process_rows_for_federation(
    transaction_queue: FederationSender,
    rows: List[FederationStream.FederationStreamRow],
) -> None:
    """Parse a list of rows from the federation stream and put them in the
    transaction queue ready for sending to the relevant homeservers.

    Args:
        transaction_queue: the federation sender to hand the parsed rows to
        rows: the rows from the federation stream to parse
    """

    # The federation stream contains a bunch of different types of
    # rows that need to be handled differently. We parse the rows, put
    # them into the appropriate collection and then send them off.

    buff = ParsedFederationStreamData(
        presence_destinations=[],
        keyed_edus={},
        edus={},
    )

    # Parse the rows in the stream and add to the buffer
    for row in rows:
        if row.type not in TypeToRow:
            logger.error("Unrecognized federation row type %r", row.type)
            continue

        RowType = TypeToRow[row.type]
        parsed_row = RowType.from_data(row.data)
        parsed_row.add_to_buffer(buff)

    for state, destinations in buff.presence_destinations:
        transaction_queue.send_presence_to_destinations(
            states=[state], destinations=destinations
        )

    for edu_map in buff.keyed_edus.values():
        for key, edu in edu_map.items():
            transaction_queue.send_edu(edu, key)

    for edu_list in buff.edus.values():
        for edu in edu_list:
            transaction_queue.send_edu(edu, None)