# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import heapq
import logging
from collections import ChainMap, defaultdict
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Collection,
    DefaultDict,
    Dict,
    FrozenSet,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    Tuple,
)

import attr
from frozendict import frozendict
from prometheus_client import Counter, Histogram

from synapse.api.constants import EventTypes
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import Measure, measure_func

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.storage.controllers import StateStorageController
    from synapse.storage.databases.main import DataStore


logger = logging.getLogger(__name__)
metrics_logger = logging.getLogger("synapse.state.metrics")


# Metrics for number of state groups involved in a resolution.
state_groups_histogram = Histogram(
    "synapse_state_number_state_groups_in_resolution",
    "Number of state groups used when performing a state resolution",
    buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)


EVICTION_TIMEOUT_SECONDS = 60 * 60


_NEXT_STATE_ID = 1


POWER_KEY = (EventTypes.PowerLevels, "")
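# (Note on POWER_KEY above: the (event type, state key) pair identifying a
# room's power-levels event in a state map.)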


def _gen_state_id() -> str:
    global _NEXT_STATE_ID
    s = "X%d" % (_NEXT_STATE_ID,)
    _NEXT_STATE_ID += 1
    return s
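
# (Observation, not from the original source: the "X%d" prefix keeps these
# process-local generated IDs distinct from persisted state group IDs, which
# are plain integers.)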


class _StateCacheEntry:
    __slots__ = ["_state", "state_group", "prev_group", "delta_ids"]

    def __init__(
        self,
        state: Optional[StateMap[str]],
        state_group: Optional[int],
        prev_group: Optional[int] = None,
        delta_ids: Optional[StateMap[str]] = None,
    ):
        if state is None and state_group is None and prev_group is None:
            raise Exception("One of state, state_group or prev_group must be not None")

        if prev_group is not None and delta_ids is None:
            raise Exception("If prev_group is set so must delta_ids")

        # A map from (type, state_key) to event_id.
        #
        # This can be None if we have a `state_group` (as then we can fetch the
        # state from the DB).
        self._state = frozendict(state) if state is not None else None

        # the ID of a state group if one and only one is involved;
        # otherwise, None.
        self.state_group = state_group

        self.prev_group = prev_group
        self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None

    async def get_state(
        self,
        state_storage: "StateStorageController",
        state_filter: Optional["StateFilter"] = None,
    ) -> StateMap[str]:
        """Get the state map for this entry, either from the in-memory state or
        looking up the state group in the DB.
        """

        if self._state is not None:
            return self._state

        if self.state_group is not None:
            return await state_storage.get_state_ids_for_group(
                self.state_group, state_filter
            )

        assert self.prev_group is not None and self.delta_ids is not None

        prev_state = await state_storage.get_state_ids_for_group(
            self.prev_group, state_filter
        )

        # ChainMap expects MutableMapping, but since we're using it immutably
        # it's safe to give it immutable maps.
        return ChainMap(self.delta_ids, prev_state)  # type: ignore[arg-type]
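        # Worked illustration (hypothetical IDs, not from the original
        # source): with delta_ids = {("m.room.topic", ""): "$new"} and a
        # prev_state of {("m.room.topic", ""): "$old", ("m.room.name", ""): "$n"},
        # the ChainMap yields "$new" for the topic (the delta wins) and "$n"
        # for the name (falling through to the previous group's state).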

    def set_state_group(self, state_group: int) -> None:
        """Update the state group assigned to this state (e.g. after we've
        persisted it).

        Note: this will cause the cache entry to drop any stored state.
        """

        self.state_group = state_group

        # We clear out the state as we no longer need to explicitly keep it in
        # the `state_cache` (as the store state group cache will do that).
        self._state = None

    def __len__(self) -> int:
        # The len should be used to estimate how large this cache entry is, for
        # cache eviction purposes. This is why it's fine to return 1 if we're
        # not storing any state.

        length = 0

        if self._state:
            length += len(self._state)

        if self.delta_ids:
            length += len(self.delta_ids)

        return length or 1  # Make sure it's not 0.
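        # (Context note: `StateResolutionHandler` below creates its
        # `_state_cache` ExpiringCache with `iterable=True`, so the cache's
        # overall size is the sum of these per-entry lengths.)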


class StateHandler:
    """Fetches bits of state from the stores, and does state resolution
    where necessary
    """

    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
        self.store = hs.get_datastores().main
        self._state_storage_controller = hs.get_storage_controllers().state
        self.hs = hs
        self._state_resolution_handler = hs.get_state_resolution_handler()
        self._storage_controllers = hs.get_storage_controllers()
        self._events_shard_config = hs.config.worker.events_shard_config
        self._instance_name = hs.get_instance_name()

        self._update_current_state_client = (
            ReplicationUpdateCurrentStateRestServlet.make_client(hs)
        )

    async def compute_state_after_events(
        self,
        room_id: str,
        event_ids: Collection[str],
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[str]:
        """Fetch the state after each of the given event IDs. Resolve them and return.

        This is typically used where `event_ids` is a collection of forward extremities
        in a room, intended to become the `prev_events` of a new event E. If so, the
        return value of this function represents the state before E.

        Args:
            room_id: the room_id containing the given events.
            event_ids: the events whose state should be fetched and resolved.
            state_filter: the state filter used when fetching state from the
                database.
            await_full_state: if `True`, will block if we do not yet have complete state
                at the given `event_id`s, regardless of whether `state_filter` is
                satisfied by partial state.

        Returns:
            the state dict (a mapping from (event_type, state_key) -> event_id) which
            holds the resolution of the states after the given event IDs.
        """
        logger.debug("calling resolve_state_groups from compute_state_after_events")
        ret = await self.resolve_state_groups_for_events(
            room_id, event_ids, await_full_state
        )
        return await ret.get_state(self._state_storage_controller, state_filter)
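
    # Illustrative usage (hypothetical event IDs, not from the original
    # source):
    #
    #     state = await state_handler.compute_state_after_events(
    #         room_id, ["$forward_extremity_1", "$forward_extremity_2"]
    #     )
    #     power_levels_event_id = state.get(POWER_KEY)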

    async def get_current_user_ids_in_room(
        self, room_id: str, latest_event_ids: List[str]
    ) -> Set[str]:
        """
        Get the user IDs who are currently in a room.

        Note: This is much slower than using the equivalent method
        `DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`,
        so this should only be used when wanting the users at a particular point
        in the room.

        Args:
            room_id: The ID of the room.
            latest_event_ids: Precomputed list of latest event IDs. Must not be None.
        Returns:
            Set of user IDs in the room.
        """

        assert latest_event_ids is not None

        logger.debug("calling resolve_state_groups from get_current_user_ids_in_room")
        entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
        state = await entry.get_state(self._state_storage_controller, StateFilter.all())
        return await self.store.get_joined_user_ids_from_state(room_id, state)

    async def get_hosts_in_room_at_events(
        self, room_id: str, event_ids: Collection[str]
    ) -> FrozenSet[str]:
        """Get the hosts that were in a room at the given event ids

        Args:
            room_id:
            event_ids:

        Returns:
            The hosts in the room at the given events
        """
        entry = await self.resolve_state_groups_for_events(room_id, event_ids)
        state = await entry.get_state(self._state_storage_controller, StateFilter.all())
        return await self.store.get_joined_hosts(room_id, state, entry)

    async def compute_event_context(
        self,
        event: EventBase,
        state_ids_before_event: Optional[StateMap[str]] = None,
        partial_state: Optional[bool] = None,
    ) -> EventContext:
        """Build an EventContext structure for a non-outlier event.

        (for an outlier, call EventContext.for_outlier directly)

        This works out what the current state should be for the event, and
        generates a new state group if necessary.

        Args:
            event:
            state_ids_before_event: The event ids of the state before the event if
                it can't be calculated from existing events. This is normally
                only specified when receiving an event from federation where we
                don't have the prev events, e.g. when backfilling.
            partial_state:
                `True` if `state_ids_before_event` is partial and omits non-critical
                membership events.
                `False` if `state_ids_before_event` is the full state.
                `None` when `state_ids_before_event` is not provided. In this case, the
                flag will be calculated based on `event`'s prev events.
        Returns:
            The event context.

        Raises:
            RuntimeError if `state_ids_before_event` is not provided and one or more
                prev events are missing or outliers.
        """

        assert not event.internal_metadata.is_outlier()

        #
        # first of all, figure out the state before the event, unless we
        # already have it.
        #
        if state_ids_before_event:
            # if we're given the state before the event, then we use that
            state_group_before_event_prev_group = None
            deltas_to_state_group_before_event = None

            # .. though we need to get a state group for it.
            state_group_before_event = (
                await self._state_storage_controller.store_state_group(
                    event.event_id,
                    event.room_id,
                    prev_group=None,
                    delta_ids=None,
                    current_state_ids=state_ids_before_event,
                )
            )

            # the partial_state flag must be provided
            assert partial_state is not None
        else:
            # otherwise, we'll need to resolve the state across the prev_events.

            # partial_state should not be set explicitly in this case:
            # we work it out dynamically
            assert partial_state is None

            # if any of the prev-events have partial state, so do we.
            # (This is slightly racy - the prev-events might get fixed up before we use
            # their states - but I don't think that really matters; it just means we
            # might redundantly recalculate the state for this event later.)
            prev_event_ids = event.prev_event_ids()
            incomplete_prev_events = await self.store.get_partial_state_events(
                prev_event_ids
            )
            partial_state = any(incomplete_prev_events.values())
            if partial_state:
                logger.debug(
                    "New/incoming event %s refers to prev_events %s with partial state",
                    event.event_id,
                    [k for (k, v) in incomplete_prev_events.items() if v],
                )

            logger.debug("calling resolve_state_groups from compute_event_context")
            # we've already taken into account partial state, so no need to wait for
            # complete state here.
            entry = await self.resolve_state_groups_for_events(
                event.room_id,
                event.prev_event_ids(),
                await_full_state=False,
            )

            state_group_before_event_prev_group = entry.prev_group
            deltas_to_state_group_before_event = entry.delta_ids
            state_ids_before_event = None

            # We make sure that we have a state group assigned to the state.
            if entry.state_group is None:
                # store_state_group requires us to have either a previous state group
                # (with deltas) or the complete state map. So, if we don't have a
                # previous state group, load the complete state map now.
                if state_group_before_event_prev_group is None:
                    state_ids_before_event = await entry.get_state(
                        self._state_storage_controller, StateFilter.all()
                    )

                state_group_before_event = (
                    await self._state_storage_controller.store_state_group(
                        event.event_id,
                        event.room_id,
                        prev_group=state_group_before_event_prev_group,
                        delta_ids=deltas_to_state_group_before_event,
                        current_state_ids=state_ids_before_event,
                    )
                )
                entry.set_state_group(state_group_before_event)
            else:
                state_group_before_event = entry.state_group

        #
        # now if it's not a state event, we're done
        #

        if not event.is_state():
            return EventContext.with_state(
                storage=self._storage_controllers,
                state_group_before_event=state_group_before_event,
                state_group=state_group_before_event,
                state_delta_due_to_event={},
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
                partial_state=partial_state,
            )

        #
        # otherwise, we'll need to create a new state group for after the event
        #

        key = (event.type, event.state_key)

        if state_ids_before_event is not None:
            replaces = state_ids_before_event.get(key)
        else:
            replaces_state_map = await entry.get_state(
                self._state_storage_controller, StateFilter.from_types([key])
            )
            replaces = replaces_state_map.get(key)

        if replaces and replaces != event.event_id:
            event.unsigned["replaces_state"] = replaces

        delta_ids = {key: event.event_id}

        state_group_after_event = (
            await self._state_storage_controller.store_state_group(
                event.event_id,
                event.room_id,
                prev_group=state_group_before_event,
                delta_ids=delta_ids,
                current_state_ids=None,
            )
        )

        return EventContext.with_state(
            storage=self._storage_controllers,
            state_group=state_group_after_event,
            state_group_before_event=state_group_before_event,
            state_delta_due_to_event=delta_ids,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
            partial_state=partial_state,
        )

    async def compute_event_context_for_batched(
        self,
        event: EventBase,
        state_ids_before_event: StateMap[str],
        current_state_group: int,
    ) -> EventContext:
        """
        Generate an event context for an event that has not yet been persisted to the
        database. Intended for use with events that are created to be persisted in a batch.

        Args:
            event: the event the context is being computed for
            state_ids_before_event: a state map consisting of the state ids of the events
                created prior to this event.
            current_state_group: the current state group before the event.
        """
        state_group_before_event_prev_group = None
        deltas_to_state_group_before_event = None

        state_group_before_event = current_state_group

        # if the event is not state, we are set
        if not event.is_state():
            return EventContext.with_state(
                storage=self._storage_controllers,
                state_group_before_event=state_group_before_event,
                state_group=state_group_before_event,
                state_delta_due_to_event={},
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
                partial_state=False,
            )

        # otherwise, we'll need to create a new state group for after the event
        key = (event.type, event.state_key)

        if state_ids_before_event is not None:
            replaces = state_ids_before_event.get(key)

            if replaces and replaces != event.event_id:
                event.unsigned["replaces_state"] = replaces

        delta_ids = {key: event.event_id}

        state_group_after_event = (
            await self._state_storage_controller.store_state_group(
                event.event_id,
                event.room_id,
                prev_group=state_group_before_event,
                delta_ids=delta_ids,
                current_state_ids=None,
            )
        )

        return EventContext.with_state(
            storage=self._storage_controllers,
            state_group=state_group_after_event,
            state_group_before_event=state_group_before_event,
            state_delta_due_to_event=delta_ids,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
            partial_state=False,
        )

    @measure_func()
    async def resolve_state_groups_for_events(
        self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
    ) -> _StateCacheEntry:
        """Given a list of event_ids this method fetches the state at each
        event, resolves conflicts between them and returns them.

        Args:
            room_id
            event_ids
            await_full_state: if true, will block if we do not yet have complete
                state at these events.

        Returns:
            The resolved state

        Raises:
            RuntimeError if we don't have a state group for one or more of the events
                (ie. they are outliers or unknown)
        """
        logger.debug("resolve_state_groups event_ids %s", event_ids)

        state_groups = await self._state_storage_controller.get_state_group_for_events(
            event_ids, await_full_state=await_full_state
        )

        state_group_ids = state_groups.values()

        # check if each event has the same state group id; if so there's no state to resolve
        state_group_ids_set = set(state_group_ids)
        if len(state_group_ids_set) == 1:
            (state_group_id,) = state_group_ids_set
            (
                prev_group,
                delta_ids,
            ) = await self._state_storage_controller.get_state_group_delta(
                state_group_id
            )
            return _StateCacheEntry(
                state=None,
                state_group=state_group_id,
                prev_group=prev_group,
                delta_ids=delta_ids,
            )
        elif len(state_group_ids_set) == 0:
            return _StateCacheEntry(state={}, state_group=None)

        room_version = await self.store.get_room_version_id(room_id)

        state_to_resolve = await self._state_storage_controller.get_state_for_groups(
            state_group_ids_set
        )

        result = await self._state_resolution_handler.resolve_state_groups(
            room_id,
            room_version,
            state_to_resolve,
            None,
            state_res_store=StateResolutionStore(self.store),
        )
        return result

    async def update_current_state(self, room_id: str) -> None:
        """Recalculates the current state for a room, and persists it.

        Raises:
            SynapseError(502): if all attempts to connect to the event persister worker
                fail
        """
        writer_instance = self._events_shard_config.get_instance(room_id)
        if writer_instance != self._instance_name:
            await self._update_current_state_client(
                instance_name=writer_instance,
                room_id=room_id,
            )
            return

        assert self._storage_controllers.persistence is not None
        await self._storage_controllers.persistence.update_current_state(room_id)


@attr.s(slots=True, auto_attribs=True)
class _StateResMetrics:
    """Keeps track of some usage metrics about state res."""

    # System and User CPU time, in seconds
    cpu_time: float = 0.0

    # time spent on database transactions (excluding scheduling time). This roughly
    # corresponds to the amount of work done on the db server, excluding event fetches.
    db_time: float = 0.0

    # number of events fetched from the db.
    db_events: int = 0


_biggest_room_by_cpu_counter = Counter(
    "synapse_state_res_cpu_for_biggest_room_seconds",
    "CPU time spent performing state resolution for the single most expensive "
    "room for state resolution",
)
_biggest_room_by_db_counter = Counter(
    "synapse_state_res_db_for_biggest_room_seconds",
    "Database time spent performing state resolution for the single most "
    "expensive room for state resolution",
)

_cpu_times = Histogram(
    "synapse_state_res_cpu_for_all_rooms_seconds",
    "CPU time (utime+stime) spent computing a single state resolution",
)
_db_times = Histogram(
    "synapse_state_res_db_for_all_rooms_seconds",
    "Database time spent computing a single state resolution",
)


class StateResolutionHandler:
    """Responsible for doing state conflict resolution.

    Note that the storage layer depends on this handler, so all functions must
    be storage-independent.
    """

    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()

        self.resolve_linearizer = Linearizer(name="state_resolve_lock")

        # dict of set of state group ids -> _StateCacheEntry.
        self._state_cache: ExpiringCache[
            FrozenSet[int], _StateCacheEntry
        ] = ExpiringCache(
            cache_name="state_cache",
            clock=self.clock,
            max_len=100000,
            expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000,
            iterable=True,
            reset_expiry_on_get=True,
        )

        #
        # stuff for tracking time spent on state-res by room
        #

        # tracks the amount of work done on state res per room
        self._state_res_metrics: DefaultDict[str, _StateResMetrics] = defaultdict(
            _StateResMetrics
        )

        self.clock.looping_call(self._report_metrics, 120 * 1000)

    async def resolve_state_groups(
        self,
        room_id: str,
        room_version: str,
        state_groups_ids: Mapping[int, StateMap[str]],
        event_map: Optional[Dict[str, EventBase]],
        state_res_store: "StateResolutionStore",
    ) -> _StateCacheEntry:
        """Resolves conflicts between a set of state groups

        Always generates a new state group (unless we hit the cache), so should
        not be called for a single state group

        Args:
            room_id: room we are resolving for (used for logging and sanity checks)
            room_version: version of the room
            state_groups_ids:
                A map from state group id to the state in that state group
                (where 'state' is a map from state key to event id)

            event_map:
                a dict from event_id to event, for any events that we happen to
                have in flight (eg, those currently being persisted). This will be
                used as a starting point for finding the state we need; any missing
                events will be requested via state_res_store.

                If None, all events will be fetched via state_res_store.

            state_res_store

        Returns:
            The resolved state
        """
        group_names = frozenset(state_groups_ids.keys())

        async with self.resolve_linearizer.queue(group_names):
            cache = self._state_cache.get(group_names, None)
            if cache:
                return cache

            logger.info(
                "Resolving state for %s with groups %s",
                room_id,
                list(group_names),
            )

            state_groups_histogram.observe(len(state_groups_ids))

            new_state = await self.resolve_events_with_store(
                room_id,
                room_version,
                list(state_groups_ids.values()),
                event_map=event_map,
                state_res_store=state_res_store,
            )

            # if the new state matches any of the input state groups, we can
            # use that state group again. Otherwise we will generate a state_id
            # which will be used as a cache key for future resolutions, but
            # not get persisted.

            with Measure(self.clock, "state.create_group_ids"):
                cache = _make_state_cache_entry(new_state, state_groups_ids)

            self._state_cache[group_names] = cache

            return cache

    async def resolve_events_with_store(
        self,
        room_id: str,
        room_version: str,
        state_sets: Sequence[StateMap[str]],
        event_map: Optional[Dict[str, EventBase]],
        state_res_store: "StateResolutionStore",
    ) -> StateMap[str]:
        """
        Args:
            room_id: the room we are working in

            room_version: Version of the room

            state_sets: List of dicts of (type, state_key) -> event_id,
                which are the different state groups to resolve.

            event_map:
                a dict from event_id to event, for any events that we happen to
                have in flight (eg, those currently being persisted). This will be
                used as a starting point for finding the state we need; any missing
                events will be requested via state_map_factory.

                If None, all events will be fetched via state_res_store.

            state_res_store: a place to fetch events from

        Returns:
            a map from (type, state_key) to event_id.
        """
        try:
            with Measure(self.clock, "state._resolve_events") as m:
                room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
                if room_version_obj.state_res == StateResolutionVersions.V1:
                    return await v1.resolve_events_with_store(
                        room_id,
                        room_version_obj,
                        state_sets,
                        event_map,
                        state_res_store.get_events,
                    )
                else:
                    return await v2.resolve_events_with_store(
                        self.clock,
                        room_id,
                        room_version_obj,
                        state_sets,
                        event_map,
                        state_res_store,
                    )
        finally:
            self._record_state_res_metrics(room_id, m.get_resource_usage())

    def _record_state_res_metrics(
        self, room_id: str, rusage: ContextResourceUsage
    ) -> None:
        room_metrics = self._state_res_metrics[room_id]
        room_metrics.cpu_time += rusage.ru_utime + rusage.ru_stime
        room_metrics.db_time += rusage.db_txn_duration_sec
        room_metrics.db_events += rusage.evt_db_fetch_count

        _cpu_times.observe(rusage.ru_utime + rusage.ru_stime)
        _db_times.observe(rusage.db_txn_duration_sec)

    def _report_metrics(self) -> None:
        if not self._state_res_metrics:
            # no state res has happened since the last iteration: don't bother logging.
            return

        self._report_biggest(
            lambda i: i.cpu_time,
            "CPU time",
            _biggest_room_by_cpu_counter,
        )

        self._report_biggest(
            lambda i: i.db_time,
            "DB time",
            _biggest_room_by_db_counter,
        )

        self._state_res_metrics.clear()

    def _report_biggest(
        self,
        extract_key: Callable[[_StateResMetrics], Any],
        metric_name: str,
        prometheus_counter_metric: Counter,
    ) -> None:
        """Report metrics on the biggest rooms for state res

        Args:
            extract_key: a callable which, given a _StateResMetrics, extracts a single
                metric to sort by.
            metric_name: the name of the metric we have extracted, for the log line
            prometheus_counter_metric: a prometheus metric recording the sum of the
                extracted metric
        """
        n_to_log = 10
        if not metrics_logger.isEnabledFor(logging.DEBUG):
            # only need the most expensive if we don't have debug logging, which
            # allows nlargest() to degrade to max()
            n_to_log = 1

        items = self._state_res_metrics.items()

        # log the N biggest rooms
        biggest: List[Tuple[str, _StateResMetrics]] = heapq.nlargest(
            n_to_log, items, key=lambda i: extract_key(i[1])
        )
        metrics_logger.debug(
            "%i biggest rooms for state-res by %s: %s",
            len(biggest),
            metric_name,
            ["%s (%gs)" % (r, extract_key(m)) for (r, m) in biggest],
        )

        # report info on the single biggest to prometheus
        _, biggest_metrics = biggest[0]
        prometheus_counter_metric.inc(extract_key(biggest_metrics))


def _make_state_cache_entry(
    new_state: StateMap[str], state_groups_ids: Mapping[int, StateMap[str]]
) -> _StateCacheEntry:
    """Given a resolved state, and a set of input state groups, pick one to base
    a new state group on (if any), and return an appropriately-constructed
    _StateCacheEntry.

    Args:
        new_state: resolved state map (mapping from (type, state_key) to event_id)

        state_groups_ids:
            map from state group id to the state in that state group (where
            'state' is a map from state key to event id)

    Returns:
        The cache entry.
    """
    # if the new state matches any of the input state groups, we can
    # use that state group again. Otherwise we will generate a state_id
    # which will be used as a cache key for future resolutions, but
    # not get persisted.

    # first look for exact matches
    new_state_event_ids = set(new_state.values())
    for sg, state in state_groups_ids.items():
        if len(new_state_event_ids) != len(state):
            continue

        old_state_event_ids = set(state.values())
        if new_state_event_ids == old_state_event_ids:
            # got an exact match.
            return _StateCacheEntry(state=None, state_group=sg)

    # TODO: We want to create a state group for this set of events, to
    # increase cache hits, but we need to make sure that it doesn't
    # end up as a prev_group without being added to the database

    # failing that, look for the closest match.
    prev_group = None
    delta_ids: Optional[StateMap[str]] = None

    for old_group, old_state in state_groups_ids.items():
        if old_state.keys() - new_state.keys():
            # Currently we don't support deltas that remove keys from the state
            # map, so we have to ignore this group as a candidate to base the
            # new group on.
            continue

        n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v}
        if not delta_ids or len(n_delta_ids) < len(delta_ids):
            prev_group = old_group
            delta_ids = n_delta_ids

    if prev_group is not None:
        # If we have a prev group and deltas then we can drop the new state from
        # the cache (to reduce memory usage).
        return _StateCacheEntry(
            state=None, state_group=None, prev_group=prev_group, delta_ids=delta_ids
        )
    else:
        return _StateCacheEntry(state=new_state, state_group=None)
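
# Worked example for _make_state_cache_entry (hypothetical values, not from
# the original source): if new_state is {A: "$1", B: "$2"} and one input group
# holds {A: "$1"}, that group becomes prev_group with delta_ids {B: "$2"};
# a group holding {A: "$1", C: "$3"} is skipped as a candidate, since it has
# keys absent from new_state and deltas cannot remove keys.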


@attr.s(slots=True, auto_attribs=True)
class StateResolutionStore:
    """Interface that allows state resolution algorithms to access the database
    in a well defined way.
    """

    store: "DataStore"

    def get_events(
        self, event_ids: Collection[str], allow_rejected: bool = False
    ) -> Awaitable[Dict[str, EventBase]]:
        """Get events from the database

        Args:
            event_ids: The event_ids of the events to fetch
            allow_rejected: If True return rejected events.

        Returns:
            An awaitable which resolves to a dict from event_id to event.
        """

        return self.store.get_events(
            event_ids,
            redact_behaviour=EventRedactBehaviour.as_is,
            get_prev_content=False,
            allow_rejected=allow_rejected,
        )

    def get_auth_chain_difference(
        self, room_id: str, state_sets: List[Set[str]]
    ) -> Awaitable[Set[str]]:
        """Given sets of state events figure out the auth chain difference (as
        per state res v2 algorithm).

        This is equivalent to fetching the full auth chain for each set of state
        and returning the events that don't appear in each and every auth
        chain.

        Returns:
            An awaitable that resolves to a set of event IDs.
        """

        return self.store.get_auth_chain_difference(room_id, state_sets)
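
    # Illustration for get_auth_chain_difference (hypothetical events, not
    # from the original source): given auth chains {P, X} and {P, Y} for two
    # state sets, the difference is {X, Y}: the events appearing in at least
    # one chain but not in every chain.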
|