# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple

import attr

from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.state import StateFilter
from synapse.storage.types import Cursor
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


# Maximum length of a chain of state-group deltas: when storing a new state
# group whose chain of deltas would reach this many hops, we store its state
# in full instead (see store_state_group).
MAX_STATE_DELTA_HOPS = 100
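
# The three tables this store works with (columns inferred from the queries
# in this file; shown here as an explanatory sketch):
#   state_groups:        id, room_id, event_id
#   state_group_edges:   state_group, prev_state_group
#   state_groups_state:  state_group, room_id, type, state_key, event_id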


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _GetStateGroupDelta:
    """Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
    """

    prev_group: Optional[int]
    delta_ids: Optional[StateMap[str]]

    def __len__(self) -> int:
        return len(self.delta_ids) if self.delta_ids else 0
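

# Note (explanatory addition): with iterable=True, the @cached decorator used
# on get_state_group_delta sizes each cache entry by len(), so the __len__
# above makes a cached delta count for the number of state entries it holds.

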
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
    """A data store for fetching/storing state groups."""

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # Originally the state store used a single DictionaryCache to cache the
        # event IDs for the state types in a given state group to avoid hammering
        # on the state_group* tables.
        #
        # The point of using a DictionaryCache is that it can cache a subset
        # of the state events for a given state group (i.e. a subset of the keys for a
        # given dict which is an entry in the cache for a given state group ID).
        #
        # However, this poses problems when performing complicated queries
        # on the store - for instance: "give me all the state for this group, but
        # limit members to this subset of users", as DictionaryCache's API isn't
        # rich enough to say "please cache any of these fields, apart from this subset".
        # This is problematic when lazy loading members, which requires this behaviour,
        # as without it the cache has no choice but to speculatively load all
        # state events for the group, which negates the efficiency being sought.
        #
        # Rather than overcomplicating DictionaryCache's API, we instead split the
        # state_group_cache into two halves - one for tracking non-member events,
        # and the other for tracking member events. This means that lazy loading
        # queries can be made in a cache-friendly manner by querying both caches
        # separately and then merging the result. So for the example above, you
        # would query the members cache for a specific subset of state keys
        # (which DictionaryCache will handle efficiently and fine) and the non-members
        # cache for all state (which DictionaryCache will similarly handle fine)
        # and then just merge the results together.
        #
        # We size the non-members cache to be smaller than the members cache as the
        # vast majority of state in Matrix (today) is member events.
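
        # As an illustrative sketch (an addition, not original code): a
        # lazy-loading query for one user's membership plus all other state
        # would be served by the two caches roughly like this, where the
        # group ID and user ID are hypothetical:
        #
        #   members, _ = self._get_state_for_group_using_cache(
        #       self._state_group_members_cache,
        #       group,
        #       StateFilter.from_types([(EventTypes.Member, "@user:example.com")]),
        #   )
        #   non_members, _ = self._get_state_for_group_using_cache(
        #       self._state_group_cache, group, StateFilter.all()
        #   )
        #   state = {**non_members, **members}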
        self._state_group_cache: DictionaryCache[int, StateKey, str] = DictionaryCache(
            "*stateGroupCache*",
            # TODO: this hasn't been tuned yet
            50000,
        )
        self._state_group_members_cache: DictionaryCache[
            int, StateKey, str
        ] = DictionaryCache(
            "*stateGroupMembersCache*",
            500000,
        )

        def get_max_state_group_txn(txn: Cursor) -> int:
            txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups")
            return txn.fetchone()[0]  # type: ignore

        # Hands out IDs for new state groups: backed by the state_group_id_seq
        # database sequence on Postgres, and by an in-memory counter seeded
        # from get_max_state_group_txn otherwise.
        self._state_group_seq_gen = build_sequence_generator(
            db_conn,
            self.database_engine,
            get_max_state_group_txn,
            "state_group_id_seq",
            table="state_groups",
            id_column="id",
        )

    @cached(max_entries=10000, iterable=True)
    async def get_state_group_delta(self, state_group: int) -> _GetStateGroupDelta:
        """Given a state group, try to return a previous group and a delta between
        the old and the new.

        Returns:
            _GetStateGroupDelta containing prev_group and delta_ids, where both may be None.
        """

        def _get_state_group_delta_txn(txn: LoggingTransaction) -> _GetStateGroupDelta:
            prev_group = self.db_pool.simple_select_one_onecol_txn(
                txn,
                table="state_group_edges",
                keyvalues={"state_group": state_group},
                retcol="prev_state_group",
                allow_none=True,
            )

            if not prev_group:
                return _GetStateGroupDelta(None, None)

            delta_ids = self.db_pool.simple_select_list_txn(
                txn,
                table="state_groups_state",
                keyvalues={"state_group": state_group},
                retcols=("type", "state_key", "event_id"),
            )

            return _GetStateGroupDelta(
                prev_group,
                {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
            )

        return await self.db_pool.runInteraction(
            "get_state_group_delta", _get_state_group_delta_txn
        )

    async def _get_state_groups_from_groups(
        self, groups: List[int], state_filter: StateFilter
    ) -> Dict[int, StateMap[str]]:
        """Returns the state groups for a given set of groups from the
        database, filtering on types of state events.

        Args:
            groups: list of state group IDs to query
            state_filter: The state filter used to fetch state
                from the database.

        Returns:
            Dict of state group to state map.
        """
        results: Dict[int, StateMap[str]] = {}

        chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
        for chunk in chunks:
            res = await self.db_pool.runInteraction(
                "_get_state_groups_from_groups",
                self._get_state_groups_from_groups_txn,
                chunk,
                state_filter,
            )
            results.update(res)

        return results

    def _get_state_for_group_using_cache(
        self,
        cache: DictionaryCache[int, StateKey, str],
        group: int,
        state_filter: StateFilter,
    ) -> Tuple[MutableStateMap[str], bool]:
        """Checks if group is in cache. See `_get_state_for_groups`

        Args:
            cache: the state group cache to use
            group: The state group to lookup
            state_filter: The state filter used to fetch state from the database.

        Returns:
            2-tuple (`state_dict`, `got_all`).
            `got_all` is a bool indicating if we successfully retrieved all
            requested state from the cache; if False, we need to query the DB
            for the missing state.
        """
        cache_entry = cache.get(group)
        state_dict_ids = cache_entry.value

        if cache_entry.full or state_filter.is_full():
            # Either we have everything or want everything, either way
            # `cache_entry.full` tells us whether we've got everything.
            return state_filter.filter_state(state_dict_ids), cache_entry.full

        # tracks whether any of our requested types are missing from the cache
        missing_types = False

        if state_filter.has_wildcards():
            # We don't know if we fetched all the state keys for the types in
            # the filter that are wildcards, so we have to assume that we may
            # have missed some.
            missing_types = True
        else:
            # There aren't any wild cards, so `concrete_types()` returns the
            # complete list of event types we're wanting.
            for key in state_filter.concrete_types():
                if key not in state_dict_ids and key not in cache_entry.known_absent:
                    missing_types = True
                    break

        return state_filter.filter_state(state_dict_ids), not missing_types

    async def _get_state_for_groups(
        self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
    ) -> Dict[int, MutableStateMap[str]]:
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key

        Args:
            groups: list of state groups for which we want
                to get the state.
            state_filter: The state filter used to fetch state
                from the database.

        Returns:
            Dict of state group to state map.
        """
        state_filter = state_filter or StateFilter.all()

        member_filter, non_member_filter = state_filter.get_member_split()

        # Now we look them up in the member and non-member caches
        (
            non_member_state,
            incomplete_groups_nm,
        ) = self._get_state_for_groups_using_cache(
            groups, self._state_group_cache, state_filter=non_member_filter
        )

        (member_state, incomplete_groups_m,) = self._get_state_for_groups_using_cache(
            groups, self._state_group_members_cache, state_filter=member_filter
        )

        state = dict(non_member_state)
        for group in groups:
            state[group].update(member_state[group])

        # Now fetch any missing groups from the database

        incomplete_groups = incomplete_groups_m | incomplete_groups_nm

        if not incomplete_groups:
            return state

        cache_sequence_nm = self._state_group_cache.sequence
        cache_sequence_m = self._state_group_members_cache.sequence

        # Help the cache hit ratio by expanding the filter a bit
        db_state_filter = state_filter.return_expanded()
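
        # Explanatory note (added): the precise expansion rules live in
        # StateFilter.return_expanded; the idea is that fetching a superset
        # now (e.g. all non-member state rather than one event type) yields
        # a cache entry that later, narrower queries can also be served from.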

        group_to_state_dict = await self._get_state_groups_from_groups(
            list(incomplete_groups), state_filter=db_state_filter
        )

        # Now let's update the caches
        self._insert_into_cache(
            group_to_state_dict,
            db_state_filter,
            cache_seq_num_members=cache_sequence_m,
            cache_seq_num_non_members=cache_sequence_nm,
        )

        # And finally update the result dict, by filtering out any extra
        # stuff we pulled out of the database.
        for group, group_state_dict in group_to_state_dict.items():
            # We just replace any existing entries, as we will have loaded
            # everything we need from the database anyway.
            state[group] = state_filter.filter_state(group_state_dict)

        return state

    def _get_state_for_groups_using_cache(
        self,
        groups: Iterable[int],
        cache: DictionaryCache[int, StateKey, str],
        state_filter: StateFilter,
    ) -> Tuple[Dict[int, MutableStateMap[str]], Set[int]]:
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key, querying from a specific cache.

        Args:
            groups: list of state groups for which we want to get the state.
            cache: the cache of group ids to state dicts which
                we will pass through - either the normal state cache or the
                specific members state cache.
            state_filter: The state filter used to fetch state from the
                database.

        Returns:
            Tuple of dict of state_group_id to state map of entries in the
            cache, and the state group ids either missing from the cache or
            incomplete.
        """
        results = {}
        incomplete_groups = set()
        for group in set(groups):
            state_dict_ids, got_all = self._get_state_for_group_using_cache(
                cache, group, state_filter
            )
            results[group] = state_dict_ids

            if not got_all:
                incomplete_groups.add(group)

        return results, incomplete_groups

    def _insert_into_cache(
        self,
        group_to_state_dict: Dict[int, StateMap[str]],
        state_filter: StateFilter,
        cache_seq_num_members: int,
        cache_seq_num_non_members: int,
    ) -> None:
        """Inserts results from querying the database into the relevant cache.

        Args:
            group_to_state_dict: The new entries pulled from database.
                Map from state group to state dict
            state_filter: The state filter used to fetch state
                from the database.
            cache_seq_num_members: Sequence number of the member cache as of
                the last lookup in that cache
            cache_seq_num_non_members: Sequence number of the non-member cache
                as of the last lookup in that cache
        """

        # We need to work out which types we've fetched from the DB for the
        # member vs non-member caches. This should be as accurate as possible,
        # but can be an underestimate (e.g. when we have wild cards)

        member_filter, non_member_filter = state_filter.get_member_split()
        if member_filter.is_full():
            # We fetched all member events
            member_types = None
        else:
            # `concrete_types()` will only return a subset when there are wild
            # cards in the filter, but that's fine.
            member_types = member_filter.concrete_types()

        if non_member_filter.is_full():
            # We fetched all non member events
            non_member_types = None
        else:
            non_member_types = non_member_filter.concrete_types()

        for group, group_state_dict in group_to_state_dict.items():
            state_dict_members = {}
            state_dict_non_members = {}

            for k, v in group_state_dict.items():
                if k[0] == EventTypes.Member:
                    state_dict_members[k] = v
                else:
                    state_dict_non_members[k] = v

            self._state_group_members_cache.update(
                cache_seq_num_members,
                key=group,
                value=state_dict_members,
                fetched_keys=member_types,
            )

            self._state_group_cache.update(
                cache_seq_num_non_members,
                key=group,
                value=state_dict_non_members,
                fetched_keys=non_member_types,
            )

    async def store_state_group(
        self,
        event_id: str,
        room_id: str,
        prev_group: Optional[int],
        delta_ids: Optional[StateMap[str]],
        current_state_ids: StateMap[str],
    ) -> int:
        """Store a new set of state, returning a newly assigned state group.

        Args:
            event_id: The event ID for which the state was calculated.
            room_id: ID of the room the state belongs to.
            prev_group: A previous state group for the room, optional.
            delta_ids: The delta between state at `prev_group` and
                `current_state_ids`, if `prev_group` was given. Same format as
                `current_state_ids`.
            current_state_ids: The state to store. Map of (type, state_key)
                to event_id.

        Returns:
            The state group ID
        """

        def _store_state_group_txn(txn: LoggingTransaction) -> int:
            if current_state_ids is None:
                # AFAIK, this can never happen
                raise Exception("current_state_ids cannot be None")

            state_group = self._state_group_seq_gen.get_next_id_txn(txn)

            self.db_pool.simple_insert_txn(
                txn,
                table="state_groups",
                values={"id": state_group, "room_id": room_id, "event_id": event_id},
            )

            # We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
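            #
            # As a sketch (added for illustration): after three events the
            # tables might hold
            #
            #   state_groups:       1, 2, 3
            #   state_group_edges:  2 -> 1, 3 -> 2
            #   state_groups_state: full state for group 1, one-row deltas
            #                       for groups 2 and 3
            #
            # so reading group 3 walks the edges back to group 1, applying
            # deltas; the MAX_STATE_DELTA_HOPS check below bounds that walk.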
            if prev_group:
                is_in_db = self.db_pool.simple_select_one_onecol_txn(
                    txn,
                    table="state_groups",
                    keyvalues={"id": prev_group},
                    retcol="id",
                    allow_none=True,
                )
                if not is_in_db:
                    raise Exception(
                        "Trying to persist state with unpersisted prev_group: %r"
                        % (prev_group,)
                    )

                potential_hops = self._count_state_group_hops_txn(txn, prev_group)
            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
                assert delta_ids is not None

                self.db_pool.simple_insert_txn(
                    txn,
                    table="state_group_edges",
                    values={"state_group": state_group, "prev_state_group": prev_group},
                )

                self.db_pool.simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in delta_ids.items()
                    ],
                )
            else:
                self.db_pool.simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in current_state_ids.items()
                    ],
                )

            # Prefill the state group caches with this group.
            # It's fine to use the sequence like this as the state group map
            # is immutable. (If the map wasn't immutable then this prefill could
            # race with another update)

            current_member_state_ids = {
                s: ev
                for (s, ev) in current_state_ids.items()
                if s[0] == EventTypes.Member
            }
            txn.call_after(
                self._state_group_members_cache.update,
                self._state_group_members_cache.sequence,
                key=state_group,
                value=dict(current_member_state_ids),
            )

            current_non_member_state_ids = {
                s: ev
                for (s, ev) in current_state_ids.items()
                if s[0] != EventTypes.Member
            }
            txn.call_after(
                self._state_group_cache.update,
                self._state_group_cache.sequence,
                key=state_group,
                value=dict(current_non_member_state_ids),
            )

            return state_group

        return await self.db_pool.runInteraction(
            "store_state_group", _store_state_group_txn
        )

    async def purge_unreferenced_state_groups(
        self, room_id: str, state_groups_to_delete: Collection[int]
    ) -> None:
        """Deletes no longer referenced state groups and de-deltas any state
        groups that reference them.

        Args:
            room_id: The room the state groups belong to (must all be in the
                same room).
            state_groups_to_delete: Set of all state groups to delete.
        """

        await self.db_pool.runInteraction(
            "purge_unreferenced_state_groups",
            self._purge_unreferenced_state_groups,
            room_id,
            state_groups_to_delete,
        )

    def _purge_unreferenced_state_groups(
        self,
        txn: LoggingTransaction,
        room_id: str,
        state_groups_to_delete: Collection[int],
    ) -> None:
        logger.info(
            "[purge] found %i state groups to delete", len(state_groups_to_delete)
        )

        rows = self.db_pool.simple_select_many_txn(
            txn,
            table="state_group_edges",
            column="prev_state_group",
            iterable=state_groups_to_delete,
            keyvalues={},
            retcols=("state_group",),
        )

        remaining_state_groups = {
            row["state_group"]
            for row in rows
            if row["state_group"] not in state_groups_to_delete
        }

        logger.info(
            "[purge] de-delta-ing %i remaining state groups",
            len(remaining_state_groups),
        )

        # Now we turn the state groups that reference to-be-deleted state
        # groups into non-delta versions.
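        #
        # Illustrative sketch (added, not original code): if group 3 is a
        # delta on group 2 and group 2 is to be deleted, the loop below
        # rewrites group 3 to hold its full state and drops its edge to
        # group 2, so that no surviving group references a deleted one.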
        for sg in remaining_state_groups:
            logger.info("[purge] de-delta-ing remaining state group %s", sg)
            curr_state_by_group = self._get_state_groups_from_groups_txn(txn, [sg])
            curr_state = curr_state_by_group[sg]

            self.db_pool.simple_delete_txn(
                txn, table="state_groups_state", keyvalues={"state_group": sg}
            )

            self.db_pool.simple_delete_txn(
                txn, table="state_group_edges", keyvalues={"state_group": sg}
            )

            self.db_pool.simple_insert_many_txn(
                txn,
                table="state_groups_state",
                values=[
                    {
                        "state_group": sg,
                        "room_id": room_id,
                        "type": key[0],
                        "state_key": key[1],
                        "event_id": state_id,
                    }
                    for key, state_id in curr_state.items()
                ],
            )

        logger.info("[purge] removing redundant state groups")
        txn.execute_batch(
            "DELETE FROM state_groups_state WHERE state_group = ?",
            ((sg,) for sg in state_groups_to_delete),
        )
        txn.execute_batch(
            "DELETE FROM state_groups WHERE id = ?",
            ((sg,) for sg in state_groups_to_delete),
        )

    async def get_previous_state_groups(
        self, state_groups: Iterable[int]
    ) -> Dict[int, int]:
        """Fetch the previous groups of the given state groups.

        Args:
            state_groups: The state groups for which to fetch the previous
                group.

        Returns:
            A mapping from state group to previous state group.
        """

        rows = await self.db_pool.simple_select_many_batch(
            table="state_group_edges",
            column="prev_state_group",
            iterable=state_groups,
            keyvalues={},
            retcols=("prev_state_group", "state_group"),
            desc="get_previous_state_groups",
        )

        return {row["state_group"]: row["prev_state_group"] for row in rows}

    async def purge_room_state(
        self, room_id: str, state_groups_to_delete: Collection[int]
    ) -> None:
        """Deletes all records of a room from state tables

        Args:
            room_id: The room to purge.
            state_groups_to_delete: State groups to delete
        """

        await self.db_pool.runInteraction(
            "purge_room_state",
            self._purge_room_state_txn,
            room_id,
            state_groups_to_delete,
        )

    def _purge_room_state_txn(
        self,
        txn: LoggingTransaction,
        room_id: str,
        state_groups_to_delete: Collection[int],
    ) -> None:
        # First we have to delete the state group states
        logger.info("[purge] removing %s from state_groups_state", room_id)

        self.db_pool.simple_delete_many_txn(
            txn,
            table="state_groups_state",
            column="state_group",
            values=state_groups_to_delete,
            keyvalues={},
        )

        # ... and the state group edges
        logger.info("[purge] removing %s from state_group_edges", room_id)

        self.db_pool.simple_delete_many_txn(
            txn,
            table="state_group_edges",
            column="state_group",
            values=state_groups_to_delete,
            keyvalues={},
        )

        # ... and the state groups
        logger.info("[purge] removing %s from state_groups", room_id)

        self.db_pool.simple_delete_many_txn(
            txn,
            table="state_groups",
            column="id",
            values=state_groups_to_delete,
            keyvalues={},
        )