# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections.abc
import logging
from collections import namedtuple
from typing import Iterable, Tuple

from six import iteritems

from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
from synapse.storage.data_stores.main.roommember import RoomMemberWorkerStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.stringutils import to_ascii

logger = logging.getLogger(__name__)


MAX_STATE_DELTA_HOPS = 100


class _GetStateGroupDelta(
    namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
    """Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
    """

    __slots__ = []

    def __len__(self):
        return len(self.delta_ids) if self.delta_ids else 0
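
# Illustrative sketch (an assumption for exposition, not part of the original
# file): the `iterable=True` flag on `@cached` makes the cache weigh an entry
# by `len(value)` rather than counting it as a single item, which is why this
# namedtuple grows a `__len__`. The pattern would look like:
#
#     @cached(max_entries=10000, iterable=True)
#     def get_state_group_delta(self, state_group):
#         ...
#         return _GetStateGroupDelta(prev_group, delta_ids)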


# this inherits from EventsWorkerStore because it calls self.get_events
class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
    """The parts of StateGroupStore that can be called from workers.
    """

    def __init__(self, database: Database, db_conn, hs):
        super(StateGroupWorkerStore, self).__init__(database, db_conn, hs)

    @cached(max_entries=10000)
    async def get_room_version(self, room_id: str) -> str:
        """Get the room_version of a given room

        Raises:
            NotFoundError: if the room is unknown
        """

        # First we try looking up room version from the database, but for old
        # rooms we might not have added the room version to it yet so we fall
        # back to previous behaviour and look in current state events.

        # We really should have an entry in the rooms table for every room we
        # care about, but let's be a bit paranoid (at least while the background
        # update is happening) to avoid breaking existing rooms.
        version = await self.db.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="room_version",
            desc="get_room_version",
            allow_none=True,
        )

        if version is not None:
            return version

        # Retrieve the room's create event
        create_event = await self.get_create_event_for_room(room_id)
        return create_event.content.get("room_version", "1")
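
    # Illustrative usage sketch (assumed caller names, for exposition only):
    #
    #     version = await store.get_room_version(room_id)  # e.g. "5"
    #
    # Rooms created before room versions existed fall back to "1".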

    @defer.inlineCallbacks
    def get_room_predecessor(self, room_id):
        """Get the predecessor of an upgraded room if it exists.
        Otherwise return None.

        Args:
            room_id (str)

        Returns:
            Deferred[dict|None]: A dictionary containing the structure of the
                predecessor field from the room's create event. The structure
                is determined by other servers, but it is expected to be:

                * room_id (str): The room ID of the predecessor room
                * event_id (str): The ID of the tombstone event in the
                  predecessor room

                None if a predecessor key is not found, or is not a dictionary.

        Raises:
            NotFoundError if the given room is unknown
        """
        # Retrieve the room's create event
        create_event = yield self.get_create_event_for_room(room_id)

        # Retrieve the predecessor key of the create event
        predecessor = create_event.content.get("predecessor", None)

        # Ensure the key is a dictionary
        if not isinstance(predecessor, collections.abc.Mapping):
            return None

        return predecessor
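
    # Illustrative sketch (assumed values): for an upgraded room, the create
    # event content is expected to look something like:
    #
    #     {
    #         "room_version": "5",
    #         "predecessor": {
    #             "room_id": "!old_room:example.com",
    #             "event_id": "$tombstone_event_id",
    #         },
    #     }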

    @defer.inlineCallbacks
    def get_create_event_for_room(self, room_id):
        """Get the create state event for a room.

        Args:
            room_id (str)

        Returns:
            Deferred[EventBase]: The room creation event.

        Raises:
            NotFoundError if the room is unknown
        """
        state_ids = yield self.get_current_state_ids(room_id)
        create_id = state_ids.get((EventTypes.Create, ""))

        # If we can't find the create event, assume we've hit a dead end
        if not create_id:
            raise NotFoundError("Unknown room %s" % (room_id,))

        # Retrieve the room's create event and return
        create_event = yield self.get_event(create_id)
        return create_event

    @cached(max_entries=100000, iterable=True)
    def get_current_state_ids(self, room_id):
        """Get the current state event ids for a room based on the
        current_state_events table.

        Args:
            room_id (str)

        Returns:
            deferred: dict of (type, state_key) -> event_id
        """

        def _get_current_state_ids_txn(txn):
            txn.execute(
                """SELECT type, state_key, event_id FROM current_state_events
                WHERE room_id = ?
                """,
                (room_id,),
            )

            return {
                (intern_string(r[0]), intern_string(r[1])): to_ascii(r[2]) for r in txn
            }

        return self.db.runInteraction(
            "get_current_state_ids", _get_current_state_ids_txn
        )
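
    # Illustrative sketch (assumed values): the resulting map is keyed by
    # (event type, state_key) pairs, for example:
    #
    #     {
    #         ("m.room.create", ""): "$create_event_id",
    #         ("m.room.member", "@alice:example.com"): "$membership_event_id",
    #     }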

    # FIXME: how should this be cached?
    def get_filtered_current_state_ids(
        self, room_id: str, state_filter: StateFilter = StateFilter.all()
    ):
        """Get the current state event of a given type for a room based on the
        current_state_events table. This may not be as up-to-date as the result
        of doing a fresh state resolution as per state_handler.get_current_state

        Args:
            room_id
            state_filter: The state filter used to fetch state
                from the database.

        Returns:
            defer.Deferred[StateMap[str]]: Map from type/state_key to event ID.
        """

        where_clause, where_args = state_filter.make_sql_filter_clause()

        if not where_clause:
            # We delegate to the cached version
            return self.get_current_state_ids(room_id)

        def _get_filtered_current_state_ids_txn(txn):
            results = {}
            sql = """
                SELECT type, state_key, event_id FROM current_state_events
                WHERE room_id = ?
            """

            if where_clause:
                sql += " AND (%s)" % (where_clause,)

            args = [room_id]
            args.extend(where_args)
            txn.execute(sql, args)
            for row in txn:
                typ, state_key, event_id = row
                key = (intern_string(typ), intern_string(state_key))
                results[key] = event_id

            return results

        return self.db.runInteraction(
            "get_filtered_current_state_ids", _get_filtered_current_state_ids_txn
        )
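
    # Illustrative usage sketch (assumed caller, for exposition): fetching a
    # single piece of state with a narrow filter, the same pattern
    # get_canonical_alias_for_room below uses:
    #
    #     state = yield self.get_filtered_current_state_ids(
    #         room_id, StateFilter.from_types([(EventTypes.JoinRules, "")])
    #     )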

    @defer.inlineCallbacks
    def get_canonical_alias_for_room(self, room_id):
        """Get canonical alias for room, if any

        Args:
            room_id (str)

        Returns:
            Deferred[str|None]: The canonical alias, if any
        """

        state = yield self.get_filtered_current_state_ids(
            room_id, StateFilter.from_types([(EventTypes.CanonicalAlias, "")])
        )

        event_id = state.get((EventTypes.CanonicalAlias, ""))
        if not event_id:
            return

        event = yield self.get_event(event_id, allow_none=True)
        if not event:
            return

        return event.content.get("canonical_alias")

    @cached(max_entries=50000)
    def _get_state_group_for_event(self, event_id):
        """Returns the state group for the given event, or None if unknown."""
        return self.db.simple_select_one_onecol(
            table="event_to_state_groups",
            keyvalues={"event_id": event_id},
            retcol="state_group",
            allow_none=True,
            desc="_get_state_group_for_event",
        )

    @cachedList(
        cached_method_name="_get_state_group_for_event",
        list_name="event_ids",
        num_args=1,
        inlineCallbacks=True,
    )
    def _get_state_group_for_events(self, event_ids):
        """Returns mapping event_id -> state_group
        """
        rows = yield self.db.simple_select_many_batch(
            table="event_to_state_groups",
            column="event_id",
            iterable=event_ids,
            keyvalues={},
            retcols=("event_id", "state_group"),
            desc="_get_state_group_for_events",
        )

        return {row["event_id"]: row["state_group"] for row in rows}
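
    # Illustrative usage sketch (assumed caller): the @cachedList wrapper lets
    # callers look up many events in one batched query while still filling the
    # per-event _get_state_group_for_event cache:
    #
    #     groups = yield self._get_state_group_for_events(["$event_a", "$event_b"])
    #     # -> {"$event_a": 42, "$event_b": 42}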

    @defer.inlineCallbacks
    def get_referenced_state_groups(self, state_groups):
        """Check if the state groups are referenced by events.

        Args:
            state_groups (Iterable[int])

        Returns:
            Deferred[set[int]]: The subset of state groups that are
            referenced.
        """

        rows = yield self.db.simple_select_many_batch(
            table="event_to_state_groups",
            column="state_group",
            iterable=state_groups,
            keyvalues={},
            retcols=("DISTINCT state_group",),
            desc="get_referenced_state_groups",
        )

        return set(row["state_group"] for row in rows)


class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):

    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
    EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
    DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"

    def __init__(self, database: Database, db_conn, hs):
        super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs)

        self.server_name = hs.hostname

        self.db.updates.register_background_index_update(
            self.CURRENT_STATE_INDEX_UPDATE_NAME,
            index_name="current_state_events_member_index",
            table="current_state_events",
            columns=["state_key"],
            where_clause="type='m.room.member'",
        )
        self.db.updates.register_background_index_update(
            self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME,
            index_name="event_to_state_groups_sg_index",
            table="event_to_state_groups",
            columns=["state_group"],
        )
        self.db.updates.register_background_update_handler(
            self.DELETE_CURRENT_STATE_UPDATE_NAME, self._background_remove_left_rooms,
        )

    async def _background_remove_left_rooms(self, progress, batch_size):
        """Background update to delete rows from `current_state_events` and
        `event_forward_extremities` tables of rooms that the server is no
        longer joined to.
        """

        last_room_id = progress.get("last_room_id", "")

        def _background_remove_left_rooms_txn(txn):
            sql = """
                SELECT DISTINCT room_id FROM current_state_events
                WHERE room_id > ? ORDER BY room_id LIMIT ?
            """

            txn.execute(sql, (last_room_id, batch_size))
            room_ids = list(row[0] for row in txn)
            if not room_ids:
                return True, set()

            # Find the rooms in this batch where a local user (a state_key of
            # the form "@user:<server_name>") is still joined.
            sql = """
                SELECT room_id
                FROM current_state_events
                WHERE
                    room_id > ? AND room_id <= ?
                    AND type = 'm.room.member'
                    AND membership = 'join'
                    AND state_key LIKE ?
                GROUP BY room_id
            """

            txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name))

            joined_room_ids = set(row[0] for row in txn)

            left_rooms = set(room_ids) - joined_room_ids

            logger.info("Deleting current state left rooms: %r", left_rooms)

            # First we get all users that we still think were joined to the
            # room. This is so that we can mark those device lists as
            # potentially stale, since there may have been a period where the
            # server didn't share a room with the remote user and therefore may
            # have missed any device updates.
            rows = self.db.simple_select_many_txn(
                txn,
                table="current_state_events",
                column="room_id",
                iterable=left_rooms,
                keyvalues={"type": EventTypes.Member, "membership": Membership.JOIN},
                retcols=("state_key",),
            )

            potentially_left_users = set(row["state_key"] for row in rows)

            # Now lets actually delete the rooms from the DB.
            self.db.simple_delete_many_txn(
                txn,
                table="current_state_events",
                column="room_id",
                iterable=left_rooms,
                keyvalues={},
            )

            self.db.simple_delete_many_txn(
                txn,
                table="event_forward_extremities",
                column="room_id",
                iterable=left_rooms,
                keyvalues={},
            )

            self.db.updates._background_update_progress_txn(
                txn,
                self.DELETE_CURRENT_STATE_UPDATE_NAME,
                {"last_room_id": room_ids[-1]},
            )

            return False, potentially_left_users

        finished, potentially_left_users = await self.db.runInteraction(
            "_background_remove_left_rooms", _background_remove_left_rooms_txn
        )

        if finished:
            await self.db.updates._end_background_update(
                self.DELETE_CURRENT_STATE_UPDATE_NAME
            )

        # Now go and check if we still share a room with the remote users in
        # the deleted rooms. If not mark their device lists as stale.
        joined_users = await self.get_users_server_still_shares_room_with(
            potentially_left_users
        )

        for user_id in potentially_left_users - joined_users:
            await self.mark_remote_user_device_list_as_unsubscribed(user_id)

        return batch_size
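
    # Background updates run in batches: returning `batch_size` reports how
    # much work this pass did, and the update unregisters itself via
    # _end_background_update once the room_id scan returns no more rows.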


class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):
    """ Keeps track of the state at a given event.

    This is done by the concept of `state groups`. Every event is assigned
    a state group (identified by an arbitrary string), which references a
    collection of state events. The current state of an event is then the
    collection of state events referenced by the event's state group.

    Hence, every change in the current state causes a new state group to be
    generated. However, if no change happens (e.g., if we get a message event
    with only one parent), it inherits the state group from its parent.

    There are three tables:
      * `state_groups`: Stores group name, first event within the group and
        room id.
      * `event_to_state_groups`: Maps events to state groups.
      * `state_groups_state`: Maps state group to state events.
    """

    def __init__(self, database: Database, db_conn, hs):
        super(StateStore, self).__init__(database, db_conn, hs)
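
    # Illustrative sketch (assumed IDs): how the tables relate for two events
    # in the same room, where the second event changes no state and so shares
    # its parent's state group:
    #
    #     event_to_state_groups:  $create -> 1,  $message -> 1
    #     state_groups_state:     group 1 -> {("m.room.create", ""): "$create"}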

    def _store_event_state_mappings_txn(
        self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
    ):
        state_groups = {}
        for event, context in events_and_contexts:
            if event.internal_metadata.is_outlier():
                continue

            # if the event was rejected, just give it the same state as its
            # predecessor.
            if context.rejected:
                state_groups[event.event_id] = context.state_group_before_event
                continue

            state_groups[event.event_id] = context.state_group

        self.db.simple_insert_many_txn(
            txn,
            table="event_to_state_groups",
            values=[
                {"state_group": state_group_id, "event_id": event_id}
                for event_id, state_group_id in iteritems(state_groups)
            ],
        )

        # Prime the _get_state_group_for_event cache so that subsequent
        # lookups for these events don't have to hit the database.
        for event_id, state_group_id in iteritems(state_groups):
            txn.call_after(
                self._get_state_group_for_event.prefill, (event_id,), state_group_id
            )