# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Optional

from synapse.storage._base import SQLBaseStore
from synapse.storage.database import DatabasePool
from synapse.storage.engines import PostgresEngine
from synapse.storage.state import StateFilter

logger = logging.getLogger(__name__)

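# Cap on the length of a state group delta chain. _background_deduplicate_state
# leaves a group as a full snapshot rather than re-encoding it as a delta once
# its chain would reach this many hops, since reads must walk the whole chain.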
MAX_STATE_DELTA_HOPS = 100


class StateGroupBackgroundUpdateStore(SQLBaseStore):
    """Defines functions related to state groups needed to run the state background
    updates.
    """

    def _count_state_group_hops_txn(self, txn, state_group):
        """Given a state group, count how many hops there are in the tree.

        This is used to ensure the delta chains don't get too long.
        """
        if isinstance(self.database_engine, PostgresEngine):
            sql = """
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT count(*) FROM state;
            """

            txn.execute(sql, (state_group,))
            row = txn.fetchone()
            if row and row[0]:
                return row[0]
            else:
                return 0
        else:
            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            next_group = state_group
            count = 0

            while next_group:
                next_group = self.db_pool.simple_select_one_onecol_txn(
                    txn,
                    table="state_group_edges",
                    keyvalues={"state_group": next_group},
                    retcol="prev_state_group",
                    allow_none=True,
                )
                if next_group:
                    count += 1

            return count

    def _get_state_groups_from_groups_txn(
        self, txn, groups, state_filter: Optional[StateFilter] = None
    ):
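        """Fetch the state for each of the given state groups as a dict from
        (type, state_key) to event_id, walking each group's delta chain and
        applying the given state_filter (defaulting to all state).
        """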
        state_filter = state_filter or StateFilter.all()

        results = {group: {} for group in groups}

        where_clause, where_args = state_filter.make_sql_filter_clause()

        # Unless the filter clause is empty, we're going to append it after an
        # existing where clause
        if where_clause:
            where_clause = " AND (%s)" % (where_clause,)

        if isinstance(self.database_engine, PostgresEngine):
            # Temporarily disable sequential scans in this transaction. This is
            # a temporary hack until we can add the right indices.
            txn.execute("SET LOCAL enable_seqscan=off")

            # The below query walks the state_group tree so that the "state"
            # table includes all state_groups in the tree. It then joins
            # against `state_groups_state` to fetch the latest state.
            # It assumes that previous state groups are always numerically
            # lesser.
            # The DISTINCT ON, combined with the "ORDER BY ... state_group DESC",
            # picks the event_id from the greatest state group that sets each
            # (type, state_key).
            sql = """
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT DISTINCT ON (type, state_key)
                    type, state_key, event_id
                FROM state_groups_state
                WHERE state_group IN (
                    SELECT state_group FROM state
                ) %s
                ORDER BY type, state_key, state_group DESC
            """

            for group in groups:
                args = [group]
                args.extend(where_args)

                txn.execute(sql % (where_clause,), args)
                for row in txn:
                    typ, state_key, event_id = row
                    key = (typ, state_key)
                    results[group][key] = event_id
        else:
            max_entries_returned = state_filter.max_entries_returned()

            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            for group in groups:
                next_group = group

                while next_group:
                    # We did this before by getting the list of group ids, and
                    # then passing that list to sqlite to get latest event for
                    # each (type, state_key). However, that was terribly slow
                    # without the right indices (which we can't add until
                    # after we finish deduping state, which requires this func)
                    args = [next_group]
                    args.extend(where_args)

                    txn.execute(
                        "SELECT type, state_key, event_id FROM state_groups_state"
                        " WHERE state_group = ? " + where_clause,
                        args,
                    )
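                    # Entries found nearer the requested group take precedence,
                    # so a delta further up the chain must not overwrite a
                    # (type, state_key) we have already collected.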
                    results[group].update(
                        ((typ, state_key), event_id)
                        for typ, state_key, event_id in txn
                        if (typ, state_key) not in results[group]
                    )

                    # If the number of entries in the (type,state_key)->event_id dict
                    # matches the number of (type, state_key) pairs we were searching
                    # for, then we must have found them all, so no need to go walk
                    # further down the tree... UNLESS our types filter contained
                    # wildcards (i.e. Nones) in which case we have to do an exhaustive
                    # search
                    if (
                        max_entries_returned is not None
                        and len(results[group]) == max_entries_returned
                    ):
                        break

                    next_group = self.db_pool.simple_select_one_onecol_txn(
                        txn,
                        table="state_group_edges",
                        keyvalues={"state_group": next_group},
                        retcol="prev_state_group",
                        allow_none=True,
                    )

        return results


class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):

    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
    STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"

    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)
        self.db_pool.updates.register_background_update_handler(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
            self._background_deduplicate_state,
        )
        self.db_pool.updates.register_background_update_handler(
            self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
        )
        self.db_pool.updates.register_background_index_update(
            self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME,
            index_name="state_groups_room_id_idx",
            table="state_groups",
            columns=["room_id"],
        )

    async def _background_deduplicate_state(self, progress, batch_size):
        """This background update will slowly deduplicate state by re-encoding
        state groups as deltas against an earlier group where possible.
        """
        last_state_group = progress.get("last_state_group", 0)
        rows_inserted = progress.get("rows_inserted", 0)
        max_group = progress.get("max_group", None)

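        # Deduplicating a single state group can touch many rows, so work
        # through batch_size / 100 groups per batch; the result is scaled
        # back up by the same factor below.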
        BATCH_SIZE_SCALE_FACTOR = 100

        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))

        if max_group is None:
            rows = await self.db_pool.execute(
                "_background_deduplicate_state",
                None,
                "SELECT coalesce(max(id), 0) FROM state_groups",
            )
            max_group = rows[0][0]

        def reindex_txn(txn):
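            # Process up to batch_size state groups in ascending id order,
            # resuming just after the last group handled by the previous batch.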
            new_last_state_group = last_state_group
            for count in range(batch_size):
                txn.execute(
                    "SELECT id, room_id FROM state_groups"
                    " WHERE ? < id AND id <= ?"
                    " ORDER BY id ASC"
                    " LIMIT 1",
                    (new_last_state_group, max_group),
                )
                row = txn.fetchone()
                if row:
                    state_group, room_id = row

                if not row or not state_group:
                    return True, count

                txn.execute(
                    "SELECT state_group FROM state_group_edges"
                    " WHERE state_group = ?",
                    (state_group,),
                )

                # If we reach a point where we've already started inserting
                # edges we should stop.
                if txn.fetchall():
                    return True, count

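                # Use the closest earlier state group in the same room as the
                # candidate base to delta against.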
                txn.execute(
                    "SELECT coalesce(max(id), 0) FROM state_groups"
                    " WHERE id < ? AND room_id = ?",
                    (state_group, room_id),
                )
                (prev_group,) = txn.fetchone()
                new_last_state_group = state_group

                if prev_group:
                    potential_hops = self._count_state_group_hops_txn(txn, prev_group)
                    if potential_hops >= MAX_STATE_DELTA_HOPS:
                        # We want to ensure chains are at most this long,
                        # otherwise read performance degrades.
                        continue

                    prev_state = self._get_state_groups_from_groups_txn(
                        txn, [prev_group]
                    )
                    prev_state = prev_state[prev_group]

                    curr_state = self._get_state_groups_from_groups_txn(
                        txn, [state_group]
                    )
                    curr_state = curr_state[state_group]

                    if not set(prev_state.keys()) - set(curr_state.keys()):
                        # We can only do a delta if the current state is a
                        # superset of the previous group's keys

                        delta_state = {
                            key: value
                            for key, value in curr_state.items()
                            if prev_state.get(key, None) != value
                        }

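                        # Swap the group's full snapshot for a delta: re-point
                        # it at prev_group in state_group_edges and keep only
                        # the rows that differ in state_groups_state.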
                        self.db_pool.simple_delete_txn(
                            txn,
                            table="state_group_edges",
                            keyvalues={"state_group": state_group},
                        )

                        self.db_pool.simple_insert_txn(
                            txn,
                            table="state_group_edges",
                            values={
                                "state_group": state_group,
                                "prev_state_group": prev_group,
                            },
                        )

                        self.db_pool.simple_delete_txn(
                            txn,
                            table="state_groups_state",
                            keyvalues={"state_group": state_group},
                        )

                        self.db_pool.simple_insert_many_txn(
                            txn,
                            table="state_groups_state",
                            values=[
                                {
                                    "state_group": state_group,
                                    "room_id": room_id,
                                    "type": key[0],
                                    "state_key": key[1],
                                    "event_id": state_id,
                                }
                                for key, state_id in delta_state.items()
                            ],
                        )

            progress = {
                "last_state_group": state_group,
                "rows_inserted": rows_inserted + batch_size,
                "max_group": max_group,
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
            )

            return False, batch_size

        finished, result = await self.db_pool.runInteraction(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
        )

        if finished:
            await self.db_pool.updates._end_background_update(
                self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
            )

        return result * BATCH_SIZE_SCALE_FACTOR

    async def _background_index_state(self, progress, batch_size):
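        """Replace the old state_groups_state index with one covering
        (state_group, type, state_key). On postgres the index is built
        CONCURRENTLY so the table stays writable while it builds.
        """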
        def reindex_txn(conn):
            conn.rollback()
            if isinstance(self.database_engine, PostgresEngine):
                # postgres insists on autocommit for the index
                conn.set_session(autocommit=True)
                try:
                    txn = conn.cursor()
                    txn.execute(
                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
                        " ON state_groups_state(state_group, type, state_key)"
                    )
                    txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
                finally:
                    conn.set_session(autocommit=False)
            else:
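                # sqlite has no CREATE INDEX CONCURRENTLY, so build the index
                # inside the normal transaction.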
                txn = conn.cursor()
                txn.execute(
                    "CREATE INDEX state_groups_state_type_idx"
                    " ON state_groups_state(state_group, type, state_key)"
                )
                txn.execute("DROP INDEX IF EXISTS state_groups_state_id")

        await self.db_pool.runWithConnection(reindex_txn)

        await self.db_pool.updates._end_background_update(
            self.STATE_GROUP_INDEX_UPDATE_NAME
        )

        return 1