2019-12-03 15:28:46 +01:00
|
|
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
|
|
|
|
import itertools
|
|
|
|
import logging
|
2022-02-02 18:24:07 +01:00
|
|
|
from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple
|
2019-12-03 15:28:46 +01:00
|
|
|
|
2020-05-15 17:43:59 +02:00
|
|
|
from synapse.api.constants import EventTypes
|
2020-07-01 17:35:40 +02:00
|
|
|
from synapse.replication.tcp.streams import BackfillStream, CachesStream
|
2020-05-15 17:43:59 +02:00
|
|
|
from synapse.replication.tcp.streams.events import (
|
2020-07-01 17:35:40 +02:00
|
|
|
EventsStream,
|
2020-05-15 17:43:59 +02:00
|
|
|
EventsStreamCurrentStateRow,
|
|
|
|
EventsStreamEventRow,
|
2022-03-16 15:37:04 +01:00
|
|
|
EventsStreamRow,
|
2020-05-15 17:43:59 +02:00
|
|
|
)
|
2019-12-03 15:28:46 +01:00
|
|
|
from synapse.storage._base import SQLBaseStore
|
2022-02-02 18:24:07 +01:00
|
|
|
from synapse.storage.database import (
|
|
|
|
DatabasePool,
|
|
|
|
LoggingDatabaseConnection,
|
|
|
|
LoggingTransaction,
|
|
|
|
)
|
2019-12-03 15:28:46 +01:00
|
|
|
from synapse.storage.engines import PostgresEngine
|
2022-07-21 12:51:30 +02:00
|
|
|
from synapse.storage.util.id_generators import MultiWriterIdGenerator
|
2022-09-21 15:32:01 +02:00
|
|
|
from synapse.util.caches.descriptors import CachedFunction
|
2020-01-14 12:58:02 +01:00
|
|
|
from synapse.util.iterutils import batch_iter
|
2019-12-03 15:28:46 +01:00
|
|
|
|
2021-10-22 19:15:41 +02:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2019-12-03 15:28:46 +01:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
# This is a special cache name we use to batch multiple invalidations of caches
|
|
|
|
# based on the current state when notifying workers over replication.
|
2019-12-04 16:45:42 +01:00
|
|
|
CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
|
2019-12-03 15:28:46 +01:00
|
|
|
|
|
|
|
|
2020-03-25 15:54:01 +01:00
|
|
|
class CacheInvalidationWorkerStore(SQLBaseStore):
    """Store responsible for keeping caches consistent across workers.

    It both reads cache-invalidation rows arriving over replication (applying
    them to the local in-memory caches) and, on writers, records invalidations
    in `cache_invalidation_stream_by_instance` so other workers can pick them
    up.
    """

    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # Name of this worker instance; stamped onto every invalidation row we
        # write so readers can filter by writer.
        self._instance_name = hs.get_instance_name()

        self.db_pool.updates.register_background_index_update(
            update_name="cache_invalidation_index_by_instance",
            index_name="cache_invalidation_stream_by_instance_instance_index",
            table="cache_invalidation_stream_by_instance",
            columns=("instance_name", "stream_id"),
            psql_only=True,  # The table is only on postgres DBs.
        )

        # Stream-ID generator for the caches stream. Only set up on Postgres;
        # on other engines (see the else branch) it stays None and no
        # invalidation rows are persisted.
        self._cache_id_gen: Optional[MultiWriterIdGenerator]
        if isinstance(self.database_engine, PostgresEngine):
            # We set the `writers` to an empty list here as we don't care about
            # missing updates over restarts, as we'll not have anything in our
            # caches to invalidate. (This reduces the amount of writes to the DB
            # that happen).
            self._cache_id_gen = MultiWriterIdGenerator(
                db_conn,
                database,
                notifier=hs.get_replication_notifier(),
                stream_name="caches",
                instance_name=hs.get_instance_name(),
                tables=[
                    (
                        "cache_invalidation_stream_by_instance",
                        "instance_name",
                        "stream_id",
                    )
                ],
                sequence_name="cache_invalidation_stream_seq",
                writers=[],
            )
        else:
            self._cache_id_gen = None
|
|
|
|
|
2020-05-07 14:51:08 +02:00
|
|
|
    async def get_all_updated_caches(
        self, instance_name: str, last_id: int, current_id: int, limit: int
    ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
        """Get updates for caches replication stream.

        Args:
            instance_name: The writer we want to fetch updates from. Unused
                here since there is only ever one writer.
            last_id: The token to fetch updates from. Exclusive.
            current_id: The token to fetch updates up to. Inclusive.
            limit: The requested limit for the number of rows to return. The
                function may return more or fewer rows.

        Returns:
            A tuple consisting of: the updates, a token to use to fetch
            subsequent updates, and whether we returned fewer rows than exists
            between the requested tokens due to the limit.

            The token returned can be used in a subsequent call to this
            function to get further updates.

            The updates are a list of 2-tuples of stream ID and the row data
        """

        # Fast path: nothing new since the caller's last token.
        if last_id == current_id:
            return [], current_id, False

        def get_all_updated_caches_txn(
            txn: LoggingTransaction,
        ) -> Tuple[List[Tuple[int, tuple]], int, bool]:
            # We purposefully don't bound by the current token, as we want to
            # send across cache invalidations as quickly as possible. Cache
            # invalidations are idempotent, so duplicates are fine.
            sql = """
                SELECT stream_id, cache_func, keys, invalidation_ts
                FROM cache_invalidation_stream_by_instance
                WHERE stream_id > ? AND instance_name = ?
                ORDER BY stream_id ASC
                LIMIT ?
            """
            txn.execute(sql, (last_id, instance_name, limit))
            # Each row becomes (stream_id, (cache_func, keys, invalidation_ts)).
            updates = [(row[0], row[1:]) for row in txn]
            limited = False
            upto_token = current_id
            if len(updates) >= limit:
                # We hit the limit, so report the last stream ID we actually
                # returned and flag that there may be more rows to fetch.
                upto_token = updates[-1][0]
                limited = True

            return updates, upto_token, limited

        return await self.db_pool.runInteraction(
            "get_all_updated_caches", get_all_updated_caches_txn
        )
|
|
|
|
|
2022-07-18 15:28:14 +02:00
|
|
|
    def process_replication_rows(
        self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any]
    ) -> None:
        """Apply rows arriving over replication to the local caches.

        Events, backfill and caches streams each invalidate a different set of
        caches; anything else is handled by the superclass.
        """
        if stream_name == EventsStream.NAME:
            for row in rows:
                self._process_event_stream_row(token, row)
        elif stream_name == BackfillStream.NAME:
            for row in rows:
                # Backfilled events use the negated token as their stream
                # ordering (the backfill stream counts downwards).
                self._invalidate_caches_for_event(
                    -token,
                    row.event_id,
                    row.room_id,
                    row.type,
                    row.state_key,
                    row.redacts,
                    row.relates_to,
                    backfilled=True,
                )
        elif stream_name == CachesStream.NAME:
            for row in rows:
                if row.cache_func == CURRENT_STATE_CACHE_NAME:
                    # The magic current-state "cache" batches a room's state
                    # invalidation; it always carries explicit keys.
                    if row.keys is None:
                        raise Exception(
                            "Can't send an 'invalidate all' for current state cache"
                        )

                    # keys == [room_id, *changed_member_user_ids]
                    room_id = row.keys[0]
                    members_changed = set(row.keys[1:])
                    self._invalidate_state_caches(room_id, members_changed)
                else:
                    self._attempt_to_invalidate_cache(row.cache_func, row.keys)

        # Let mixed-in stores react to the rows too.
        super().process_replication_rows(stream_name, instance_name, token, rows)
|
2020-01-27 14:47:50 +01:00
|
|
|
|
2023-01-04 12:49:26 +01:00
|
|
|
def process_replication_position(
|
|
|
|
self, stream_name: str, instance_name: str, token: int
|
|
|
|
) -> None:
|
|
|
|
if stream_name == CachesStream.NAME:
|
|
|
|
if self._cache_id_gen:
|
|
|
|
self._cache_id_gen.advance(instance_name, token)
|
|
|
|
super().process_replication_position(stream_name, instance_name, token)
|
|
|
|
|
2022-03-16 15:37:04 +01:00
|
|
|
    def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None:
        """Invalidate caches for a single row from the events stream.

        Args:
            token: The stream position of the row.
            row: Either an event row or a current-state-delta row.
        """
        data = row.data

        if row.type == EventsStreamEventRow.TypeId:
            # A new event: drop everything cached about it.
            assert isinstance(data, EventsStreamEventRow)
            self._invalidate_caches_for_event(
                token,
                data.event_id,
                data.room_id,
                data.type,
                data.state_key,
                data.redacts,
                data.relates_to,
                backfilled=False,
            )
        elif row.type == EventsStreamCurrentStateRow.TypeId:
            # A change to a room's current state.
            assert isinstance(data, EventsStreamCurrentStateRow)
            self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token)  # type: ignore[attr-defined]

            # Membership changes additionally affect the per-user room caches.
            if data.type == EventTypes.Member:
                self.get_rooms_for_user_with_stream_ordering.invalidate(  # type: ignore[attr-defined]
                    (data.state_key,)
                )
                self.get_rooms_for_user.invalidate((data.state_key,))  # type: ignore[attr-defined]
        else:
            raise Exception("Unknown events stream row type %s" % (row.type,))
|
|
|
|
|
|
|
|
    def _invalidate_caches_for_event(
        self,
        stream_ordering: int,
        event_id: str,
        room_id: str,
        etype: str,
        state_key: Optional[str],
        redacts: Optional[str],
        relates_to: Optional[str],
        backfilled: bool,
    ) -> None:
        """Invalidate every local cache affected by a (possibly backfilled) event.

        Args:
            stream_ordering: Position of the event (negative for backfill).
            event_id: The event being processed.
            room_id: Room the event belongs to.
            etype: The event's type (e.g. membership events get extra handling).
            state_key: State key, if this is a state event.
            redacts: Event ID this event redacts, if any.
            relates_to: Event ID this event relates to, if any.
            backfilled: True when the event arrived via backfill.
        """
        # This invalidates any local in-memory cached event objects, the original
        # process triggering the invalidation is responsible for clearing any external
        # cached objects.
        self._invalidate_local_get_event_cache(event_id)  # type: ignore[attr-defined]

        # Per-room caches that any new event can affect.
        self._attempt_to_invalidate_cache("have_seen_event", (room_id, event_id))
        self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
        self._attempt_to_invalidate_cache(
            "get_unread_event_push_actions_by_room_for_user", (room_id,)
        )

        # The `_get_membership_from_event_id` is immutable, except for the
        # case where we look up an event *before* persisting it.
        self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))

        if not backfilled:
            # Backfilled events don't advance the forward stream cache.
            self._events_stream_cache.entity_has_changed(room_id, stream_ordering)  # type: ignore[attr-defined]

        if redacts:
            self._invalidate_local_get_event_cache(redacts)  # type: ignore[attr-defined]
            # Caches which might leak edits must be invalidated for the event being
            # redacted.
            self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,))
            self._attempt_to_invalidate_cache("get_applicable_edit", (redacts,))
            self._attempt_to_invalidate_cache("get_thread_id", (redacts,))
            self._attempt_to_invalidate_cache("get_thread_id_for_receipts", (redacts,))

        if etype == EventTypes.Member:
            # Membership changes affect the per-user room and invite caches.
            self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)  # type: ignore[attr-defined]
            self._attempt_to_invalidate_cache(
                "get_invited_rooms_for_local_user", (state_key,)
            )
            self._attempt_to_invalidate_cache(
                "get_rooms_for_user_with_stream_ordering", (state_key,)
            )
            self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))

        if relates_to:
            # Relation/thread aggregations keyed on the related-to event.
            self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
            self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,))
            self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,))
            self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,))
            self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,))
            self._attempt_to_invalidate_cache("get_threads", (room_id,))
|
2020-05-15 17:43:59 +02:00
|
|
|
|
2022-03-16 15:37:04 +01:00
|
|
|
async def invalidate_cache_and_stream(
|
|
|
|
self, cache_name: str, keys: Tuple[Any, ...]
|
|
|
|
) -> None:
|
2023-05-16 21:56:38 +02:00
|
|
|
"""Invalidates the cache and adds it to the cache stream so other workers
|
2020-05-15 17:43:59 +02:00
|
|
|
will know to invalidate their caches.
|
|
|
|
|
2023-05-16 21:56:38 +02:00
|
|
|
This should only be used to invalidate caches where other workers won't
|
|
|
|
otherwise have known from other replication streams that the cache should
|
2020-05-15 17:43:59 +02:00
|
|
|
be invalidated.
|
|
|
|
"""
|
|
|
|
cache_func = getattr(self, cache_name, None)
|
|
|
|
if not cache_func:
|
|
|
|
return
|
|
|
|
|
|
|
|
cache_func.invalidate(keys)
|
2022-09-21 15:32:01 +02:00
|
|
|
await self.send_invalidation_to_replication(
|
2020-05-15 17:43:59 +02:00
|
|
|
cache_func.__name__,
|
|
|
|
keys,
|
|
|
|
)
|
|
|
|
|
2022-03-16 15:37:04 +01:00
|
|
|
def _invalidate_cache_and_stream(
|
|
|
|
self,
|
|
|
|
txn: LoggingTransaction,
|
2022-09-21 15:32:01 +02:00
|
|
|
cache_func: CachedFunction,
|
2022-03-16 15:37:04 +01:00
|
|
|
keys: Tuple[Any, ...],
|
|
|
|
) -> None:
|
2023-05-16 21:56:38 +02:00
|
|
|
"""Invalidates the cache and adds it to the cache stream so other workers
|
2019-12-03 15:28:46 +01:00
|
|
|
will know to invalidate their caches.
|
|
|
|
|
2023-05-16 21:56:38 +02:00
|
|
|
This should only be used to invalidate caches where other workers won't
|
|
|
|
otherwise have known from other replication streams that the cache should
|
2019-12-03 15:28:46 +01:00
|
|
|
be invalidated.
|
|
|
|
"""
|
|
|
|
txn.call_after(cache_func.invalidate, keys)
|
|
|
|
self._send_invalidation_to_replication(txn, cache_func.__name__, keys)
|
|
|
|
|
2022-03-16 15:37:04 +01:00
|
|
|
def _invalidate_all_cache_and_stream(
|
2022-09-21 15:32:01 +02:00
|
|
|
self, txn: LoggingTransaction, cache_func: CachedFunction
|
2022-03-16 15:37:04 +01:00
|
|
|
) -> None:
|
2023-05-16 21:56:38 +02:00
|
|
|
"""Invalidates the entire cache and adds it to the cache stream so other workers
|
2020-01-22 11:37:00 +01:00
|
|
|
will know to invalidate their caches.
|
|
|
|
"""
|
|
|
|
|
|
|
|
txn.call_after(cache_func.invalidate_all)
|
|
|
|
self._send_invalidation_to_replication(txn, cache_func.__name__, None)
|
|
|
|
|
2022-02-02 18:24:07 +01:00
|
|
|
def _invalidate_state_caches_and_stream(
|
|
|
|
self, txn: LoggingTransaction, room_id: str, members_changed: Collection[str]
|
|
|
|
) -> None:
|
2019-12-03 15:28:46 +01:00
|
|
|
"""Special case invalidation of caches based on current state.
|
|
|
|
|
|
|
|
We special case this so that we can batch the cache invalidations into a
|
|
|
|
single replication poke.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
txn
|
2022-02-02 18:24:07 +01:00
|
|
|
room_id: Room where state changed
|
|
|
|
members_changed: The user_ids of members that have changed
|
2019-12-03 15:28:46 +01:00
|
|
|
"""
|
|
|
|
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
|
|
|
|
|
|
|
|
if members_changed:
|
|
|
|
# We need to be careful that the size of the `members_changed` list
|
|
|
|
# isn't so large that it causes problems sending over replication, so we
|
|
|
|
# send them in chunks.
|
|
|
|
# Max line length is 16K, and max user ID length is 255, so 50 should
|
|
|
|
# be safe.
|
|
|
|
for chunk in batch_iter(members_changed, 50):
|
|
|
|
keys = itertools.chain([room_id], chunk)
|
|
|
|
self._send_invalidation_to_replication(
|
2019-12-04 16:45:42 +01:00
|
|
|
txn, CURRENT_STATE_CACHE_NAME, keys
|
2019-12-03 15:28:46 +01:00
|
|
|
)
|
|
|
|
else:
|
|
|
|
# if no members changed, we still need to invalidate the other caches.
|
|
|
|
self._send_invalidation_to_replication(
|
2019-12-04 16:45:42 +01:00
|
|
|
txn, CURRENT_STATE_CACHE_NAME, [room_id]
|
2019-12-03 15:28:46 +01:00
|
|
|
)
|
|
|
|
|
2022-09-21 15:32:01 +02:00
|
|
|
async def send_invalidation_to_replication(
|
|
|
|
self, cache_name: str, keys: Optional[Collection[Any]]
|
|
|
|
) -> None:
|
|
|
|
await self.db_pool.runInteraction(
|
|
|
|
"send_invalidation_to_replication",
|
|
|
|
self._send_invalidation_to_replication,
|
|
|
|
cache_name,
|
|
|
|
keys,
|
|
|
|
)
|
|
|
|
|
2020-01-22 11:37:00 +01:00
|
|
|
    def _send_invalidation_to_replication(
        self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]]
    ) -> None:
        """Notifies replication that given cache has been invalidated.

        Note that this does *not* invalidate the cache locally.

        Args:
            txn
            cache_name
            keys: Entry to invalidate. If None will invalidate all.
        """

        # The magic current-state cache always batches explicit keys; an
        # "invalidate all" for it makes no sense.
        if cache_name == CURRENT_STATE_CACHE_NAME and keys is None:
            raise Exception(
                "Can't stream invalidate all with magic current state cache"
            )

        # Only Postgres has the invalidation stream table (see __init__); on
        # other engines this is a no-op.
        if isinstance(self.database_engine, PostgresEngine):
            assert self._cache_id_gen is not None

            # get_next() returns a context manager which is designed to wrap
            # the transaction. However, we want to only get an ID when we want
            # to use it, here, so we need to call __enter__ manually, and have
            # __exit__ called after the transaction finishes.
            stream_id = self._cache_id_gen.get_next_txn(txn)
            txn.call_after(self.hs.get_notifier().on_new_replication_data)

            # Materialize the keys so they can be stored in the row.
            if keys is not None:
                keys = list(keys)

            self.db_pool.simple_insert_txn(
                txn,
                table="cache_invalidation_stream_by_instance",
                values={
                    "stream_id": stream_id,
                    "instance_name": self._instance_name,
                    "cache_func": cache_name,
                    "keys": keys,
                    "invalidation_ts": self._clock.time_msec(),
                },
            )
|
|
|
|
|
2020-08-19 11:39:31 +02:00
|
|
|
def get_cache_stream_token_for_writer(self, instance_name: str) -> int:
|
2019-12-03 15:28:46 +01:00
|
|
|
if self._cache_id_gen:
|
2020-08-19 11:39:31 +02:00
|
|
|
return self._cache_id_gen.get_current_token_for_writer(instance_name)
|
2019-12-03 15:28:46 +01:00
|
|
|
else:
|
|
|
|
return 0
|