2021-08-19 17:12:55 +02:00
|
|
|
# Copyright 2015-2021 The Matrix.org Foundation C.I.C.
|
2015-01-26 19:53:31 +01:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2018-07-09 08:09:20 +02:00
|
|
|
import itertools
|
|
|
|
import logging
|
2021-04-22 17:43:50 +02:00
|
|
|
from typing import (
|
|
|
|
TYPE_CHECKING,
|
|
|
|
Any,
|
|
|
|
Collection,
|
|
|
|
Dict,
|
|
|
|
FrozenSet,
|
|
|
|
List,
|
|
|
|
Optional,
|
|
|
|
Set,
|
|
|
|
Tuple,
|
|
|
|
)
|
2018-07-09 08:09:20 +02:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
import attr
|
2018-10-09 15:15:49 +02:00
|
|
|
from prometheus_client import Counter
|
|
|
|
|
2021-12-08 18:26:29 +01:00
|
|
|
from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes
|
2020-02-03 19:05:44 +01:00
|
|
|
from synapse.api.filtering import FilterCollection
|
2021-08-23 14:14:42 +02:00
|
|
|
from synapse.api.presence import UserPresenceState
|
2021-08-19 17:12:55 +02:00
|
|
|
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
2020-02-03 19:05:44 +01:00
|
|
|
from synapse.events import EventBase
|
2020-03-24 15:45:33 +01:00
|
|
|
from synapse.logging.context import current_context
|
2021-04-01 18:08:21 +02:00
|
|
|
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
|
2018-07-09 08:09:20 +02:00
|
|
|
from synapse.push.clientformat import format_push_rules_for_user
|
2021-12-21 14:25:34 +01:00
|
|
|
from synapse.storage.databases.main.event_push_actions import NotifCounts
|
2022-01-26 14:27:04 +01:00
|
|
|
from synapse.storage.databases.main.relations import BundledAggregations
|
2018-09-12 01:50:39 +02:00
|
|
|
from synapse.storage.roommember import MemberSummary
|
2018-10-25 18:49:55 +02:00
|
|
|
from synapse.storage.state import StateFilter
|
2020-02-03 19:05:44 +01:00
|
|
|
from synapse.types import (
|
|
|
|
JsonDict,
|
2020-08-28 13:28:53 +02:00
|
|
|
MutableStateMap,
|
2020-11-17 11:51:25 +01:00
|
|
|
Requester,
|
2020-02-03 19:05:44 +01:00
|
|
|
RoomStreamToken,
|
|
|
|
StateMap,
|
|
|
|
StreamToken,
|
|
|
|
UserID,
|
|
|
|
)
|
2018-08-10 15:50:21 +02:00
|
|
|
from synapse.util.async_helpers import concurrently_execute
|
2018-07-26 23:51:30 +02:00
|
|
|
from synapse.util.caches.expiringcache import ExpiringCache
|
|
|
|
from synapse.util.caches.lrucache import LruCache
|
2021-06-17 17:23:11 +02:00
|
|
|
from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext
|
2017-02-02 19:36:17 +01:00
|
|
|
from synapse.util.metrics import Measure, measure_func
|
2016-05-11 14:42:37 +02:00
|
|
|
from synapse.visibility import filter_events_for_client
|
2018-04-28 13:19:12 +02:00
|
|
|
|
2020-09-03 13:54:10 +02:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from synapse.server import HomeServer
|
|
|
|
|
2015-01-26 19:53:31 +01:00
|
|
|
logger = logging.getLogger(__name__)
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2018-10-10 12:23:17 +02:00
|
|
|
# Counts the number of times we returned a non-empty sync. `type` is one of
# "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is
# "true" or "false" depending on if the request asked for lazy loaded members or
# not.
non_empty_sync_counter = Counter(
    "synapse_handlers_sync_nonempty_total",
    "Count of non empty sync responses. type is initial_sync/full_state_sync"
    "/incremental_sync. lazy_loaded indicates if lazy loaded members were "
    "enabled for that request.",
    ["type", "lazy_loaded"],
)

# Store the cache that tracks which lazy-loaded members have been sent to a given
# client for no more than 30 minutes.
LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000  # milliseconds

# Remember the last 100 members we sent to a client for the purposes of
# avoiding redundantly sending the same lazy-loaded members to the client
LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100


# Opaque key under which a /sync request is deduplicated in the response cache
# (built from the request's parameters; see SyncConfig.request_key).
SyncRequestKey = Tuple[Any, ...]
|
|
|
|
|
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncConfig:
    """Per-request parameters describing how a /sync response should be built."""

    # The user the sync is being performed on behalf of.
    user: UserID
    # Filters to apply to the events included in the response.
    filter_collection: FilterCollection
    # Whether the syncing user is a guest (passed through to event sources).
    is_guest: bool
    # Key used to de-duplicate identical concurrent requests in the
    # response cache (see SyncHandler.wait_for_sync_for_user).
    request_key: SyncRequestKey
    # The device performing the sync, if known.
    device_id: Optional[str]
|
2019-06-20 11:32:02 +02:00
|
|
|
|
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TimelineBatch:
    """A batch of events making up the `timeline` section of a room's sync result."""

    # Token pointing just before the first event in `events`, for back-pagination.
    prev_batch: StreamToken
    events: List[EventBase]
    # Whether the batch was truncated (more events exist than were returned).
    limited: bool
    # A mapping of event ID to the bundled aggregations for the above events.
    # This is only calculated if limited is true.
    bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None

    def __bool__(self) -> bool:
        """Make the result appear empty if there are no updates. This is used
        to tell if room needs to be part of the sync result.
        """
        return bool(self.events)
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2015-10-05 17:39:22 +02:00
|
|
|
|
2020-09-02 18:19:37 +02:00
|
|
|
# We can't freeze this class, because we need to update it after it's instantiated to
# update its unread count. This is because we calculate the unread count for a room only
# if there are updates for it, which we check after the instance has been created.
# This should not be a big deal because we update the notification counts afterwards as
# well anyway.
@attr.s(slots=True, auto_attribs=True)
class JoinedSyncResult:
    """The sync result for a single room that the user is joined to."""

    room_id: str
    timeline: TimelineBatch
    state: StateMap[EventBase]
    ephemeral: List[JsonDict]
    account_data: List[JsonDict]
    unread_notifications: JsonDict
    summary: Optional[JsonDict]
    unread_count: int

    def __bool__(self) -> bool:
        """Make the result appear empty if there are no updates. This is used
        to tell if room needs to be part of the sync result.
        """
        return bool(
            self.timeline
            or self.state
            or self.ephemeral
            or self.account_data
            # nb the notification count does not, er, count: if there's nothing
            # else in the result, we don't need to send it.
        )
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ArchivedSyncResult:
    """The sync result for a single room that the user has left."""

    room_id: str
    timeline: TimelineBatch
    state: StateMap[EventBase]
    account_data: List[JsonDict]

    def __bool__(self) -> bool:
        """Make the result appear empty if there are no updates. This is used
        to tell if room needs to be part of the sync result.
        """
        return bool(self.timeline or self.state or self.account_data)
|
|
|
|
|
2015-10-19 18:26:18 +02:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class InvitedSyncResult:
    """The sync result for a single room that the user has been invited to."""

    room_id: str
    # The invite membership event itself.
    invite: EventBase

    def __bool__(self) -> bool:
        """Invited rooms should always be reported to the client"""
        return True
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class KnockedSyncResult:
    """The sync result for a single room that the user has knocked on."""

    room_id: str
    # The knock membership event itself.
    knock: EventBase

    def __bool__(self) -> bool:
        """Knocked rooms should always be reported to the client"""
        return True
|
|
|
|
|
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class GroupsSyncResult:
    """Group (community) updates to include in the sync response."""

    join: JsonDict
    invite: JsonDict
    leave: JsonDict

    def __bool__(self) -> bool:
        # Empty (and hence omittable) iff all three sections are empty.
        return bool(self.join or self.invite or self.leave)
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2017-07-10 15:53:19 +02:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DeviceLists:
    """
    Attributes:
        changed: List of user_ids whose devices may have changed
        left: List of user_ids whose devices we no longer track
    """

    changed: Collection[str]
    left: Collection[str]

    def __bool__(self) -> bool:
        # Empty (and hence omittable) iff there are no device-list changes.
        return bool(self.changed or self.left)
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2017-09-07 16:08:39 +02:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class _RoomChanges:
    """The set of room entries to include in the sync, plus the set of joined
    and left room IDs since last sync.
    """

    room_entries: List["RoomSyncResultBuilder"]
    invited: List[InvitedSyncResult]
    knocked: List[KnockedSyncResult]
    newly_joined_rooms: List[str]
    newly_left_rooms: List[str]
|
2020-02-03 19:05:44 +01:00
|
|
|
|
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class SyncResult:
    """
    Attributes:
        next_batch: Token for the next sync
        presence: List of presence events for the user.
        account_data: List of account_data events for the user.
        joined: JoinedSyncResult for each joined room.
        invited: InvitedSyncResult for each invited room.
        knocked: KnockedSyncResult for each knocked on room.
        archived: ArchivedSyncResult for each archived room.
        to_device: List of direct messages for the device.
        device_lists: List of user_ids whose devices have changed
        device_one_time_keys_count: Dict of algorithm to count for one time keys
            for this device
        device_unused_fallback_key_types: List of key types that have an unused fallback
            key
        groups: Group updates, if any
    """

    next_batch: StreamToken
    presence: List[UserPresenceState]
    account_data: List[JsonDict]
    joined: List[JoinedSyncResult]
    invited: List[InvitedSyncResult]
    knocked: List[KnockedSyncResult]
    archived: List[ArchivedSyncResult]
    to_device: List[JsonDict]
    device_lists: DeviceLists
    device_one_time_keys_count: JsonDict
    device_unused_fallback_key_types: List[str]
    groups: Optional[GroupsSyncResult]

    def __bool__(self) -> bool:
        """Make the result appear empty if there are no updates. This is used
        to tell if the notifier needs to wait for more events when polling for
        events.
        """
        # Note: next_batch, device_one_time_keys_count and
        # device_unused_fallback_key_types do not count as updates here.
        return bool(
            self.presence
            or self.joined
            or self.invited
            or self.knocked
            or self.archived
            or self.account_data
            or self.to_device
            or self.device_lists
            or self.groups
        )
|
2019-06-20 11:32:02 +02:00
|
|
|
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2020-09-04 12:54:56 +02:00
|
|
|
class SyncHandler:
|
2020-09-03 13:54:10 +02:00
|
|
|
def __init__(self, hs: "HomeServer"):
    """Wire up the handler's dependencies from the homeserver object."""
    self.hs_config = hs.config
    self.store = hs.get_datastore()
    self.notifier = hs.get_notifier()
    self.presence_handler = hs.get_presence_handler()
    self.event_sources = hs.get_event_sources()
    self.clock = hs.get_clock()
    self.state = hs.get_state_handler()
    self.auth = hs.get_auth()
    self.storage = hs.get_storage()
    self.state_store = self.storage.state

    # TODO: flush cache entries on subsequent sync request.
    # Once we get the next /sync request (ie, one with the same access token
    # that sets 'since' to 'next_batch'), we know that device won't need a
    # cached result any more, and we could flush the entry from the cache to save
    # memory.
    self.response_cache: ResponseCache[SyncRequestKey] = ResponseCache(
        hs.get_clock(),
        "sync",
        timeout_ms=hs.config.caches.sync_response_cache_duration,
    )

    # ExpiringCache((User, Device)) -> LruCache(user_id => event_id)
    # NOTE(review): max_len=0 presumably means "no size bound, expire by age
    # only" — confirm against ExpiringCache.
    self.lazy_loaded_members_cache: ExpiringCache[
        Tuple[str, Optional[str]], LruCache[str, str]
    ] = ExpiringCache(
        "lazy_loaded_members_cache",
        self.clock,
        max_len=0,
        expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE,
    )
|
2018-07-26 23:51:30 +02:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def wait_for_sync_for_user(
    self,
    requester: Requester,
    sync_config: SyncConfig,
    since_token: Optional[StreamToken] = None,
    timeout: int = 0,
    full_state: bool = False,
) -> SyncResult:
    """Get the sync for a client if we have new data for it now. Otherwise
    wait for new data to arrive on the server. If the timeout expires, then
    return an empty sync result.

    Args:
        requester: the user making the request, used for the auth-blocking check
        sync_config: parameters describing how to compute this sync
        since_token: stream position the client last synced from, if any
        timeout: how long to wait for new data before returning an empty
            result (presumably milliseconds — confirm against Notifier)
        full_state: whether to return the full state for each room

    Returns:
        The sync result, possibly shared with an identical concurrent
        request via the response cache.
    """
    # If the user is not part of the mau group, then check that limits have
    # not been exceeded (if not part of the group by this point, almost certain
    # auth_blocking will occur)
    user_id = sync_config.user.to_string()
    await self.auth.check_auth_blocking(requester=requester)

    # Deduplicate identical in-flight requests on request_key; the real work
    # happens in _wait_for_sync_for_user.
    res = await self.response_cache.wrap(
        sync_config.request_key,
        self._wait_for_sync_for_user,
        sync_config,
        since_token,
        timeout,
        full_state,
        cache_context=True,
    )
    logger.debug("Returning sync response for %s", user_id)
    return res
|
2015-10-26 19:47:18 +01:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def _wait_for_sync_for_user(
    self,
    sync_config: SyncConfig,
    since_token: Optional[StreamToken],
    timeout: int,
    full_state: bool,
    cache_context: ResponseCacheContext[SyncRequestKey],
) -> SyncResult:
    """The start of the machinery that produces a /sync response.

    See https://spec.matrix.org/v1.1/client-server-api/#syncing for full details.

    This method does high-level bookkeeping:
        - tracking the kind of sync in the logging context
        - deleting any to_device messages whose delivery has been acknowledged.
        - deciding if we should dispatch an instant or delayed response
        - marking the sync as being lazily loaded, if appropriate

    Computing the body of the response begins in the next method,
    `current_sync_for_user`.
    """
    # Classify the sync; used both for the logging-context tag and metrics.
    if since_token is None:
        sync_type = "initial_sync"
    elif full_state:
        sync_type = "full_state_sync"
    else:
        sync_type = "incremental_sync"

    context = current_context()
    if context:
        context.tag = sync_type

    # if we have a since token, delete any to-device messages before that token
    # (since we now know that the device has received them)
    if since_token is not None:
        since_stream_id = since_token.to_device_key
        deleted = await self.store.delete_messages_for_device(
            sync_config.user.to_string(), sync_config.device_id, since_stream_id
        )
        logger.debug(
            "Deleted %d to-device messages up to %d", deleted, since_stream_id
        )

    if timeout == 0 or since_token is None or full_state:
        # we are going to return immediately, so don't bother calling
        # notifier.wait_for_events.
        result: SyncResult = await self.current_sync_for_user(
            sync_config, since_token, full_state=full_state
        )
    else:
        # Otherwise, we wait for something to happen and report it to the user.
        async def current_sync_callback(
            before_token: StreamToken, after_token: StreamToken
        ) -> SyncResult:
            return await self.current_sync_for_user(sync_config, since_token)

        result = await self.notifier.wait_for_events(
            sync_config.user.to_string(),
            timeout,
            current_sync_callback,
            from_token=since_token,
        )

        # if nothing has happened in any of the users' rooms since /sync was called,
        # the resultant next_batch will be the same as since_token (since the result
        # is generated when wait_for_events is first called, and not regenerated
        # when wait_for_events times out).
        #
        # If that happens, we mustn't cache it, so that when the client comes back
        # with the same cache token, we don't immediately return the same empty
        # result, causing a tightloop. (#8518)
        if result.next_batch == since_token:
            cache_context.should_cache = False

    if result:
        # Record the non-empty sync, labelled by sync type and whether the
        # request asked for lazy-loaded members.
        if sync_config.filter_collection.lazy_load_members():
            lazy_loaded = "true"
        else:
            lazy_loaded = "false"
        non_empty_sync_counter.labels(sync_type, lazy_loaded).inc()

    return result
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def current_sync_for_user(
    self,
    sync_config: SyncConfig,
    since_token: Optional[StreamToken] = None,
    full_state: bool = False,
) -> SyncResult:
    """Generates the response body of a sync result, represented as a SyncResult.

    This is a wrapper around `generate_sync_result` which starts an open tracing
    span to track the sync. See `generate_sync_result` for the next part of your
    indoctrination.
    """
    with start_active_span("sync.current_sync_for_user"):
        log_kv({"since_token": since_token})
        sync_result = await self.generate_sync_result(
            sync_config, since_token, full_state
        )

        # Record on the span whether the sync carried any updates.
        set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
        return sync_result
|
2015-01-27 17:24:22 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def push_rules_for_user(self, user: UserID) -> JsonDict:
    """Fetch the given user's push rules and render them in client format."""
    raw_rules = await self.store.get_push_rules_for_user(user.to_string())
    return format_push_rules_for_user(user, raw_rules)
|
2016-03-04 15:44:01 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def ephemeral_by_room(
    self,
    sync_result_builder: "SyncResultBuilder",
    now_token: StreamToken,
    since_token: Optional[StreamToken] = None,
) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]:
    """Get the ephemeral events for each room the user is in

    Args:
        sync_result_builder
        now_token: Where the server is currently up to.
        since_token: Where the server was when the client
            last synced.
    Returns:
        A tuple of the now StreamToken, updated to reflect the which typing
        events are included, and a dict mapping from room_id to a list of
        typing events for that room.
    """

    sync_config = sync_result_builder.sync_config

    with Measure(self.clock, "ephemeral_by_room"):
        # For an initial sync (no since_token) read from the start of the stream.
        typing_key = since_token.typing_key if since_token else 0

        room_ids = sync_result_builder.joined_room_ids

        typing_source = self.event_sources.sources.typing
        typing, typing_key = await typing_source.get_new_events(
            user=sync_config.user,
            from_key=typing_key,
            limit=sync_config.filter_collection.ephemeral_limit(),
            room_ids=room_ids,
            is_guest=sync_config.is_guest,
        )
        # Advance the returned token so the client resumes after these events.
        now_token = now_token.copy_and_replace("typing_key", typing_key)

        ephemeral_by_room: JsonDict = {}

        for event in typing:
            # we want to exclude the room_id from the event, but modifying the
            # result returned by the event source is poor form (it might cache
            # the object)
            room_id = event["room_id"]
            event_copy = {k: v for (k, v) in event.items() if k != "room_id"}
            ephemeral_by_room.setdefault(room_id, []).append(event_copy)

        receipt_key = since_token.receipt_key if since_token else 0

        receipt_source = self.event_sources.sources.receipt
        receipts, receipt_key = await receipt_source.get_new_events(
            user=sync_config.user,
            from_key=receipt_key,
            limit=sync_config.filter_collection.ephemeral_limit(),
            room_ids=room_ids,
            is_guest=sync_config.is_guest,
        )
        now_token = now_token.copy_and_replace("receipt_key", receipt_key)

        for event in receipts:
            room_id = event["room_id"]
            # exclude room id, as above
            event_copy = {k: v for (k, v) in event.items() if k != "room_id"}
            ephemeral_by_room.setdefault(room_id, []).append(event_copy)

    return now_token, ephemeral_by_room
|
2015-10-20 17:36:20 +02:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def _load_filtered_recents(
    self,
    room_id: str,
    sync_config: SyncConfig,
    now_token: StreamToken,
    since_token: Optional[StreamToken] = None,
    potential_recents: Optional[List[EventBase]] = None,
    newly_joined_room: bool = False,
) -> TimelineBatch:
    """Build the timeline batch for one room.

    Filters `potential_recents` (if supplied and sufficient), otherwise loads
    events from the store in batches until `timeline_limit` events survive
    filtering, or we give up after a bounded number of attempts.

    Args:
        room_id: the room to load the timeline for
        sync_config: the sync parameters, including the timeline filter
        now_token: the current stream position
        since_token: the client's previous stream position, if any
        potential_recents: candidate events already fetched by the caller
        newly_joined_room: whether the user joined the room since last sync

    Returns:
        The TimelineBatch for the room, including bundled aggregations when
        the timeline is limited or the room is newly joined.
    """
    with Measure(self.clock, "load_filtered_recents"):
        timeline_limit = sync_config.filter_collection.timeline_limit()
        block_all_timeline = (
            sync_config.filter_collection.blocks_all_room_timeline()
        )

        # Assume the batch is limited unless the caller gave us all the
        # events we could need.
        if (
            potential_recents is None
            or newly_joined_room
            or timeline_limit < len(potential_recents)
        ):
            limited = True
        else:
            limited = False

        log_kv({"limited": limited})

        if potential_recents:
            recents = await sync_config.filter_collection.filter_room_timeline(
                potential_recents
            )
            log_kv({"recents_after_sync_filtering": len(recents)})

            # We check if there are any state events, if there are then we pass
            # all current state events to the filter_events function. This is to
            # ensure that we always include current state in the timeline
            current_state_ids: FrozenSet[str] = frozenset()
            if any(e.is_state() for e in recents):
                current_state_ids_map = await self.store.get_current_state_ids(
                    room_id
                )
                current_state_ids = frozenset(current_state_ids_map.values())

            recents = await filter_events_for_client(
                self.storage,
                sync_config.user.to_string(),
                recents,
                always_include_ids=current_state_ids,
            )
            log_kv({"recents_after_visibility_filtering": len(recents)})
        else:
            recents = []

        if not limited or block_all_timeline:
            prev_batch_token = now_token
            if recents:
                room_key = recents[0].internal_metadata.before
                prev_batch_token = now_token.copy_and_replace("room_key", room_key)

            return TimelineBatch(
                events=recents, prev_batch=prev_batch_token, limited=False
            )

        # Over-fetch to compensate for events removed by filtering.
        filtering_factor = 2
        load_limit = max(timeline_limit * filtering_factor, 10)
        max_repeat = 5  # Only try a few times per room, otherwise
        room_key = now_token.room_key
        end_key = room_key

        since_key = None
        if since_token and not newly_joined_room:
            since_key = since_token.room_key

        while limited and len(recents) < timeline_limit and max_repeat:
            # If we have a since_key then we are trying to get any events
            # that have happened since `since_key` up to `end_key`, so we
            # can just use `get_room_events_stream_for_room`.
            # Otherwise, we want to return the last N events in the room
            # in topological ordering.
            if since_key:
                events, end_key = await self.store.get_room_events_stream_for_room(
                    room_id,
                    limit=load_limit + 1,
                    from_key=since_key,
                    to_key=end_key,
                )
            else:
                events, end_key = await self.store.get_recent_events_for_room(
                    room_id, limit=load_limit + 1, end_token=end_key
                )

            log_kv({"loaded_recents": len(events)})

            loaded_recents = (
                await sync_config.filter_collection.filter_room_timeline(events)
            )

            log_kv({"loaded_recents_after_sync_filtering": len(loaded_recents)})

            # We check if there are any state events, if there are then we pass
            # all current state events to the filter_events function. This is to
            # ensure that we always include current state in the timeline
            current_state_ids = frozenset()
            if any(e.is_state() for e in loaded_recents):
                current_state_ids_map = await self.store.get_current_state_ids(
                    room_id
                )
                current_state_ids = frozenset(current_state_ids_map.values())

            loaded_recents = await filter_events_for_client(
                self.storage,
                sync_config.user.to_string(),
                loaded_recents,
                always_include_ids=current_state_ids,
            )

            log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})

            # Older events go before the ones we already have.
            loaded_recents.extend(recents)
            recents = loaded_recents

            # Fewer events than requested means we've reached the start of
            # the available history.
            if len(events) <= load_limit:
                limited = False
                break
            max_repeat -= 1

        if len(recents) > timeline_limit:
            limited = True
            recents = recents[-timeline_limit:]
            room_key = recents[0].internal_metadata.before

        prev_batch_token = now_token.copy_and_replace("room_key", room_key)

        # Don't bother to bundle aggregations if the timeline is unlimited,
        # as clients will have all the necessary information.
        bundled_aggregations = None
        if limited or newly_joined_room:
            bundled_aggregations = await self.store.get_bundled_aggregations(
                recents, sync_config.user.to_string()
            )

        return TimelineBatch(
            events=recents,
            prev_batch=prev_batch_token,
            limited=limited or newly_joined_room,
            bundled_aggregations=bundled_aggregations,
        )
|
2015-01-30 12:32:35 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def get_state_after_event(
    self, event: EventBase, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
    """
    Get the room state after the given event

    Args:
        event: event of interest
        state_filter: The state filter used to fetch state from the database.
            Defaults to `StateFilter.all()` (no filtering) when omitted.

    Returns:
        A state map of (event type, state key) -> event id, reflecting the room
        state immediately after `event` (i.e. including `event` itself if it is
        a state event).
    """
    state_ids = await self.state_store.get_state_ids_for_event(
        event.event_id, state_filter=state_filter or StateFilter.all()
    )
    if event.is_state():
        # get_state_ids_for_event returns the state *before* the event; fold
        # the event itself in. Copy first so we don't mutate a cached map.
        state_ids = dict(state_ids)
        state_ids[(event.type, event.state_key)] = event.event_id
    return state_ids
|
2015-11-10 19:27:23 +01:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def get_state_at(
    self,
    room_id: str,
    stream_position: StreamToken,
    state_filter: Optional[StateFilter] = None,
) -> StateMap[str]:
    """Get the room state at a particular stream position

    Args:
        room_id: room for which to get state
        stream_position: point at which to get state
        state_filter: The state filter used to fetch state from the database.
            Defaults to `StateFilter.all()` (no filtering) when omitted.

    Returns:
        A state map of (event type, state key) -> event id at the given
        position, or an empty dict if the room has no events at that point.
    """
    # FIXME this claims to get the state at a stream position, but
    # get_recent_events_for_room operates by topo ordering. This therefore
    # does not reliably give you the state at the given stream position.
    # (https://github.com/matrix-org/synapse/issues/3305)
    last_events, _ = await self.store.get_recent_events_for_room(
        room_id, end_token=stream_position.room_key, limit=1
    )

    if last_events:
        # State at the stream position is approximated as the state after the
        # most recent event at or before that position.
        last_event = last_events[-1]
        state = await self.get_state_after_event(
            last_event, state_filter=state_filter or StateFilter.all()
        )

    else:
        # no events in this room - so presumably no state
        state = {}
    return state
|
2015-01-27 17:24:22 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def compute_summary(
    self,
    room_id: str,
    sync_config: SyncConfig,
    batch: TimelineBatch,
    state: MutableStateMap[EventBase],
    now_token: StreamToken,
) -> Optional[JsonDict]:
    """Works out a room summary block for this room, summarising the number
    of joined members in the room, and providing the 'hero' members if the
    room has no name so clients can consistently name rooms. Also adds
    state events to 'state' if needed to describe the heroes.

    Args:
        room_id
        sync_config
        batch: The timeline batch for the room that will be sent to the user.
        state: State as returned by compute_state_delta. NOTE: mutated in
            place — any missing hero membership events are added to it.
        now_token: Token of the end of the current batch.

    Returns:
        The summary dict (member counts and, where needed, "m.heroes"), or
        None if the room has no events at all.
    """

    # FIXME: we could/should get this from room_stats when matthew/stats lands

    # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305
    last_events, _ = await self.store.get_recent_event_ids_for_room(
        room_id, end_token=now_token.room_key, limit=1
    )

    if not last_events:
        return None

    # We only need the name / canonical alias to decide whether heroes are
    # required, so restrict the state fetch to just those two event types.
    last_event = last_events[-1]
    state_ids = await self.state_store.get_state_ids_for_event(
        last_event.event_id,
        state_filter=StateFilter.from_types(
            [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]
        ),
    )

    # this is heavily cached, thus: fast.
    details = await self.store.get_room_summary(room_id)

    name_id = state_ids.get((EventTypes.Name, ""))
    canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ""))

    summary: JsonDict = {}
    empty_ms = MemberSummary([], 0)

    # TODO: only send these when they change.
    summary["m.joined_member_count"] = details.get(Membership.JOIN, empty_ms).count
    summary["m.invited_member_count"] = details.get(
        Membership.INVITE, empty_ms
    ).count

    # if the room has a name or canonical_alias set, we can skip
    # calculating heroes. Empty strings are falsey, so we check
    # for the "name" value and default to an empty string.
    if name_id:
        name = await self.store.get_event(name_id, allow_none=True)
        if name and name.content.get("name"):
            return summary

    if canonical_alias_id:
        canonical_alias = await self.store.get_event(
            canonical_alias_id, allow_none=True
        )
        if canonical_alias and canonical_alias.content.get("alias"):
            return summary

    # The requesting user is never their own hero.
    me = sync_config.user.to_string()

    joined_user_ids = [
        r[0] for r in details.get(Membership.JOIN, empty_ms).members if r[0] != me
    ]
    invited_user_ids = [
        r[0] for r in details.get(Membership.INVITE, empty_ms).members if r[0] != me
    ]
    gone_user_ids = [
        r[0] for r in details.get(Membership.LEAVE, empty_ms).members if r[0] != me
    ] + [r[0] for r in details.get(Membership.BAN, empty_ms).members if r[0] != me]

    # FIXME: only build up a member_ids list for our heroes
    member_ids = {}
    for membership in (
        Membership.JOIN,
        Membership.INVITE,
        Membership.LEAVE,
        Membership.BAN,
    ):
        for user_id, event_id in details.get(membership, empty_ms).members:
            member_ids[user_id] = event_id

    # FIXME: order by stream ordering rather than as returned by SQL
    # Prefer active (joined/invited) members as heroes; fall back to
    # departed/banned members only when there are none. At most 5 heroes.
    if joined_user_ids or invited_user_ids:
        summary["m.heroes"] = sorted(joined_user_ids + invited_user_ids)[0:5]
    else:
        summary["m.heroes"] = sorted(gone_user_ids)[0:5]

    if not sync_config.filter_collection.lazy_load_members():
        return summary

    # ensure we send membership events for heroes if needed
    cache_key = (sync_config.user.to_string(), sync_config.device_id)
    cache = self.get_lazy_loaded_members_cache(cache_key)

    # track which members the client should already know about via LL:
    # Ones which are already in state...
    existing_members = {
        user_id for (typ, user_id) in state.keys() if typ == EventTypes.Member
    }

    # ...or ones which are in the timeline...
    for ev in batch.events:
        if ev.type == EventTypes.Member:
            existing_members.add(ev.state_key)

    # ...and then ensure any missing ones get included in state.
    missing_hero_event_ids = [
        member_ids[hero_id]
        for hero_id in summary["m.heroes"]
        if (
            cache.get(hero_id) != member_ids[hero_id]
            and hero_id not in existing_members
        )
    ]

    missing_hero_state = await self.store.get_events(missing_hero_event_ids)

    for s in missing_hero_state.values():
        # Record what we've sent so future syncs can skip redundant members,
        # and splice the membership event into the caller's state map.
        cache.set(s.state_key, s.event_id)
        state[(EventTypes.Member, s.state_key)] = s

    return summary
|
2018-08-16 10:46:50 +02:00
|
|
|
|
2021-03-24 11:48:46 +01:00
|
|
|
def get_lazy_loaded_members_cache(
    self, cache_key: Tuple[str, Optional[str]]
) -> LruCache[str, str]:
    """Return the lazy-loaded-members LruCache for a (user_id, device_id) pair.

    Creates and registers a fresh cache on first use for the given key;
    subsequent calls return the same cache instance.
    """
    existing: Optional[LruCache[str, str]] = self.lazy_loaded_members_cache.get(
        cache_key
    )
    if existing is not None:
        logger.debug("found LruCache for %r", cache_key)
        return existing

    # First sight of this (user, device) pair: create and register a cache.
    logger.debug("creating LruCache for %r", cache_key)
    fresh: LruCache[str, str] = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE)
    self.lazy_loaded_members_cache[cache_key] = fresh
    return fresh
|
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def compute_state_delta(
    self,
    room_id: str,
    batch: TimelineBatch,
    sync_config: SyncConfig,
    since_token: Optional[StreamToken],
    now_token: StreamToken,
    full_state: bool,
) -> MutableStateMap[EventBase]:
    """Works out the difference in state between the start of the timeline
    and the previous sync.

    Args:
        room_id:
        batch: The timeline batch for the room that will be sent to the user.
        sync_config:
        since_token: Token of the end of the previous batch. May be None.
        now_token: Token of the end of the current batch.
        full_state: Whether to force returning the full state.

    Returns:
        A map of (event type, state key) -> state event for the state the
        client should receive alongside the timeline, filtered through the
        client's state filter.
    """
    # TODO(mjark) Check if the state events were received by the server
    # after the previous sync, since we need to include those state
    # updates even if they occurred logically before the previous event.
    # TODO(mjark) Check for new redactions in the state events.

    with Measure(self.clock, "compute_state_delta"):

        members_to_fetch = None

        lazy_load_members = sync_config.filter_collection.lazy_load_members()
        include_redundant_members = (
            sync_config.filter_collection.include_redundant_members()
        )

        if lazy_load_members:
            # We only request state for the members needed to display the
            # timeline:

            members_to_fetch = {
                event.sender  # FIXME: we also care about invite targets etc.
                for event in batch.events
            }

            if full_state:
                # always make sure we LL ourselves so we know we're in the room
                # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
                # We only need apply this on full state syncs given we disabled
                # LL for incr syncs in #3840.
                members_to_fetch.add(sync_config.user.to_string())

            state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
        else:
            state_filter = StateFilter.all()

        # State events already present in the timeline itself; these must not
        # be duplicated in the state block.
        timeline_state = {
            (event.type, event.state_key): event.event_id
            for event in batch.events
            if event.is_state()
        }

        if full_state:
            # Full state: diff against an empty "previous" state so everything
            # (minus what's in the timeline) is returned.
            if batch:
                current_state_ids = await self.state_store.get_state_ids_for_event(
                    batch.events[-1].event_id, state_filter=state_filter
                )

                state_ids = await self.state_store.get_state_ids_for_event(
                    batch.events[0].event_id, state_filter=state_filter
                )

            else:
                current_state_ids = await self.get_state_at(
                    room_id, stream_position=now_token, state_filter=state_filter
                )

                state_ids = current_state_ids

            state_ids = _calculate_state(
                timeline_contains=timeline_state,
                timeline_start=state_ids,
                previous={},
                current=current_state_ids,
                lazy_load_members=lazy_load_members,
            )
        elif batch.limited:
            # Gappy incremental sync: diff timeline-start state against the
            # state at the previous sync token.
            if batch:
                state_at_timeline_start = (
                    await self.state_store.get_state_ids_for_event(
                        batch.events[0].event_id, state_filter=state_filter
                    )
                )
            else:
                # We can get here if the user has ignored the senders of all
                # the recent events.
                state_at_timeline_start = await self.get_state_at(
                    room_id, stream_position=now_token, state_filter=state_filter
                )

            # for now, we disable LL for gappy syncs - see
            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
            # N.B. this slows down incr syncs as we are now processing way
            # more state in the server than if we were LLing.
            #
            # We still have to filter timeline_start to LL entries (above) in order
            # for _calculate_state's LL logic to work, as we have to include LL
            # members for timeline senders in case they weren't loaded in the initial
            # sync. We do this by (counterintuitively) by filtering timeline_start
            # members to just be ones which were timeline senders, which then ensures
            # all of the rest get included in the state block (if we need to know
            # about them).
            state_filter = StateFilter.all()

            # If this is an initial sync then full_state should be set, and
            # that case is handled above. We assert here to ensure that this
            # is indeed the case.
            assert since_token is not None
            state_at_previous_sync = await self.get_state_at(
                room_id, stream_position=since_token, state_filter=state_filter
            )

            if batch:
                current_state_ids = await self.state_store.get_state_ids_for_event(
                    batch.events[-1].event_id, state_filter=state_filter
                )
            else:
                # Its not clear how we get here, but empirically we do
                # (#5407). Logging has been added elsewhere to try and
                # figure out where this state comes from.
                current_state_ids = await self.get_state_at(
                    room_id, stream_position=now_token, state_filter=state_filter
                )

            state_ids = _calculate_state(
                timeline_contains=timeline_state,
                timeline_start=state_at_timeline_start,
                previous=state_at_previous_sync,
                current=current_state_ids,
                # we have to include LL members in case LL initial sync missed them
                lazy_load_members=lazy_load_members,
            )
        else:
            # Non-gappy incremental sync: normally no state to send at all.
            state_ids = {}
            if lazy_load_members:
                if members_to_fetch and batch.events:
                    # We're returning an incremental sync, with no
                    # "gap" since the previous sync, so normally there would be
                    # no state to return.
                    # But we're lazy-loading, so the client might need some more
                    # member events to understand the events in this timeline.
                    # So we fish out all the member events corresponding to the
                    # timeline here, and then dedupe any redundant ones below.

                    state_ids = await self.state_store.get_state_ids_for_event(
                        batch.events[0].event_id,
                        # we only want members!
                        state_filter=StateFilter.from_types(
                            (EventTypes.Member, member)
                            for member in members_to_fetch
                        ),
                    )

        if lazy_load_members and not include_redundant_members:
            cache_key = (sync_config.user.to_string(), sync_config.device_id)
            cache = self.get_lazy_loaded_members_cache(cache_key)

            # if it's a new sync sequence, then assume the client has had
            # amnesia and doesn't want any recent lazy-loaded members
            # de-duplicated.
            if since_token is None:
                logger.debug("clearing LruCache for %r", cache_key)
                cache.clear()
            else:
                # only send members which aren't in our LruCache (either
                # because they're new to this client or have been pushed out
                # of the cache)
                logger.debug("filtering state from %r...", state_ids)
                state_ids = {
                    t: event_id
                    for t, event_id in state_ids.items()
                    if cache.get(t[1]) != event_id
                }
                logger.debug("...to %r", state_ids)

            # add any member IDs we are about to send into our LruCache
            for t, event_id in itertools.chain(
                state_ids.items(), timeline_state.items()
            ):
                if t[0] == EventTypes.Member:
                    cache.set(t[1], event_id)

        state: Dict[str, EventBase] = {}
        if state_ids:
            state = await self.store.get_events(list(state_ids.values()))

        return {
            (e.type, e.state_key): e
            for e in await sync_config.filter_collection.filter_room_state(
                list(state.values())
            )
            if e.type != EventTypes.Aliases  # until MSC2261 or alternative solution
        }
|
2015-01-30 14:33:41 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def unread_notifs_for_room_id(
    self, room_id: str, sync_config: SyncConfig
) -> NotifCounts:
    """Return the unread notification counts for the user in the given room.

    Counts are taken from the event push actions accumulated since the
    user's last read receipt in the room.

    Args:
        room_id: The room to count notifications for.
        sync_config: The sync config for the requesting user.
    """
    with Measure(self.clock, "unread_notifs_for_room_id"):
        # Find where the user last read up to; counts are relative to this.
        last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
            user_id=sync_config.user.to_string(),
            room_id=room_id,
            receipt_type=ReceiptTypes.READ,
        )

        return await self.store.get_unread_event_push_actions_by_room_for_user(
            room_id, sync_config.user.to_string(), last_unread_event_id
        )
|
2016-01-19 12:35:50 +01:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def generate_sync_result(
    self,
    sync_config: SyncConfig,
    since_token: Optional[StreamToken] = None,
    full_state: bool = False,
) -> SyncResult:
    """Generates the response body of a sync result.

    This is represented by a `SyncResult` struct, which is built from small pieces
    using a `SyncResultBuilder`. See also
    https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3sync
    the `sync_result_builder` is passed as a mutable ("inout") parameter to various
    helper functions. These retrieve and process the data which forms the sync body,
    often writing to the `sync_result_builder` to store their output.

    At the end, we transfer data from the `sync_result_builder` to a new `SyncResult`
    instance to signify that the sync calculation is complete.

    Args:
        sync_config: The sync configuration for the requesting user/device.
        since_token: The point after which to return changes; None for an
            initial sync.
        full_state: Whether to force returning the full state.
    """
    # NB: The now_token gets changed by some of the generate_sync_* methods,
    # this is due to some of the underlying streams not supporting the ability
    # to query up to a given point.
    # Always use the `now_token` in `SyncResultBuilder`
    now_token = self.event_sources.get_current_token()
    log_kv({"now_token": now_token})

    logger.debug(
        "Calculating sync response for %r between %s and %s",
        sync_config.user,
        since_token,
        now_token,
    )

    user_id = sync_config.user.to_string()
    app_service = self.store.get_app_service_by_user_id(user_id)
    if app_service:
        # We no longer support AS users using /sync directly.
        # See https://github.com/matrix-org/matrix-doc/issues/1144
        raise NotImplementedError()
    else:
        joined_room_ids = await self.get_rooms_for_user_at(
            user_id, now_token.room_key
        )
    sync_result_builder = SyncResultBuilder(
        sync_config,
        full_state,
        since_token=since_token,
        now_token=now_token,
        joined_room_ids=joined_room_ids,
    )

    logger.debug("Fetching account data")

    account_data_by_room = await self._generate_sync_entry_for_account_data(
        sync_result_builder
    )

    logger.debug("Fetching room data")

    res = await self._generate_sync_entry_for_rooms(
        sync_result_builder, account_data_by_room
    )
    # `res` is a 4-tuple; unpacked twice to name all four elements.
    newly_joined_rooms, newly_joined_or_invited_or_knocked_users, _, _ = res
    _, _, newly_left_rooms, newly_left_users = res

    # Presence is skipped entirely on initial syncs whose filter blocks it.
    block_all_presence_data = (
        since_token is None and sync_config.filter_collection.blocks_all_presence()
    )
    if self.hs_config.server.use_presence and not block_all_presence_data:
        logger.debug("Fetching presence data")
        await self._generate_sync_entry_for_presence(
            sync_result_builder,
            newly_joined_rooms,
            newly_joined_or_invited_or_knocked_users,
        )

    logger.debug("Fetching to-device data")
    await self._generate_sync_entry_for_to_device(sync_result_builder)

    device_lists = await self._generate_sync_entry_for_device_list(
        sync_result_builder,
        newly_joined_rooms=newly_joined_rooms,
        newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users,
        newly_left_rooms=newly_left_rooms,
        newly_left_users=newly_left_users,
    )

    logger.debug("Fetching OTK data")
    device_id = sync_config.device_id
    one_time_key_counts: JsonDict = {}
    unused_fallback_key_types: List[str] = []
    if device_id:
        # TODO: We should have a way to let clients differentiate between the states of:
        # * no change in OTK count since the provided since token
        # * the server has zero OTKs left for this device
        # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
        one_time_key_counts = await self.store.count_e2e_one_time_keys(
            user_id, device_id
        )
        unused_fallback_key_types = (
            await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
        )

    logger.debug("Fetching group data")
    await self._generate_sync_entry_for_groups(sync_result_builder)

    num_events = 0

    # debug for https://github.com/matrix-org/synapse/issues/9424
    for joined_room in sync_result_builder.joined:
        num_events += len(joined_room.timeline.events)

    log_kv(
        {
            "joined_rooms_in_result": len(sync_result_builder.joined),
            "events_in_result": num_events,
        }
    )

    logger.debug("Sync response calculation complete")
    return SyncResult(
        presence=sync_result_builder.presence,
        account_data=sync_result_builder.account_data,
        joined=sync_result_builder.joined,
        invited=sync_result_builder.invited,
        knocked=sync_result_builder.knocked,
        archived=sync_result_builder.archived,
        to_device=sync_result_builder.to_device,
        device_lists=device_lists,
        groups=sync_result_builder.groups,
        device_one_time_keys_count=one_time_key_counts,
        device_unused_fallback_key_types=unused_fallback_key_types,
        next_batch=sync_result_builder.now_token,
    )
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2017-07-10 15:53:19 +02:00
|
|
|
@measure_func("_generate_sync_entry_for_groups")
async def _generate_sync_entry_for_groups(
    self, sync_result_builder: "SyncResultBuilder"
) -> None:
    """Populate the groups ("communities") section of the sync response.

    Fetches the user's group membership changes since the last sync (or all
    groups on an initial sync) and writes a `GroupsSyncResult` onto
    `sync_result_builder.groups`.

    Args:
        sync_result_builder: The mutable builder for this sync response.
    """
    user_id = sync_result_builder.sync_config.user.to_string()
    since_token = sync_result_builder.since_token
    now_token = sync_result_builder.now_token

    # Incremental sync with a groups position: only fetch changes; otherwise
    # fetch the full set of groups for the user.
    if since_token and since_token.groups_key:
        results = await self.store.get_groups_changes_for_user(
            user_id, since_token.groups_key, now_token.groups_key
        )
    else:
        results = await self.store.get_all_groups_for_user(
            user_id, now_token.groups_key
        )

    invited = {}
    joined = {}
    left = {}
    for result in results:
        membership = result["membership"]
        group_id = result["group_id"]
        gtype = result["type"]
        content = result["content"]

        if membership == "join":
            if gtype == "membership":
                # TODO: Add profile
                content.pop("membership", None)
                joined[group_id] = content["content"]
            else:
                # Non-membership updates are nested under their type.
                joined.setdefault(group_id, {})[gtype] = content
        elif membership == "invite":
            if gtype == "membership":
                content.pop("membership", None)
                invited[group_id] = content["content"]
        else:
            # Any other membership (e.g. leave): only membership rows matter.
            if gtype == "membership":
                left[group_id] = content["content"]

    sync_result_builder.groups = GroupsSyncResult(
        join=joined, invite=invited, leave=left
    )
|
|
|
|
|
2017-02-02 19:36:17 +01:00
|
|
|
    @measure_func("_generate_sync_entry_for_device_list")
    async def _generate_sync_entry_for_device_list(
        self,
        sync_result_builder: "SyncResultBuilder",
        newly_joined_rooms: Set[str],
        newly_joined_or_invited_or_knocked_users: Set[str],
        newly_left_rooms: Set[str],
        newly_left_users: Set[str],
    ) -> DeviceLists:
        """Generate the DeviceLists section of sync

        Args:
            sync_result_builder
            newly_joined_rooms: Set of rooms user has joined since previous sync
            newly_joined_or_invited_or_knocked_users: Set of users that have joined,
                been invited to a room or are knocking on a room since
                previous sync.
            newly_left_rooms: Set of rooms user has left since previous sync
            newly_left_users: Set of users that have left a room we're in since
                previous sync

        Returns:
            A DeviceLists struct: `changed` holds users whose device keys the
            client should refetch, `left` holds users the client can stop
            tracking. Both are empty when there is no device_list_key on the
            since token (i.e. on an initial sync).
        """

        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

        # We're going to mutate these fields, so lets copy them rather than
        # assume they won't get used later.
        newly_joined_or_invited_or_knocked_users = set(
            newly_joined_or_invited_or_knocked_users
        )
        newly_left_users = set(newly_left_users)

        if since_token and since_token.device_list_key:
            # We want to figure out what user IDs the client should refetch
            # device keys for, and which users we aren't going to track changes
            # for anymore.
            #
            # For the first step we check:
            #   a. if any users we share a room with have updated their devices,
            #   and
            #   b. we also check if we've joined any new rooms, or if a user has
            #   joined a room we're in.
            #
            # For the second step we just find any users we no longer share a
            # room with by looking at all users that have left a room plus users
            # that were in a room we've left.

            users_who_share_room = await self.store.get_users_who_share_room_with_user(
                user_id
            )

            # Always tell the user about their own devices. We check as the user
            # ID is almost certainly already included (unless they're not in any
            # rooms) and taking a copy of the set is relatively expensive.
            if user_id not in users_who_share_room:
                users_who_share_room = set(users_who_share_room)
                users_who_share_room.add(user_id)

            tracked_users = users_who_share_room

            # Step 1a, check for changes in devices of users we share a room with
            users_that_have_changed = await self.store.get_users_whose_devices_changed(
                since_token.device_list_key, tracked_users
            )

            # Step 1b, check for newly joined rooms
            for room_id in newly_joined_rooms:
                joined_users = await self.store.get_users_in_room(room_id)
                newly_joined_or_invited_or_knocked_users.update(joined_users)

            # TODO: Check that these users are actually new, i.e. either they
            # weren't in the previous sync *or* they left and rejoined.
            users_that_have_changed.update(newly_joined_or_invited_or_knocked_users)

            # Cross-signing: users whose signatures changed also need their
            # keys refetched, even if no device itself changed.
            user_signatures_changed = (
                await self.store.get_users_whose_signatures_changed(
                    user_id, since_token.device_list_key
                )
            )
            users_that_have_changed.update(user_signatures_changed)

            # Now find users that we no longer track
            for room_id in newly_left_rooms:
                left_users = await self.store.get_users_in_room(room_id)
                newly_left_users.update(left_users)

            # Remove any users that we still share a room with.
            newly_left_users -= users_who_share_room

            return DeviceLists(changed=users_that_have_changed, left=newly_left_users)
        else:
            return DeviceLists(changed=[], left=[])
|
2017-01-27 14:36:39 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def _generate_sync_entry_for_to_device(
|
|
|
|
self, sync_result_builder: "SyncResultBuilder"
|
|
|
|
) -> None:
|
2016-08-25 18:35:37 +02:00
|
|
|
"""Generates the portion of the sync response. Populates
|
|
|
|
`sync_result_builder` with the result.
|
|
|
|
"""
|
|
|
|
user_id = sync_result_builder.sync_config.user.to_string()
|
|
|
|
device_id = sync_result_builder.sync_config.device_id
|
|
|
|
now_token = sync_result_builder.now_token
|
|
|
|
since_stream_id = 0
|
|
|
|
if sync_result_builder.since_token is not None:
|
|
|
|
since_stream_id = int(sync_result_builder.since_token.to_device_key)
|
|
|
|
|
2022-02-01 15:13:38 +01:00
|
|
|
if device_id is not None and since_stream_id != int(now_token.to_device_key):
|
|
|
|
messages, stream_id = await self.store.get_messages_for_device(
|
2016-09-02 16:50:37 +02:00
|
|
|
user_id, device_id, since_stream_id, now_token.to_device_key
|
2016-08-30 11:58:46 +02:00
|
|
|
)
|
2016-12-15 19:13:58 +01:00
|
|
|
|
2021-04-01 18:08:21 +02:00
|
|
|
for message in messages:
|
|
|
|
# We pop here as we shouldn't be sending the message ID down
|
|
|
|
# `/sync`
|
|
|
|
message_id = message.pop("message_id", None)
|
|
|
|
if message_id:
|
|
|
|
set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
|
|
|
|
|
2017-03-09 15:50:40 +01:00
|
|
|
logger.debug(
|
2016-12-15 19:13:58 +01:00
|
|
|
"Returning %d to-device messages between %d and %d (current token: %d)",
|
2019-06-20 11:32:02 +02:00
|
|
|
len(messages),
|
|
|
|
since_stream_id,
|
|
|
|
stream_id,
|
|
|
|
now_token.to_device_key,
|
2016-12-15 19:13:58 +01:00
|
|
|
)
|
2016-08-30 11:58:46 +02:00
|
|
|
sync_result_builder.now_token = now_token.copy_and_replace(
|
|
|
|
"to_device_key", stream_id
|
|
|
|
)
|
|
|
|
sync_result_builder.to_device = messages
|
|
|
|
else:
|
|
|
|
sync_result_builder.to_device = []
|
2016-08-25 18:35:37 +02:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
    async def _generate_sync_entry_for_account_data(
        self, sync_result_builder: "SyncResultBuilder"
    ) -> Dict[str, Dict[str, JsonDict]]:
        """Generates the account data portion of the sync response.

        Account data (called "Client Config" in the spec) can be set either globally
        or for a specific room. Account data consists of a list of events which
        accumulate state, much like a room.

        This function retrieves global and per-room account data. The former is written
        to the given `sync_result_builder`. The latter is returned directly, to be
        later written to the `sync_result_builder` on a room-by-room basis.

        Args:
            sync_result_builder

        Returns:
            A dictionary whose keys (room ids) map to the per room account data for that
            room.
        """
        sync_config = sync_result_builder.sync_config
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

        if since_token and not sync_result_builder.full_state:
            # Incremental sync: only fetch account data that has changed since
            # the previous sync.
            (
                global_account_data,
                account_data_by_room,
            ) = await self.store.get_updated_account_data_for_user(
                user_id, since_token.account_data_key
            )

            # Push rules are stored separately from other account data; only
            # include them if they have actually changed in the window.
            push_rules_changed = await self.store.have_push_rules_changed_for_user(
                user_id, int(since_token.push_rules_key)
            )

            if push_rules_changed:
                global_account_data["m.push_rules"] = await self.push_rules_for_user(
                    sync_config.user
                )
        else:
            # Initial or full-state sync: fetch everything, and always include
            # the user's push rules.
            (
                global_account_data,
                account_data_by_room,
            ) = await self.store.get_account_data_for_user(sync_config.user.to_string())

            global_account_data["m.push_rules"] = await self.push_rules_for_user(
                sync_config.user
            )

        # Apply the client's filter to the global account data events before
        # writing them to the builder.
        account_data_for_user = await sync_config.filter_collection.filter_account_data(
            [
                {"type": account_data_type, "content": content}
                for account_data_type, content in global_account_data.items()
            ]
        )

        sync_result_builder.account_data = account_data_for_user

        return account_data_by_room
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
    async def _generate_sync_entry_for_presence(
        self,
        sync_result_builder: "SyncResultBuilder",
        newly_joined_rooms: Set[str],
        newly_joined_or_invited_users: Set[str],
    ) -> None:
        """Generates the presence portion of the sync response. Populates the
        `sync_result_builder` with the result.

        Args:
            sync_result_builder
            newly_joined_rooms: Set of rooms that the user has joined since
                the last sync (or empty if an initial sync)
            newly_joined_or_invited_users: Set of users that have joined or
                been invited to rooms since the last sync (or empty if an
                initial sync)
        """
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config
        user = sync_result_builder.sync_config.user

        presence_source = self.event_sources.sources.presence

        since_token = sync_result_builder.since_token
        # On initial/full-state syncs we fetch from scratch and exclude
        # offline users; incremental syncs resume from the previous presence
        # key and include offline transitions.
        presence_key = None
        include_offline = False
        if since_token and not sync_result_builder.full_state:
            presence_key = since_token.presence_key
            include_offline = True

        presence, presence_key = await presence_source.get_new_events(
            user=user,
            from_key=presence_key,
            is_guest=sync_config.is_guest,
            include_offline=include_offline,
        )
        assert presence_key
        # Advance the presence position in the token we hand back to the
        # client, so the next incremental sync starts from here.
        sync_result_builder.now_token = now_token.copy_and_replace(
            "presence_key", presence_key
        )

        # Additionally fetch current presence for newly joined/invited users
        # and for everyone in rooms we've just joined, excluding ourselves.
        extra_users_ids = set(newly_joined_or_invited_users)
        for room_id in newly_joined_rooms:
            users = await self.store.get_users_in_room(room_id)
            extra_users_ids.update(users)
        extra_users_ids.discard(user.to_string())

        if extra_users_ids:
            states = await self.presence_handler.get_states(extra_users_ids)
            presence.extend(states)

        # Deduplicate the presence entries so that there's at most one per user
        presence = list({p.user_id: p for p in presence}.values())

        presence = await sync_config.filter_collection.filter_presence(presence)

        sync_result_builder.presence = presence
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
    async def _generate_sync_entry_for_rooms(
        self,
        sync_result_builder: "SyncResultBuilder",
        account_data_by_room: Dict[str, Dict[str, JsonDict]],
    ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
        """Generates the rooms portion of the sync response. Populates the
        `sync_result_builder` with the result.

        In the response that reaches the client, rooms are divided into four categories:
        `invite`, `join`, `knock`, `leave`. These aren't the same as the four sets of
        room ids returned by this function.

        Args:
            sync_result_builder
            account_data_by_room: Dictionary of per room account data

        Returns:
            Returns a 4-tuple describing rooms the user has joined or left, and users who've
            joined or left rooms any rooms the user is in. This gets used later in
            `_generate_sync_entry_for_device_list`.

            Its entries are:
            - newly_joined_rooms
            - newly_joined_or_invited_or_knocked_users
            - newly_left_rooms
            - newly_left_users
        """
        since_token = sync_result_builder.since_token

        # 1. Start by fetching all ephemeral events in rooms we've joined (if required).
        user_id = sync_result_builder.sync_config.user.to_string()
        block_all_room_ephemeral = (
            since_token is None
            and sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral()
        )

        if block_all_room_ephemeral:
            ephemeral_by_room: Dict[str, List[JsonDict]] = {}
        else:
            now_token, ephemeral_by_room = await self.ephemeral_by_room(
                sync_result_builder,
                now_token=sync_result_builder.now_token,
                since_token=sync_result_builder.since_token,
            )
            sync_result_builder.now_token = now_token

        # 2. We check up front if anything has changed, if it hasn't then there is
        # no point in going further.
        if not sync_result_builder.full_state:
            if since_token and not ephemeral_by_room and not account_data_by_room:
                have_changed = await self._have_rooms_changed(sync_result_builder)
                log_kv({"rooms_have_changed": have_changed})
                if not have_changed:
                    # Room tags are the last thing that could still require a
                    # response; if they haven't changed either, short-circuit.
                    tags_by_room = await self.store.get_updated_tags(
                        user_id, since_token.account_data_key
                    )
                    if not tags_by_room:
                        logger.debug("no-oping sync")
                        return set(), set(), set(), set()

        # 3. Work out which rooms need reporting in the sync response.
        ignored_users = await self._get_ignored_users(user_id)
        if since_token:
            room_changes = await self._get_rooms_changed(
                sync_result_builder, ignored_users
            )
            tags_by_room = await self.store.get_updated_tags(
                user_id, since_token.account_data_key
            )
        else:
            room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
            tags_by_room = await self.store.get_tags_for_user(user_id)

        log_kv({"rooms_changed": len(room_changes.room_entries)})

        room_entries = room_changes.room_entries
        invited = room_changes.invited
        knocked = room_changes.knocked
        newly_joined_rooms = room_changes.newly_joined_rooms
        newly_left_rooms = room_changes.newly_left_rooms

        # 4. We need to apply further processing to `room_entries` (rooms considered
        # joined or archived).
        async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None:
            # Fill in timeline/state/ephemeral data for a single room entry.
            logger.debug("Generating room entry for %s", room_entry.room_id)
            await self._generate_room_entry(
                sync_result_builder,
                ignored_users,
                room_entry,
                ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
                tags=tags_by_room.get(room_entry.room_id),
                account_data=account_data_by_room.get(room_entry.room_id, {}),
                always_include=sync_result_builder.full_state,
            )
            logger.debug("Generated room entry for %s", room_entry.room_id)

        # Process room entries concurrently, at most 10 at a time.
        with start_active_span("sync.generate_room_entries"):
            await concurrently_execute(handle_room_entries, room_entries, 10)

        sync_result_builder.invited.extend(invited)
        sync_result_builder.knocked.extend(knocked)

        # 5. Work out which users have joined or left rooms we're in. We use this
        # to build the device_list part of the sync response in
        # `_generate_sync_entry_for_device_list`.
        (
            newly_joined_or_invited_or_knocked_users,
            newly_left_users,
        ) = sync_result_builder.calculate_user_changes()

        return (
            set(newly_joined_rooms),
            newly_joined_or_invited_or_knocked_users,
            set(newly_left_rooms),
            newly_left_users,
        )
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-12-07 13:42:05 +01:00
|
|
|
async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]:
|
|
|
|
"""Retrieve the users ignored by the given user from their global account_data.
|
|
|
|
|
|
|
|
Returns an empty set if
|
|
|
|
- there is no global account_data entry for ignored_users
|
|
|
|
- there is such an entry, but it's not a JSON object.
|
|
|
|
"""
|
|
|
|
# TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead?
|
|
|
|
ignored_account_data = (
|
|
|
|
await self.store.get_global_account_data_by_type_for_user(
|
2022-01-21 09:38:36 +01:00
|
|
|
user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST
|
2021-12-07 13:42:05 +01:00
|
|
|
)
|
|
|
|
)
|
|
|
|
|
|
|
|
# If there is ignored users account data and it matches the proper type,
|
|
|
|
# then use it.
|
|
|
|
ignored_users: FrozenSet[str] = frozenset()
|
|
|
|
if ignored_account_data:
|
|
|
|
ignored_users_data = ignored_account_data.get("ignored_users", {})
|
|
|
|
if isinstance(ignored_users_data, dict):
|
|
|
|
ignored_users = frozenset(ignored_users_data.keys())
|
|
|
|
return ignored_users
|
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
async def _have_rooms_changed(
|
|
|
|
self, sync_result_builder: "SyncResultBuilder"
|
|
|
|
) -> bool:
|
2017-03-16 14:04:07 +01:00
|
|
|
"""Returns whether there may be any new events that should be sent down
|
|
|
|
the sync. Returns True if there are.
|
2021-12-02 21:58:32 +01:00
|
|
|
|
|
|
|
Does not modify the `sync_result_builder`.
|
2017-03-16 12:56:59 +01:00
|
|
|
"""
|
2017-03-15 19:13:59 +01:00
|
|
|
user_id = sync_result_builder.sync_config.user.to_string()
|
|
|
|
since_token = sync_result_builder.since_token
|
|
|
|
now_token = sync_result_builder.now_token
|
|
|
|
|
2017-03-16 12:56:59 +01:00
|
|
|
assert since_token
|
2017-03-15 19:13:59 +01:00
|
|
|
|
2021-12-02 21:58:32 +01:00
|
|
|
# Get a list of membership change events that have happened to the user
|
|
|
|
# requesting the sync.
|
|
|
|
membership_changes = await self.store.get_membership_changes_for_user(
|
2017-03-15 19:13:59 +01:00
|
|
|
user_id, since_token.room_key, now_token.room_key
|
|
|
|
)
|
|
|
|
|
2021-12-02 21:58:32 +01:00
|
|
|
if membership_changes:
|
2019-07-23 15:00:55 +02:00
|
|
|
return True
|
2017-03-15 19:13:59 +01:00
|
|
|
|
2020-09-11 13:22:55 +02:00
|
|
|
stream_id = since_token.room_key.stream
|
2018-03-05 13:06:19 +01:00
|
|
|
for room_id in sync_result_builder.joined_room_ids:
|
2017-03-16 12:56:59 +01:00
|
|
|
if self.store.has_room_changed_since(room_id, stream_id):
|
2019-07-23 15:00:55 +02:00
|
|
|
return True
|
|
|
|
return False
|
2017-03-15 19:13:59 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
    async def _get_rooms_changed(
        self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str]
    ) -> _RoomChanges:
        """Determine the changes in rooms to report to the user.

        This function is a first pass at generating the rooms part of the sync response.
        It determines which rooms have changed during the sync period, and categorises
        them into four buckets: "knock", "invite", "join" and "leave".

        1. Finds all membership changes for the user in the sync period (from
           `since_token` up to `now_token`).
        2. Uses those to place the room in one of the four categories above.
        3. Builds a `_RoomChanges` struct to record this, and return that struct.

        For rooms classified as "knock", "invite" or "leave", we just need to report
        a single membership event in the eventual /sync response. For "join" we need
        to fetch additional non-membership events, e.g. messages in the room. That is
        more complicated, so instead we report an intermediary `RoomSyncResultBuilder`
        struct, and leave the additional work to `_generate_room_entry`.

        The sync_result_builder is not modified by this function.
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        # Only called for incremental syncs, so a since token must exist.
        assert since_token

        # TODO: we've already called this function and ran this query in
        # _have_rooms_changed. We could keep the results in memory to avoid a
        # second query, at the cost of more complicated source code.
        membership_change_events = await self.store.get_membership_changes_for_user(
            user_id, since_token.room_key, now_token.room_key
        )

        # Group the user's membership changes by room, preserving stream order
        # within each room.
        mem_change_events_by_room_id: Dict[str, List[EventBase]] = {}
        for event in membership_change_events:
            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)

        newly_joined_rooms: List[str] = []
        newly_left_rooms: List[str] = []
        room_entries: List[RoomSyncResultBuilder] = []
        invited: List[InvitedSyncResult] = []
        knocked: List[KnockedSyncResult] = []
        for room_id, events in mem_change_events_by_room_id.items():
            # The body of this loop will add this room to at least one of the five lists
            # above. Things get messy if you've e.g. joined, left, joined then left the
            # room all in the same sync period.
            logger.debug(
                "Membership changes in %s: [%s]",
                room_id,
                ", ".join("%s (%s)" % (e.event_id, e.membership) for e in events),
            )

            non_joins = [e for e in events if e.membership != Membership.JOIN]
            has_join = len(non_joins) != len(events)

            # We want to figure out if we joined the room at some point since
            # the last sync (even if we have since left). This is to make sure
            # we do send down the room, and with full state, where necessary

            old_state_ids = None
            if room_id in sync_result_builder.joined_room_ids and non_joins:
                # Always include if the user (re)joined the room, especially
                # important so that device list changes are calculated correctly.
                # If there are non-join member events, but we are still in the room,
                # then the user must have left and joined
                newly_joined_rooms.append(room_id)

                # User is in the room so we don't need to do the invite/leave checks
                continue

            if room_id in sync_result_builder.joined_room_ids or has_join:
                # Look at what our membership was at the since token to decide
                # whether this counts as a new join.
                old_state_ids = await self.get_state_at(room_id, since_token)
                old_mem_ev_id = old_state_ids.get((EventTypes.Member, user_id), None)
                old_mem_ev = None
                if old_mem_ev_id:
                    old_mem_ev = await self.store.get_event(
                        old_mem_ev_id, allow_none=True
                    )

                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
                    newly_joined_rooms.append(room_id)

            # If user is in the room then we don't need to do the invite/leave checks
            if room_id in sync_result_builder.joined_room_ids:
                continue

            if not non_joins:
                continue
            last_non_join = non_joins[-1]

            # Check if we have left the room. This can either be because we were
            # joined before *or* that we since joined and then left.
            if events[-1].membership != Membership.JOIN:
                if has_join:
                    newly_left_rooms.append(room_id)
                else:
                    if not old_state_ids:
                        old_state_ids = await self.get_state_at(room_id, since_token)
                        old_mem_ev_id = old_state_ids.get(
                            (EventTypes.Member, user_id), None
                        )
                        old_mem_ev = None
                        if old_mem_ev_id:
                            old_mem_ev = await self.store.get_event(
                                old_mem_ev_id, allow_none=True
                            )
                    if old_mem_ev and old_mem_ev.membership == Membership.JOIN:
                        newly_left_rooms.append(room_id)

            # Only bother if we're still currently invited
            should_invite = last_non_join.membership == Membership.INVITE
            if should_invite:
                # Invites from ignored users are suppressed entirely.
                if last_non_join.sender not in ignored_users:
                    invite_room_sync = InvitedSyncResult(room_id, invite=last_non_join)
                    if invite_room_sync:
                        invited.append(invite_room_sync)

            # Only bother if our latest membership in the room is knock (and we haven't
            # been accepted/rejected in the meantime).
            should_knock = last_non_join.membership == Membership.KNOCK
            if should_knock:
                knock_room_sync = KnockedSyncResult(room_id, knock=last_non_join)
                if knock_room_sync:
                    knocked.append(knock_room_sync)

            # Always include leave/ban events. Just take the last one.
            # TODO: How do we handle ban -> leave in same batch?
            leave_events = [
                e
                for e in non_joins
                if e.membership in (Membership.LEAVE, Membership.BAN)
            ]

            if leave_events:
                leave_event = leave_events[-1]
                leave_position = await self.store.get_position_for_event(
                    leave_event.event_id
                )

                # If the leave event happened before the since token then we
                # bail.
                if since_token and not leave_position.persisted_after(
                    since_token.room_key
                ):
                    continue

                # We can safely convert the position of the leave event into a
                # stream token as it'll only be used in the context of this
                # room. (c.f. the docstring of `to_room_stream_token`).
                leave_token = since_token.copy_and_replace(
                    "room_key", leave_position.to_room_stream_token()
                )

                # If this is an out of band message, like a remote invite
                # rejection, we include it in the recents batch. Otherwise, we
                # let _load_filtered_recents handle fetching the correct
                # batches.
                #
                # This is all screaming out for a refactor, as the logic here is
                # subtle and the moving parts numerous.
                if leave_event.internal_metadata.is_out_of_band_membership():
                    batch_events: Optional[List[EventBase]] = [leave_event]
                else:
                    batch_events = None

                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=room_id,
                        rtype="archived",
                        events=batch_events,
                        newly_joined=room_id in newly_joined_rooms,
                        full_state=False,
                        since_token=since_token,
                        upto_token=leave_token,
                    )
                )

        timeline_limit = sync_config.filter_collection.timeline_limit()

        # Get all events since the `from_key` in rooms we're currently joined to.
        # If there are too many, we get the most recent events only. This leaves
        # a "gap" in the timeline, as described by the spec for /sync.
        room_to_events = await self.store.get_room_events_stream_for_rooms(
            room_ids=sync_result_builder.joined_room_ids,
            from_key=since_token.room_key,
            to_key=now_token.room_key,
            limit=timeline_limit + 1,
        )

        # We loop through all room ids, even if there are no new events, in case
        # there are non room events that we need to notify about.
        for room_id in sync_result_builder.joined_room_ids:
            room_entry = room_to_events.get(room_id, None)

            newly_joined = room_id in newly_joined_rooms
            if room_entry:
                events, start_key = room_entry

                prev_batch_token = now_token.copy_and_replace("room_key", start_key)

                entry = RoomSyncResultBuilder(
                    room_id=room_id,
                    rtype="joined",
                    events=events,
                    newly_joined=newly_joined,
                    full_state=False,
                    since_token=None if newly_joined else since_token,
                    upto_token=prev_batch_token,
                )
            else:
                entry = RoomSyncResultBuilder(
                    room_id=room_id,
                    rtype="joined",
                    events=[],
                    newly_joined=newly_joined,
                    full_state=False,
                    since_token=since_token,
                    upto_token=since_token,
                )

            room_entries.append(entry)

        return _RoomChanges(
            room_entries,
            invited,
            knocked,
            newly_joined_rooms,
            newly_left_rooms,
        )
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
    async def _get_all_rooms(
        self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str]
    ) -> _RoomChanges:
        """Returns entries for all rooms for the user.

        Like `_get_rooms_changed`, but assumes the `since_token` is `None`.

        This function does not modify the sync_result_builder.

        Args:
            sync_result_builder: Supplies the user, tokens and sync config;
                only read, never written.
            ignored_users: Set of users ignored by user. Invites from these
                senders are dropped from the result.

        Returns:
            A `_RoomChanges` with room entries for joined/archived rooms plus
            invite/knock results. The newly-joined/newly-left lists are empty,
            since with no `since_token` there is no "previous" state to diff.
        """

        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        room_list = await self.store.get_rooms_for_local_user_where_membership_is(
            user_id=user_id,
            membership_list=Membership.LIST,
        )

        room_entries: List[RoomSyncResultBuilder] = []
        invited: List[InvitedSyncResult] = []
        knocked: List[KnockedSyncResult] = []

        for event in room_list:
            # Skip rooms with versions this server does not understand; we
            # could not validate events in them.
            if event.room_version_id not in KNOWN_ROOM_VERSIONS:
                continue

            if event.membership == Membership.JOIN:
                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=event.room_id,
                        rtype="joined",
                        # events=None: the timeline is loaded later by
                        # `_generate_room_entry` (full_state=True).
                        events=None,
                        newly_joined=False,
                        full_state=True,
                        since_token=since_token,
                        upto_token=now_token,
                    )
                )
            elif event.membership == Membership.INVITE:
                if event.sender in ignored_users:
                    continue
                invite = await self.store.get_event(event.event_id)
                invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite))
            elif event.membership == Membership.KNOCK:
                knock = await self.store.get_event(event.event_id)
                knocked.append(KnockedSyncResult(room_id=event.room_id, knock=knock))
            elif event.membership in (Membership.LEAVE, Membership.BAN):
                # Always send down rooms we were banned from or kicked from.
                if not sync_config.filter_collection.include_leave:
                    if event.membership == Membership.LEAVE:
                        if user_id == event.sender:
                            # A self-initiated leave with include_leave off:
                            # the client asked not to see these rooms.
                            continue

                # Cap the archived room's timeline at the leave/ban event.
                leave_token = now_token.copy_and_replace(
                    "room_key", RoomStreamToken(None, event.stream_ordering)
                )
                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=event.room_id,
                        rtype="archived",
                        events=None,
                        newly_joined=False,
                        full_state=True,
                        since_token=since_token,
                        upto_token=leave_token,
                    )
                )

        return _RoomChanges(room_entries, invited, knocked, [], [])
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2019-12-05 18:58:25 +01:00
|
|
|
async def _generate_room_entry(
|
2019-06-20 11:32:02 +02:00
|
|
|
self,
|
2020-02-03 19:05:44 +01:00
|
|
|
sync_result_builder: "SyncResultBuilder",
|
2020-10-05 15:28:05 +02:00
|
|
|
ignored_users: FrozenSet[str],
|
2020-02-03 19:05:44 +01:00
|
|
|
room_builder: "RoomSyncResultBuilder",
|
|
|
|
ephemeral: List[JsonDict],
|
2020-09-03 13:54:10 +02:00
|
|
|
tags: Optional[Dict[str, Dict[str, Any]]],
|
2020-02-03 19:05:44 +01:00
|
|
|
account_data: Dict[str, JsonDict],
|
|
|
|
always_include: bool = False,
|
2021-09-20 14:56:23 +02:00
|
|
|
) -> None:
|
2016-05-24 11:53:03 +02:00
|
|
|
"""Populates the `joined` and `archived` section of `sync_result_builder`
|
2016-05-24 11:14:53 +02:00
|
|
|
based on the `room_builder`.
|
|
|
|
|
2021-12-10 20:19:48 +01:00
|
|
|
Ideally, we want to report all events whose stream ordering `s` lies in the
|
|
|
|
range `since_token < s <= now_token`, where the two tokens are read from the
|
|
|
|
sync_result_builder.
|
|
|
|
|
|
|
|
If there are too many events in that range to report, things get complicated.
|
|
|
|
In this situation we return a truncated list of the most recent events, and
|
|
|
|
indicate in the response that there is a "gap" of omitted events. Lots of this
|
|
|
|
is handled in `_load_filtered_recents`, but some of is handled in this method.
|
|
|
|
|
|
|
|
Additionally:
|
|
|
|
- we include a "state_delta", to describe the changes in state over the gap,
|
|
|
|
- we include all membership events applying to the user making the request,
|
|
|
|
even those in the gap.
|
|
|
|
|
|
|
|
See the spec for the rationale:
|
|
|
|
https://spec.matrix.org/v1.1/client-server-api/#syncing
|
|
|
|
|
2016-05-24 11:14:53 +02:00
|
|
|
Args:
|
2020-02-03 19:05:44 +01:00
|
|
|
sync_result_builder
|
|
|
|
ignored_users: Set of users ignored by user.
|
|
|
|
room_builder
|
|
|
|
ephemeral: List of new ephemeral events for room
|
|
|
|
tags: List of *all* tags for room, or None if there has been
|
2016-05-24 11:14:53 +02:00
|
|
|
no change.
|
2020-02-03 19:05:44 +01:00
|
|
|
account_data: List of new account data for room
|
|
|
|
always_include: Always include this room in the sync response,
|
2016-05-24 11:14:53 +02:00
|
|
|
even if empty.
|
|
|
|
"""
|
2016-05-23 18:37:01 +02:00
|
|
|
newly_joined = room_builder.newly_joined
|
|
|
|
full_state = (
|
2019-06-20 11:32:02 +02:00
|
|
|
room_builder.full_state or newly_joined or sync_result_builder.full_state
|
2016-05-23 18:37:01 +02:00
|
|
|
)
|
2016-05-24 15:00:43 +02:00
|
|
|
events = room_builder.events
|
|
|
|
|
|
|
|
# We want to shortcut out as early as possible.
|
|
|
|
if not (always_include or account_data or ephemeral or full_state):
|
|
|
|
if events == [] and tags is None:
|
|
|
|
return
|
|
|
|
|
|
|
|
now_token = sync_result_builder.now_token
|
|
|
|
sync_config = sync_result_builder.sync_config
|
|
|
|
|
|
|
|
room_id = room_builder.room_id
|
2016-05-23 18:37:01 +02:00
|
|
|
since_token = room_builder.since_token
|
|
|
|
upto_token = room_builder.upto_token
|
|
|
|
|
2021-12-21 12:10:36 +01:00
|
|
|
with start_active_span("sync.generate_room_entry"):
|
2021-09-15 18:14:25 +02:00
|
|
|
set_tag("room_id", room_id)
|
|
|
|
log_kv({"events": len(events or ())})
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
log_kv(
|
|
|
|
{
|
|
|
|
"since_token": since_token,
|
|
|
|
"upto_token": upto_token,
|
|
|
|
}
|
|
|
|
)
|
2019-08-06 13:55:36 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
batch = await self._load_filtered_recents(
|
2019-06-20 11:32:02 +02:00
|
|
|
room_id,
|
2021-09-15 18:14:25 +02:00
|
|
|
sync_config,
|
|
|
|
now_token=upto_token,
|
|
|
|
since_token=since_token,
|
|
|
|
potential_recents=events,
|
|
|
|
newly_joined_room=newly_joined,
|
|
|
|
)
|
|
|
|
log_kv(
|
|
|
|
{
|
|
|
|
"batch_events": len(batch.events),
|
|
|
|
"prev_batch": batch.prev_batch,
|
|
|
|
"batch_limited": batch.limited,
|
|
|
|
}
|
2019-03-06 18:29:15 +01:00
|
|
|
)
|
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
# Note: `batch` can be both empty and limited here in the case where
|
|
|
|
# `_load_filtered_recents` can't find any events the user should see
|
|
|
|
# (e.g. due to having ignored the sender of the last 50 events).
|
2018-09-06 17:46:51 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
# When we join the room (or the client requests full_state), we should
|
|
|
|
# send down any existing tags. Usually the user won't have tags in a
|
|
|
|
# newly joined room, unless either a) they've joined before or b) the
|
|
|
|
# tag was added by synapse e.g. for server notice rooms.
|
|
|
|
if full_state:
|
|
|
|
user_id = sync_result_builder.sync_config.user.to_string()
|
|
|
|
tags = await self.store.get_tags_for_room(user_id, room_id)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
# If there aren't any tags, don't send the empty tags list down
|
|
|
|
# sync
|
|
|
|
if not tags:
|
|
|
|
tags = None
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
account_data_events = []
|
|
|
|
if tags is not None:
|
|
|
|
account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
for account_data_type, content in account_data.items():
|
|
|
|
account_data_events.append(
|
|
|
|
{"type": account_data_type, "content": content}
|
|
|
|
)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
account_data_events = (
|
2021-11-09 14:10:58 +01:00
|
|
|
await sync_config.filter_collection.filter_room_account_data(
|
2021-09-15 18:14:25 +02:00
|
|
|
account_data_events
|
|
|
|
)
|
|
|
|
)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-11-09 14:10:58 +01:00
|
|
|
ephemeral = await sync_config.filter_collection.filter_room_ephemeral(
|
|
|
|
ephemeral
|
|
|
|
)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
if not (
|
|
|
|
always_include
|
|
|
|
or batch
|
|
|
|
or account_data_events
|
|
|
|
or ephemeral
|
|
|
|
or full_state
|
|
|
|
):
|
|
|
|
return
|
2018-08-16 10:46:50 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
state = await self.compute_state_delta(
|
|
|
|
room_id,
|
|
|
|
batch,
|
|
|
|
sync_config,
|
|
|
|
since_token,
|
|
|
|
now_token,
|
|
|
|
full_state=full_state,
|
2016-05-23 18:37:01 +02:00
|
|
|
)
|
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
summary: Optional[JsonDict] = {}
|
|
|
|
|
|
|
|
# we include a summary in room responses when we're lazy loading
|
|
|
|
# members (as the client otherwise doesn't have enough info to form
|
|
|
|
# the name itself).
|
|
|
|
if sync_config.filter_collection.lazy_load_members() and (
|
|
|
|
# we recalculate the summary:
|
|
|
|
# if there are membership changes in the timeline, or
|
|
|
|
# if membership has changed during a gappy sync, or
|
|
|
|
# if this is an initial sync.
|
|
|
|
any(ev.type == EventTypes.Member for ev in batch.events)
|
|
|
|
or (
|
|
|
|
# XXX: this may include false positives in the form of LL
|
|
|
|
# members which have snuck into state
|
|
|
|
batch.limited
|
|
|
|
and any(t == EventTypes.Member for (t, k) in state)
|
|
|
|
)
|
|
|
|
or since_token is None
|
|
|
|
):
|
|
|
|
summary = await self.compute_summary(
|
|
|
|
room_id, sync_config, batch, state, now_token
|
|
|
|
)
|
|
|
|
|
|
|
|
if room_builder.rtype == "joined":
|
|
|
|
unread_notifications: Dict[str, int] = {}
|
|
|
|
room_sync = JoinedSyncResult(
|
|
|
|
room_id=room_id,
|
|
|
|
timeline=batch,
|
|
|
|
state=state,
|
|
|
|
ephemeral=ephemeral,
|
|
|
|
account_data=account_data_events,
|
|
|
|
unread_notifications=unread_notifications,
|
|
|
|
summary=summary,
|
|
|
|
unread_count=0,
|
|
|
|
)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
if room_sync or always_include:
|
|
|
|
notifs = await self.unread_notifs_for_room_id(room_id, sync_config)
|
2020-09-02 18:19:37 +02:00
|
|
|
|
2021-12-21 14:25:34 +01:00
|
|
|
unread_notifications["notification_count"] = notifs.notify_count
|
|
|
|
unread_notifications["highlight_count"] = notifs.highlight_count
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-12-21 14:25:34 +01:00
|
|
|
room_sync.unread_count = notifs.unread_count
|
2018-09-12 01:50:39 +02:00
|
|
|
|
2021-09-15 18:14:25 +02:00
|
|
|
sync_result_builder.joined.append(room_sync)
|
|
|
|
|
|
|
|
if batch.limited and since_token:
|
|
|
|
user_id = sync_result_builder.sync_config.user.to_string()
|
|
|
|
logger.debug(
|
|
|
|
"Incremental gappy sync of %s for user %s with %d state events"
|
|
|
|
% (room_id, user_id, len(state))
|
|
|
|
)
|
|
|
|
elif room_builder.rtype == "archived":
|
|
|
|
archived_room_sync = ArchivedSyncResult(
|
|
|
|
room_id=room_id,
|
|
|
|
timeline=batch,
|
|
|
|
state=state,
|
|
|
|
account_data=account_data_events,
|
2018-09-12 01:50:39 +02:00
|
|
|
)
|
2021-09-15 18:14:25 +02:00
|
|
|
if archived_room_sync or always_include:
|
|
|
|
sync_result_builder.archived.append(archived_room_sync)
|
|
|
|
else:
|
|
|
|
raise Exception("Unrecognized rtype: %r", room_builder.rtype)
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
    async def get_rooms_for_user_at(
        self, user_id: str, room_key: RoomStreamToken
    ) -> FrozenSet[str]:
        """Get set of joined rooms for a user at the given stream ordering.

        The stream ordering *must* be recent, otherwise this may throw an
        exception if older than a month. (This function is called with the
        current token, which should be perfectly fine).

        Args:
            user_id: The user to look up joined rooms for.
            room_key: The point in the room stream to check membership at.

        Returns:
            Set of room_ids the user is in at the given `room_key`.
        """
        joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id)

        joined_room_ids = set()

        # We need to check that the stream ordering of the join for each room
        # is before the stream_ordering asked for. This might not be the case
        # if the user joins a room between us getting the current token and
        # calling `get_rooms_for_user_with_stream_ordering`.
        # If the membership's stream ordering is after the given stream
        # ordering, we need to go and work out if the user was in the room
        # before.
        for joined_room in joined_rooms:
            if not joined_room.event_pos.persisted_after(room_key):
                # Common case: the join happened at or before `room_key`.
                joined_room_ids.add(joined_room.room_id)
                continue

            logger.info("User joined room after current token: %s", joined_room.room_id)

            # Rare race: the join was persisted after `room_key`. Rewind to the
            # room state as of the join's stream position and check whether the
            # user was already a member there.
            extrems = (
                await self.store.get_forward_extremities_for_room_at_stream_ordering(
                    joined_room.room_id, joined_room.event_pos.stream
                )
            )
            users_in_room = await self.state.get_current_users_in_room(
                joined_room.room_id, extrems
            )
            if user_id in users_in_room:
                joined_room_ids.add(joined_room.room_id)

        return frozenset(joined_room_ids)
|
2018-03-05 13:06:19 +01:00
|
|
|
|
2016-01-19 12:35:50 +01:00
|
|
|
|
2020-02-03 19:05:44 +01:00
|
|
|
def _action_has_highlight(actions: List[JsonDict]) -> bool:
|
2016-01-19 12:35:50 +01:00
|
|
|
for action in actions:
|
|
|
|
try:
|
|
|
|
if action.get("set_tweak", None) == "highlight":
|
|
|
|
return action.get("value", True)
|
|
|
|
except AttributeError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
return False
|
2016-02-01 16:59:40 +01:00
|
|
|
|
|
|
|
|
2018-07-23 20:21:20 +02:00
|
|
|
def _calculate_state(
|
2020-02-03 19:05:44 +01:00
|
|
|
timeline_contains: StateMap[str],
|
|
|
|
timeline_start: StateMap[str],
|
|
|
|
previous: StateMap[str],
|
|
|
|
current: StateMap[str],
|
|
|
|
lazy_load_members: bool,
|
|
|
|
) -> StateMap[str]:
|
2016-02-01 16:59:40 +01:00
|
|
|
"""Works out what state to include in a sync response.
|
|
|
|
|
|
|
|
Args:
|
2020-02-03 19:05:44 +01:00
|
|
|
timeline_contains: state in the timeline
|
|
|
|
timeline_start: state at the start of the timeline
|
|
|
|
previous: state at the end of the previous sync (or empty dict
|
2016-02-01 17:52:27 +01:00
|
|
|
if this is an initial sync)
|
2020-02-03 19:05:44 +01:00
|
|
|
current: state at the end of the timeline
|
|
|
|
lazy_load_members: whether to return members from timeline_start
|
2018-07-24 13:39:40 +02:00
|
|
|
or not. assumes that timeline_start has already been filtered to
|
|
|
|
include only the members the client needs to know about.
|
2016-02-01 16:59:40 +01:00
|
|
|
"""
|
2016-08-25 19:59:44 +02:00
|
|
|
event_id_to_key = {
|
|
|
|
e: key
|
|
|
|
for key, e in itertools.chain(
|
2020-06-15 13:03:36 +02:00
|
|
|
timeline_contains.items(),
|
|
|
|
previous.items(),
|
|
|
|
timeline_start.items(),
|
|
|
|
current.items(),
|
2016-02-01 16:59:40 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-06-15 13:03:36 +02:00
|
|
|
c_ids = set(current.values())
|
|
|
|
ts_ids = set(timeline_start.values())
|
|
|
|
p_ids = set(previous.values())
|
|
|
|
tc_ids = set(timeline_contains.values())
|
2016-02-01 16:59:40 +01:00
|
|
|
|
2018-07-24 13:39:40 +02:00
|
|
|
# If we are lazyloading room members, we explicitly add the membership events
|
|
|
|
# for the senders in the timeline into the state block returned by /sync,
|
|
|
|
# as we may not have sent them to the client before. We find these membership
|
|
|
|
# events by filtering them out of timeline_start, which has already been filtered
|
|
|
|
# to only include membership events for the senders in the timeline.
|
2018-07-24 15:03:15 +02:00
|
|
|
# In practice, we can do this by removing them from the p_ids list,
|
|
|
|
# which is the list of relevant state we know we have already sent to the client.
|
2021-12-02 21:58:32 +01:00
|
|
|
# see https://github.com/matrix-org/synapse/pull/2970/files/efcdacad7d1b7f52f879179701c7e0d9b763511f#r204732809
|
2018-07-24 13:39:40 +02:00
|
|
|
|
2018-07-23 20:21:20 +02:00
|
|
|
if lazy_load_members:
|
2018-07-24 14:40:49 +02:00
|
|
|
p_ids.difference_update(
|
2020-06-15 13:03:36 +02:00
|
|
|
e for t, e in timeline_start.items() if t[0] == EventTypes.Member
|
2018-07-23 20:21:20 +02:00
|
|
|
)
|
|
|
|
|
2018-07-24 14:40:49 +02:00
|
|
|
state_ids = ((c_ids | ts_ids) - p_ids) - tc_ids
|
2016-02-01 16:59:40 +01:00
|
|
|
|
2019-06-20 11:32:02 +02:00
|
|
|
return {event_id_to_key[e]: e for e in state_ids}
|
2016-05-23 18:37:01 +02:00
|
|
|
|
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class SyncResultBuilder:
    """Used to help build up a new SyncResult for a user

    Attributes:
        sync_config
        full_state: The full_state flag as specified by user
        since_token: The token supplied by user, or None.
        now_token: The token to sync up to.
        joined_room_ids: List of rooms the user is joined to

        # The following mirror the fields in a sync response
        presence
        account_data
        joined
        invited
        knocked
        archived
        groups
        to_device
    """

    # Inputs to the sync computation.
    sync_config: SyncConfig
    full_state: bool
    since_token: Optional[StreamToken]
    now_token: StreamToken
    joined_room_ids: FrozenSet[str]

    # Accumulated outputs; each mirrors a section of the sync response.
    presence: List[UserPresenceState] = attr.Factory(list)
    account_data: List[JsonDict] = attr.Factory(list)
    joined: List[JoinedSyncResult] = attr.Factory(list)
    invited: List[InvitedSyncResult] = attr.Factory(list)
    knocked: List[KnockedSyncResult] = attr.Factory(list)
    archived: List[ArchivedSyncResult] = attr.Factory(list)
    groups: Optional[GroupsSyncResult] = None
    to_device: List[JsonDict] = attr.Factory(list)

    def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:
        """Work out which other users have joined or left rooms we are joined to.

        This data only is only useful for an incremental sync.

        The SyncResultBuilder is not modified by this function.

        Returns:
            A pair of sets of user IDs: (users who newly joined, were invited
            to, or knocked on any of our joined rooms; users who newly left).
            A user appearing in the first set is excluded from the second.
        """
        newly_joined_or_invited_or_knocked_users = set()
        newly_left_users = set()
        # Only meaningful for incremental syncs: with no since_token, every
        # member is "new" and the diff would be useless.
        if self.since_token:
            for joined_sync in self.joined:
                # Scan both the timeline and the state block for membership
                # events.
                it = itertools.chain(
                    joined_sync.timeline.events, joined_sync.state.values()
                )
                for event in it:
                    if event.type == EventTypes.Member:
                        if (
                            event.membership == Membership.JOIN
                            or event.membership == Membership.INVITE
                            or event.membership == Membership.KNOCK
                        ):
                            newly_joined_or_invited_or_knocked_users.add(
                                event.state_key
                            )
                        else:
                            # Only count as "newly left" if the previous
                            # membership was JOIN (e.g. not a rejected invite).
                            prev_content = event.unsigned.get("prev_content", {})
                            prev_membership = prev_content.get("membership", None)
                            if prev_membership == Membership.JOIN:
                                newly_left_users.add(event.state_key)

        # A user who both left and (re)joined/was invited/knocked counts as
        # present, not departed.
        newly_left_users -= newly_joined_or_invited_or_knocked_users
        return newly_joined_or_invited_or_knocked_users, newly_left_users
|
|
|
|
|
2016-05-23 18:37:01 +02:00
|
|
|
|
2021-08-18 13:27:32 +02:00
|
|
|
@attr.s(slots=True, auto_attribs=True)
class RoomSyncResultBuilder:
    """Stores information needed to create either a `JoinedSyncResult` or
    `ArchivedSyncResult`.

    Attributes:
        room_id
        rtype: One of `"joined"` or `"archived"`
        events: List of events to include in the room (more events may be added
            when generating result). `None` means the events have not been
            loaded yet and should be fetched when generating the result.
        newly_joined: If the user has newly joined the room
        full_state: Whether the full state should be sent in result
        since_token: Earliest point to return events from, or None
        upto_token: Latest point to return events from.
    """

    room_id: str
    rtype: str
    events: Optional[List[EventBase]]
    newly_joined: bool
    full_state: bool
    since_token: Optional[StreamToken]
    upto_token: StreamToken
|