# Copyright 2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
# Copyright 2019,2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Collection,
    Dict,
    Iterable,
    List,
    Mapping,
    Optional,
    Set,
    Tuple,
)

from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.api.errors import (
    Codes,
    FederationDeniedError,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import (
    JsonDict,
    StreamToken,
    UserID,
    get_domain_from_id,
    get_verify_key_from_cross_signing_key,
)
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

MAX_DEVICE_DISPLAY_NAME_LEN = 100


class DeviceWorkerHandler:
    def __init__(self, hs: "HomeServer"):
        self.clock = hs.get_clock()
        self.hs = hs
        self.store = hs.get_datastores().main
        self.notifier = hs.get_notifier()
        self.state = hs.get_state_handler()
        self.state_store = hs.get_storage().state
        self._auth_handler = hs.get_auth_handler()
        self.server_name = hs.hostname

    @trace
    async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
        """
        Retrieve the given user's devices

        Args:
            user_id: The user ID to query for devices.
        Returns:
            info on each device
        """

        set_tag("user_id", user_id)
        device_map = await self.store.get_devices_by_user(user_id)

        ips = await self.store.get_last_client_ip_by_device(user_id, device_id=None)

        devices = list(device_map.values())
        for device in devices:
            _update_device_from_client_ips(device, ips)

        log_kv(device_map)
        return devices

    @trace
    async def get_device(self, user_id: str, device_id: str) -> JsonDict:
        """Retrieve the given device

        Args:
            user_id: The user to get the device from
            device_id: The device to fetch.

        Returns:
            info on the device
        Raises:
            errors.NotFoundError: if the device was not found
        """
        device = await self.store.get_device(user_id, device_id)
        if device is None:
            raise errors.NotFoundError()

        ips = await self.store.get_last_client_ip_by_device(user_id, device_id)
        _update_device_from_client_ips(device, ips)

        set_tag("device", device)
        set_tag("ips", ips)

        return device

    @trace
    @measure_func("device.get_user_ids_changed")
    async def get_user_ids_changed(
        self, user_id: str, from_token: StreamToken
    ) -> JsonDict:
        """Get list of users whose devices have been updated, or who have
        newly joined a room, that `user_id` may be interested in.
        """
        set_tag("user_id", user_id)
        set_tag("from_token", from_token)
        now_room_key = self.store.get_room_max_token()

        room_ids = await self.store.get_rooms_for_user(user_id)

        # First we check if any devices have changed for users that we share
        # rooms with.
        users_who_share_room = await self.store.get_users_who_share_room_with_user(
            user_id
        )

        tracked_users = set(users_who_share_room)

        # Always tell the user about their own devices
        tracked_users.add(user_id)

        changed = await self.store.get_users_whose_devices_changed(
            from_token.device_list_key, tracked_users
        )

        # Then work out if any users have since joined
        rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)

        member_events = await self.store.get_membership_changes_for_user(
            user_id, from_token.room_key, now_room_key
        )
        rooms_changed.update(event.room_id for event in member_events)

        stream_ordering = from_token.room_key.stream

        possibly_changed = set(changed)
        possibly_left = set()
        for room_id in rooms_changed:
            current_state_ids = await self.store.get_current_state_ids(room_id)

            # The user may have left the room
            # TODO: Check if they actually did or if we were just invited.
            if room_id not in room_ids:
                for etype, state_key in current_state_ids.keys():
                    if etype != EventTypes.Member:
                        continue
                    possibly_left.add(state_key)
                continue

            # Fetch the current state at the time.
            try:
                event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(
                    room_id, stream_ordering=stream_ordering
                )
            except errors.StoreError:
                # we have purged the stream_ordering index since the stream
                # ordering: treat it the same as a new room
                event_ids = []

            # special-case for an empty prev state: include all members
            # in the changed list
            if not event_ids:
                log_kv(
                    {"event": "encountered empty previous state", "room_id": room_id}
                )
                for etype, state_key in current_state_ids.keys():
                    if etype != EventTypes.Member:
                        continue
                    possibly_changed.add(state_key)
                continue

            current_member_id = current_state_ids.get((EventTypes.Member, user_id))
            if not current_member_id:
                continue

            # mapping from event_id -> state_dict
            prev_state_ids = await self.state_store.get_state_ids_for_events(event_ids)

            # Check if we've joined the room? If so we just blindly add all the users to
            # the "possibly changed" users.
            for state_dict in prev_state_ids.values():
                member_event = state_dict.get((EventTypes.Member, user_id), None)
                if not member_event or member_event != current_member_id:
                    for etype, state_key in current_state_ids.keys():
                        if etype != EventTypes.Member:
                            continue
                        possibly_changed.add(state_key)
                    break

            # If there has been any change in membership, include them in the
            # possibly changed list. We'll check if they are joined below,
            # and we're not too worried about spuriously adding users.
            for key, event_id in current_state_ids.items():
                etype, state_key = key
                if etype != EventTypes.Member:
                    continue

                # check if this member has changed since any of the extremities
                # at the stream_ordering, and add them to the list if so.
                for state_dict in prev_state_ids.values():
                    prev_event_id = state_dict.get(key, None)
                    if not prev_event_id or prev_event_id != event_id:
                        if state_key != user_id:
                            possibly_changed.add(state_key)
                        break

        if possibly_changed or possibly_left:
            # Take the intersection of the users whose devices may have changed
            # and those that actually still share a room with the user
            possibly_joined = possibly_changed & users_who_share_room
            possibly_left = (possibly_changed | possibly_left) - users_who_share_room
        else:
            possibly_joined = set()
            possibly_left = set()

        result = {"changed": list(possibly_joined), "left": list(possibly_left)}

        log_kv(result)

        return result

    async def on_federation_query_user_devices(self, user_id: str) -> JsonDict:
        stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query(
            user_id
        )
        master_key = await self.store.get_e2e_cross_signing_key(user_id, "master")
        self_signing_key = await self.store.get_e2e_cross_signing_key(
            user_id, "self_signing"
        )

        return {
            "user_id": user_id,
            "stream_id": stream_id,
            "devices": devices,
            "master_key": master_key,
            "self_signing_key": self_signing_key,
        }


class DeviceHandler(DeviceWorkerHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.federation_sender = hs.get_federation_sender()

        self.device_list_updater = DeviceListUpdater(hs, self)

        federation_registry = hs.get_federation_registry()

        federation_registry.register_edu_handler(
            "m.device_list_update", self.device_list_updater.incoming_device_list_update
        )

        hs.get_distributor().observe("user_left_room", self.user_left_room)

    def _check_device_name_length(self, name: Optional[str]) -> None:
        """
        Checks whether a device name is longer than the maximum allowed length.

        Args:
            name: The name of the device.

        Raises:
            SynapseError: if the device name is too long.
        """
        if name and len(name) > MAX_DEVICE_DISPLAY_NAME_LEN:
            raise SynapseError(
                400,
                "Device display name is too long (max %i)"
                % (MAX_DEVICE_DISPLAY_NAME_LEN,),
                errcode=Codes.TOO_LARGE,
            )

    async def check_device_registered(
        self,
        user_id: str,
        device_id: Optional[str],
        initial_device_display_name: Optional[str] = None,
        auth_provider_id: Optional[str] = None,
        auth_provider_session_id: Optional[str] = None,
    ) -> str:
        """
        If the given device has not been registered, register it with the
        supplied display name.

        If no device_id is supplied, we make one up.

        Args:
            user_id: @user:id
            device_id: device id supplied by client
            initial_device_display_name: device display name from client
            auth_provider_id: The SSO IdP the user used, if any.
            auth_provider_session_id: The session ID (sid) received from the SSO IdP.
        Returns:
            device id (generated if none was supplied)
        """

        self._check_device_name_length(initial_device_display_name)

        if device_id is not None:
            new_device = await self.store.store_device(
                user_id=user_id,
                device_id=device_id,
                initial_device_display_name=initial_device_display_name,
                auth_provider_id=auth_provider_id,
                auth_provider_session_id=auth_provider_session_id,
            )
            if new_device:
                await self.notify_device_update(user_id, [device_id])
            return device_id

        # if the device id is not specified, we'll autogen one, but loop a few
        # times in case of a clash.
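        # (Illustrative: stringutils.random_string(10).upper() yields a
        # ten-character upper-case ID, e.g. something like "ABCDEFGHIJ";
        # the bounded retry guards against the unlikely clash with an
        # existing device ID.)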
        attempts = 0
        while attempts < 5:
            new_device_id = stringutils.random_string(10).upper()
            new_device = await self.store.store_device(
                user_id=user_id,
                device_id=new_device_id,
                initial_device_display_name=initial_device_display_name,
                auth_provider_id=auth_provider_id,
                auth_provider_session_id=auth_provider_session_id,
            )
            if new_device:
                await self.notify_device_update(user_id, [new_device_id])
                return new_device_id
            attempts += 1

        raise errors.StoreError(500, "Couldn't generate a device ID.")

    @trace
    async def delete_device(self, user_id: str, device_id: str) -> None:
        """Delete the given device

        Args:
            user_id: The user to delete the device from.
            device_id: The device to delete.
        """

        try:
            await self.store.delete_device(user_id, device_id)
        except errors.StoreError as e:
            if e.code == 404:
                # no match
                set_tag("error", True)
                log_kv(
                    {"reason": "User doesn't have device id.", "device_id": device_id}
                )
            else:
                raise

        await self._auth_handler.delete_access_tokens_for_user(
            user_id, device_id=device_id
        )

        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)

        await self.notify_device_update(user_id, [device_id])

    @trace
    async def delete_all_devices_for_user(
        self, user_id: str, except_device_id: Optional[str] = None
    ) -> None:
        """Delete all of the user's devices

        Args:
            user_id: The user to remove all devices from
            except_device_id: optional device id which should not be deleted
        """
        device_map = await self.store.get_devices_by_user(user_id)
        device_ids = list(device_map)
        if except_device_id is not None:
            device_ids = [d for d in device_ids if d != except_device_id]
        await self.delete_devices(user_id, device_ids)

    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
        """Delete several devices

        Args:
            user_id: The user to delete devices from.
            device_ids: The list of device IDs to delete
        """

        try:
            await self.store.delete_devices(user_id, device_ids)
        except errors.StoreError as e:
            if e.code == 404:
                # no match
                set_tag("error", True)
                set_tag("reason", "User doesn't have that device id.")
            else:
                raise

        # Delete access tokens and e2e keys for each device. Not optimised as it is not
        # considered as part of a critical path.
        for device_id in device_ids:
            await self._auth_handler.delete_access_tokens_for_user(
                user_id, device_id=device_id
            )
            await self.store.delete_e2e_keys_by_device(
                user_id=user_id, device_id=device_id
            )

        await self.notify_device_update(user_id, device_ids)

    async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
        """Update the given device

        Args:
            user_id: The user to update devices of.
            device_id: The device to update.
            content: body of update request
        """

        # Reject a new displayname which is too long.
        new_display_name = content.get("display_name")

        self._check_device_name_length(new_display_name)

        try:
            await self.store.update_device(
                user_id, device_id, new_display_name=new_display_name
            )
            await self.notify_device_update(user_id, [device_id])
        except errors.StoreError as e:
            if e.code == 404:
                raise errors.NotFoundError()
            else:
                raise

    @trace
    @measure_func("notify_device_update")
    async def notify_device_update(
        self, user_id: str, device_ids: Collection[str]
    ) -> None:
        """Notify that a user's device(s) has changed. Pokes the notifier, and
        remote servers if the user is local.

        Args:
            user_id: The Matrix ID of the user whose device list has been updated.
            device_ids: The device IDs that have changed.
        """
        if not device_ids:
            # No changes to notify about, so this is a no-op.
            return

        users_who_share_room = await self.store.get_users_who_share_room_with_user(
            user_id
        )

        hosts: Set[str] = set()
        if self.hs.is_mine_id(user_id):
            hosts.update(get_domain_from_id(u) for u in users_who_share_room)
            hosts.discard(self.server_name)

        set_tag("target_hosts", hosts)

        position = await self.store.add_device_change_to_streams(
            user_id, device_ids, list(hosts)
        )

        if not position:
            # This should only happen if there are no updates, so we bail.
            return

        for device_id in device_ids:
            logger.debug(
                "Notifying about update %r/%r, ID: %r", user_id, device_id, position
            )

        # specify the user ID too since the user should always get their own device list
        # updates, even if they aren't in any rooms.
        users_to_notify = users_who_share_room.union({user_id})

        self.notifier.on_new_event("device_list_key", position, users=users_to_notify)

        if hosts:
            logger.info(
                "Sending device list update notif for %r to: %r", user_id, hosts
            )
            for host in hosts:
                self.federation_sender.send_device_messages(host, immediate=False)
                log_kv({"message": "sent device update to host", "host": host})

    async def notify_user_signature_update(
        self, from_user_id: str, user_ids: List[str]
    ) -> None:
        """Notify a user that they have made new signatures of other users.

        Args:
            from_user_id: the user who made the signature
            user_ids: the user IDs that have new signatures
        """

        position = await self.store.add_user_signature_change_to_streams(
            from_user_id, user_ids
        )

        self.notifier.on_new_event("device_list_key", position, users=[from_user_id])

    async def user_left_room(self, user: UserID, room_id: str) -> None:
        user_id = user.to_string()
        room_ids = await self.store.get_rooms_for_user(user_id)
        if not room_ids:
            # We no longer share rooms with this user, so we'll no longer
            # receive device updates. Mark this in DB.
            await self.store.mark_remote_user_device_list_as_unsubscribed(user_id)

    async def store_dehydrated_device(
        self,
        user_id: str,
        device_data: JsonDict,
        initial_device_display_name: Optional[str] = None,
    ) -> str:
        """Store a dehydrated device for a user. If the user had a previous
        dehydrated device, it is removed.

        Args:
            user_id: the user that we are storing the device for
            device_data: the dehydrated device information
            initial_device_display_name: The display name to use for the device
        Returns:
            device id of the dehydrated device
        """
        device_id = await self.check_device_registered(
            user_id,
            None,
            initial_device_display_name,
        )
        old_device_id = await self.store.store_dehydrated_device(
            user_id, device_id, device_data
        )
        if old_device_id is not None:
            await self.delete_device(user_id, old_device_id)
        return device_id

    async def get_dehydrated_device(
        self, user_id: str
    ) -> Optional[Tuple[str, JsonDict]]:
        """Retrieve the information for a dehydrated device.

        Args:
            user_id: the user whose dehydrated device we are looking for
        Returns:
            a tuple whose first item is the device ID, and the second item is
            the dehydrated device information
        """
        return await self.store.get_dehydrated_device(user_id)

    async def rehydrate_device(
        self, user_id: str, access_token: str, device_id: str
    ) -> dict:
        """Process a rehydration request from the user.

        Args:
            user_id: the user who is rehydrating the device
            access_token: the access token used for the request
            device_id: the ID of the device that will be rehydrated
        Returns:
            a dict containing {"success": True}
        """
        success = await self.store.remove_dehydrated_device(user_id, device_id)

        if not success:
            raise errors.NotFoundError()

        # If the dehydrated device was successfully deleted (the device ID
        # matched the stored dehydrated device), then modify the access
        # token to use the dehydrated device's ID and copy the old device
        # display name to the dehydrated device, and destroy the old device
        # ID
        old_device_id = await self.store.set_device_for_access_token(
            access_token, device_id
        )
        old_device = await self.store.get_device(user_id, old_device_id)
        if old_device is None:
            raise errors.NotFoundError()
        await self.store.update_device(user_id, device_id, old_device["display_name"])

        # can't call self.delete_device because that will clobber the
        # access token so call the storage layer directly
        await self.store.delete_device(user_id, old_device_id)
        await self.store.delete_e2e_keys_by_device(
            user_id=user_id, device_id=old_device_id
        )

        # tell everyone that the old device is gone and that the dehydrated
        # device has a new display name
        await self.notify_device_update(user_id, [old_device_id, device_id])

        return {"success": True}


def _update_device_from_client_ips(
    device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
) -> None:
    ip = client_ips.get((device["user_id"], device["device_id"]), {})
    device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")})


class DeviceListUpdater:
    """Handles incoming device list updates from federation and updates the DB"""

    def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
        self.store = hs.get_datastores().main
        self.federation = hs.get_federation_client()
        self.clock = hs.get_clock()
        self.device_handler = device_handler

        self._remote_edu_linearizer = Linearizer(name="remote_device_list")

        # user_id -> list of updates waiting to be handled.
        self._pending_updates: Dict[
            str, List[Tuple[str, str, Iterable[str], JsonDict]]
        ] = {}

        # Recently seen stream ids. We don't bother keeping these in the DB,
        # but they're useful to have around to reduce the number of spurious
        # resyncs.
        self._seen_updates: ExpiringCache[str, Set[str]] = ExpiringCache(
            cache_name="device_update_edu",
            clock=self.clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
            iterable=True,
        )

        # Attempt to resync out of sync device lists every 30s.
        self._resync_retry_in_progress = False
        self.clock.looping_call(
            run_as_background_process,
            30 * 1000,
            func=self._maybe_retry_device_resync,
            desc="_maybe_retry_device_resync",
        )

    @trace
    async def incoming_device_list_update(
        self, origin: str, edu_content: JsonDict
    ) -> None:
        """Called on incoming device list update from federation. Responsible
        for parsing the EDU and adding to pending updates list.
        """
        set_tag("origin", origin)
        set_tag("edu_content", edu_content)
        user_id = edu_content.pop("user_id")
        device_id = edu_content.pop("device_id")
        stream_id = str(edu_content.pop("stream_id"))  # They may come as ints
        prev_ids = edu_content.pop("prev_id", [])
        prev_ids = [str(p) for p in prev_ids]  # They may come as ints

        if get_domain_from_id(user_id) != origin:
            # TODO: Raise?
            logger.warning(
                "Got device list update edu for %r/%r from %r",
                user_id,
                device_id,
                origin,
            )

            set_tag("error", True)
            log_kv(
                {
                    "message": "Got a device list update edu from a user and "
                    "device which does not match the origin of the request.",
                    "user_id": user_id,
                    "device_id": device_id,
                }
            )
            return

        room_ids = await self.store.get_rooms_for_user(user_id)
        if not room_ids:
            # We don't share any rooms with this user. Ignore update, as we
            # probably won't get any further updates.
            set_tag("error", True)
            log_kv(
                {
                    "message": "Got an update from a user with whom "
                    "we don't share any rooms",
                    "other user_id": user_id,
                }
            )
            logger.warning(
                "Got device list update edu for %r/%r, but don't share a room",
                user_id,
                device_id,
            )
            return

        logger.debug("Received device list update for %r/%r", user_id, device_id)

        self._pending_updates.setdefault(user_id, []).append(
            (device_id, stream_id, prev_ids, edu_content)
        )

        await self._handle_device_updates(user_id)

    @measure_func("_incoming_device_list_update")
    async def _handle_device_updates(self, user_id: str) -> None:
        """Actually handle pending updates."""

        with (await self._remote_edu_linearizer.queue(user_id)):
            pending_updates = self._pending_updates.pop(user_id, [])
            if not pending_updates:
                # This can happen since we batch updates
                return

            for device_id, stream_id, prev_ids, _ in pending_updates:
                logger.debug(
                    "Handling update %r/%r, ID: %r, prev: %r",
                    user_id,
                    device_id,
                    stream_id,
                    prev_ids,
                )

            # Given a list of updates we check if we need to resync. This
            # happens if we've missed updates.
            resync = await self._need_to_do_resync(user_id, pending_updates)

            if logger.isEnabledFor(logging.INFO):
                logger.info(
                    "Received device list update for %s, requiring resync: %s. Devices: %s",
                    user_id,
                    resync,
                    ", ".join(u[0] for u in pending_updates),
                )

            if resync:
                await self.user_device_resync(user_id)
            else:
                # Simply update the single device, since we know that is the only
                # change (because of the single prev_id matching the current cache)
                for device_id, stream_id, _, content in pending_updates:
                    await self.store.update_remote_device_list_cache_entry(
                        user_id, device_id, content, stream_id
                    )

                await self.device_handler.notify_device_update(
                    user_id, [device_id for device_id, _, _, _ in pending_updates]
                )

                self._seen_updates.setdefault(user_id, set()).update(
                    stream_id for _, stream_id, _, _ in pending_updates
                )

    async def _need_to_do_resync(
        self, user_id: str, updates: Iterable[Tuple[str, str, Iterable[str], JsonDict]]
    ) -> bool:
        """Given a list of updates for a user figure out if we need to do a full
        resync, or whether we have enough data that we can just apply the delta.
        """
        seen_updates: Set[str] = self._seen_updates.get(user_id, set())

        extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id)

        logger.debug("Current extremity for %r: %r", user_id, extremity)

        stream_id_in_updates = set()  # stream_ids in updates list
        for _, stream_id, prev_ids, _ in updates:
            if not prev_ids:
                # We always do a resync if there are no previous IDs
                return True

            for prev_id in prev_ids:
                if prev_id == extremity:
                    continue
                elif prev_id in seen_updates:
                    continue
                elif prev_id in stream_id_in_updates:
                    continue
                else:
                    return True

            stream_id_in_updates.add(stream_id)

        return False

    @trace
    async def _maybe_retry_device_resync(self) -> None:
        """Retry resyncing device lists that are out of sync, unless another
        retry is already in progress.
        """
        if self._resync_retry_in_progress:
            return

        try:
            # Prevent another call of this function from retrying resyncs of
            # device lists so we don't send too many requests.
            self._resync_retry_in_progress = True
            # Get all of the users that need resyncing.
            need_resync = await self.store.get_user_ids_requiring_device_list_resync()
            # Iterate over the set of user IDs.
            for user_id in need_resync:
                try:
                    # Try to resync the current user's device list.
                    result = await self.user_device_resync(
                        user_id=user_id,
                        mark_failed_as_stale=False,
                    )

                    # user_device_resync only returns a result if it managed to
                    # successfully resync and update the database. Updating the table
                    # of users requiring resync isn't necessary here as
                    # user_device_resync already does it (through
                    # self.store.update_remote_device_list_cache).
                    if result:
                        logger.debug(
                            "Successfully resynced the device list for %s",
                            user_id,
                        )
                except Exception as e:
                    # If there was an issue resyncing this user, e.g. if the remote
                    # server sent a malformed result, just log the error instead of
                    # aborting all the subsequent resyncs.
                    logger.debug(
                        "Could not resync the device list for %s: %s",
                        user_id,
                        e,
                    )
        finally:
            # Allow future calls to retry resyncing out of sync device lists.
            self._resync_retry_in_progress = False

    async def user_device_resync(
        self, user_id: str, mark_failed_as_stale: bool = True
    ) -> Optional[JsonDict]:
        """Fetches all devices for a user and updates the device cache with them.

        Args:
            user_id: The ID of the user whose device list will be updated.
            mark_failed_as_stale: Whether to mark the user's device list as stale
                if the attempt to resync failed.
        Returns:
            A dict with device info, as under the "devices" key in the result of
            this request:
            https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
        """
        logger.debug("Attempting to resync the device list for %s", user_id)
        log_kv({"message": "Doing resync to update device list."})
        # Fetch all devices for the user.
        origin = get_domain_from_id(user_id)
        try:
            result = await self.federation.query_user_devices(origin, user_id)
        except NotRetryingDestination:
            if mark_failed_as_stale:
                # Mark the remote user's device list as stale so we know we need to retry
                # it later.
                await self.store.mark_remote_user_device_cache_as_stale(user_id)

            return None
        except (RequestSendFailed, HttpResponseException) as e:
            logger.warning(
                "Failed to handle device list update for %s: %s",
                user_id,
                e,
            )

            if mark_failed_as_stale:
                # Mark the remote user's device list as stale so we know we need to retry
                # it later.
                await self.store.mark_remote_user_device_cache_as_stale(user_id)

            # We abort on exceptions rather than accepting the update
            # as otherwise synapse will 'forget' that its device list
            # is out of date. If we bail then we will retry the resync
            # next time we get a device list update for this user_id.
            # This makes it more likely that the device lists will
            # eventually become consistent.
            return None
        except FederationDeniedError as e:
            set_tag("error", True)
            log_kv({"reason": "FederationDeniedError"})
            logger.info(e)
            return None
        except Exception as e:
            set_tag("error", True)
            log_kv(
                {"message": "Exception raised by federation request", "exception": e}
            )
            logger.exception("Failed to handle device list update for %s", user_id)

            if mark_failed_as_stale:
                # Mark the remote user's device list as stale so we know we need to retry
                # it later.
                await self.store.mark_remote_user_device_cache_as_stale(user_id)

            return None
        log_kv({"result": result})
        stream_id = result["stream_id"]
        devices = result["devices"]

        # Get the master key and the self-signing key for this user if provided in the
        # response (None if not in the response).
        # The response will not contain the user-signing key, as this key is only used
        # by its owner, thus it doesn't make sense to send it over federation.
        master_key = result.get("master_key")
        self_signing_key = result.get("self_signing_key")

        ignore_devices = False
        # If the remote server has more than ~1000 devices for this user
        # we assume that something is going horribly wrong (e.g. a bot
        # that logs in and creates a new device every time it tries to
        # send a message). Maintaining lots of devices per user in the
        # cache can cause serious performance issues: if this request
        # takes more than 60s to complete, internal replication from the
        # inbound federation worker to the synapse master may time out,
        # causing the inbound federation request to fail and the remote
        # server to retry, causing a DoS. So in this scenario we give
        # up on storing the total list of devices and only handle the
        # delta instead.
        if len(devices) > 1000:
            logger.warning(
                "Ignoring device list snapshot for %s as it has >1K devs (%d)",
                user_id,
                len(devices),
            )
            devices = []
            ignore_devices = True
        else:
            prev_stream_id = await self.store.get_device_list_last_stream_id_for_remote(
                user_id
            )
            cached_devices = await self.store.get_cached_devices_for_user(user_id)

            # To ensure that a user with no devices is cached, we skip the resync only
            # if we have a stream_id from previously writing a cache entry.
            if prev_stream_id is not None and cached_devices == {
                d["device_id"]: d for d in devices
            }:
                logger.info(
                    "Skipping device list resync for %s, as our cache matches already",
                    user_id,
                )
                devices = []
                ignore_devices = True

        for device in devices:
            logger.debug(
                "Handling resync update %r/%r, ID: %r",
                user_id,
                device["device_id"],
                stream_id,
            )

        if not ignore_devices:
            await self.store.update_remote_device_list_cache(
                user_id, devices, stream_id
            )
        # mark the cache as valid, whether or not we actually processed any device
        # list updates.
        await self.store.mark_remote_user_device_cache_as_valid(user_id)
        device_ids = [device["device_id"] for device in devices]

        # Handle cross-signing keys.
        cross_signing_device_ids = await self.process_cross_signing_key_update(
            user_id,
            master_key,
            self_signing_key,
        )
        device_ids = device_ids + cross_signing_device_ids

        if device_ids:
            await self.device_handler.notify_device_update(user_id, device_ids)

        # We clobber the seen updates since we've re-synced from a given
        # point.
        self._seen_updates[user_id] = {stream_id}

        return result

    async def process_cross_signing_key_update(
        self,
        user_id: str,
        master_key: Optional[JsonDict],
        self_signing_key: Optional[JsonDict],
    ) -> List[str]:
        """Process the given new master and self-signing key for the given remote user.

        Args:
            user_id: The ID of the user these keys are for.
            master_key: The dict of the cross-signing master key as returned by the
                remote server.
            self_signing_key: The dict of the cross-signing self-signing key as returned
                by the remote server.

        Returns:
            The device IDs for the given keys.
        """
        device_ids = []

        current_keys_map = await self.store.get_e2e_cross_signing_keys_bulk([user_id])
        current_keys = current_keys_map.get(user_id) or {}

        if master_key and master_key != current_keys.get("master"):
            await self.store.set_e2e_cross_signing_key(user_id, "master", master_key)
            _, verify_key = get_verify_key_from_cross_signing_key(master_key)
            # verify_key is a VerifyKey from signedjson, which uses
            # .version to denote the portion of the key ID after the
            # algorithm and colon, which is the device ID
            device_ids.append(verify_key.version)
        if self_signing_key and self_signing_key != current_keys.get("self_signing"):
            await self.store.set_e2e_cross_signing_key(
                user_id, "self_signing", self_signing_key
            )
            _, verify_key = get_verify_key_from_cross_signing_key(self_signing_key)
            device_ids.append(verify_key.version)

        return device_ids