2017-05-31 12:51:01 +02:00
|
|
|
# Copyright 2017 Vector Creations Ltd
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
import logging
|
2021-01-04 16:05:12 +01:00
|
|
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional
|
2018-07-09 08:09:20 +02:00
|
|
|
|
2019-01-23 16:01:09 +01:00
|
|
|
import synapse.metrics
|
2020-12-16 14:46:37 +01:00
|
|
|
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
|
2021-09-10 11:54:38 +02:00
|
|
|
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
|
2018-10-23 15:29:17 +02:00
|
|
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
2022-03-18 14:51:41 +01:00
|
|
|
from synapse.storage.databases.main.user_directory import SearchResult
|
2017-05-31 12:51:01 +02:00
|
|
|
from synapse.storage.roommember import ProfileInfo
|
2018-07-09 08:09:20 +02:00
|
|
|
from synapse.util.metrics import Measure
|
2017-05-31 12:51:01 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
if TYPE_CHECKING:
|
2021-03-23 12:12:48 +01:00
|
|
|
from synapse.server import HomeServer
|
2021-01-04 16:05:12 +01:00
|
|
|
|
2017-05-31 12:51:01 +02:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2019-03-25 10:37:08 +01:00
|
|
|
class UserDirectoryHandler(StateDeltasHandler):
    """Handles queries and updates for the user_directory.

    N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY

    When a local user searches the user_directory, we report two kinds of users:

    - users this server can see are joined to a world_readable or publicly
      joinable room, and
    - users belonging to a private room shared by that local user.

    The two cases are tracked separately in the `users_in_public_rooms` and
    `users_who_share_private_rooms` tables. Both kinds of users have their
    username and avatar tracked in a `user_directory` table.

    This handler has three responsibilities:
    1. Forwarding requests to `/user_directory/search` to the UserDirectoryStore.
    2. Providing hooks for the application to call when local users are added,
       removed, or have their profile changed.
    3. Listening for room state changes that indicate remote users have
       joined or left a room, or that their profile has changed.
    """
|
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
def __init__(self, hs: "HomeServer"):
|
2020-09-18 15:56:44 +02:00
|
|
|
super().__init__(hs)
|
2019-03-25 10:37:08 +01:00
|
|
|
|
2022-02-23 12:04:02 +01:00
|
|
|
self.store = hs.get_datastores().main
|
2022-06-01 17:02:53 +02:00
|
|
|
self._storage_controllers = hs.get_storage_controllers()
|
2017-05-31 12:51:01 +02:00
|
|
|
self.server_name = hs.hostname
|
|
|
|
self.clock = hs.get_clock()
|
2017-06-07 13:02:53 +02:00
|
|
|
self.notifier = hs.get_notifier()
|
2017-06-15 10:59:04 +02:00
|
|
|
self.is_mine_id = hs.is_mine_id
|
2022-05-10 12:08:45 +02:00
|
|
|
self.update_user_directory = hs.config.worker.should_update_user_directory
|
2021-09-24 13:25:21 +02:00
|
|
|
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
|
2020-02-14 13:17:54 +01:00
|
|
|
self.spam_checker = hs.get_spam_checker()
|
2017-05-31 16:00:29 +02:00
|
|
|
# The current position in the current_state_delta stream
|
2021-07-16 19:22:36 +02:00
|
|
|
self.pos: Optional[int] = None
|
2017-05-31 12:51:01 +02:00
|
|
|
|
2017-05-31 16:00:29 +02:00
|
|
|
# Guard to ensure we only process deltas one at a time
|
2017-05-31 12:51:01 +02:00
|
|
|
self._is_processing = False
|
|
|
|
|
2017-06-15 13:47:05 +02:00
|
|
|
if self.update_user_directory:
|
|
|
|
self.notifier.add_replication_callback(self.notify_new_event)
|
|
|
|
|
|
|
|
# We kick this off so that we don't have to wait for a change before
|
|
|
|
# we start populating the user directory
|
|
|
|
self.clock.call_later(0, self.notify_new_event)
|
2017-05-31 12:59:36 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
async def search_users(
|
|
|
|
self, user_id: str, search_term: str, limit: int
|
2022-03-18 14:51:41 +01:00
|
|
|
) -> SearchResult:
|
2017-05-31 16:00:29 +02:00
|
|
|
"""Searches for users in directory
|
|
|
|
|
|
|
|
Returns:
|
|
|
|
dict of the form::
|
|
|
|
|
|
|
|
{
|
|
|
|
"limited": <bool>, # whether there were more results or not
|
|
|
|
"results": [ # Ordered by best match first
|
|
|
|
{
|
|
|
|
"user_id": <user_id>,
|
|
|
|
"display_name": <display_name>,
|
|
|
|
"avatar_url": <avatar_url>
|
|
|
|
}
|
|
|
|
]
|
|
|
|
}
|
|
|
|
"""
|
2020-02-14 13:17:54 +01:00
|
|
|
results = await self.store.search_user_dir(user_id, search_term, limit)
|
|
|
|
|
|
|
|
# Remove any spammy users from the results.
|
2020-12-11 20:05:15 +01:00
|
|
|
non_spammy_users = []
|
|
|
|
for user in results["results"]:
|
|
|
|
if not await self.spam_checker.check_username_for_spam(user):
|
|
|
|
non_spammy_users.append(user)
|
|
|
|
results["results"] = non_spammy_users
|
2020-02-14 13:17:54 +01:00
|
|
|
|
|
|
|
return results
|
2017-05-31 15:00:01 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
def notify_new_event(self) -> None:
|
2021-02-16 23:32:34 +01:00
|
|
|
"""Called when there may be more deltas to process"""
|
2017-06-15 13:47:05 +02:00
|
|
|
if not self.update_user_directory:
|
|
|
|
return
|
|
|
|
|
2017-05-31 12:51:01 +02:00
|
|
|
if self._is_processing:
|
|
|
|
return
|
|
|
|
|
2021-09-20 14:56:23 +02:00
|
|
|
async def process() -> None:
|
2018-10-23 15:29:17 +02:00
|
|
|
try:
|
2020-06-05 20:42:55 +02:00
|
|
|
await self._unsafe_process()
|
2018-10-23 15:29:17 +02:00
|
|
|
finally:
|
|
|
|
self._is_processing = False
|
|
|
|
|
2017-05-31 12:51:01 +02:00
|
|
|
self._is_processing = True
|
2018-10-23 15:29:17 +02:00
|
|
|
run_as_background_process("user_directory.notify_new_event", process)
|
2017-05-31 12:51:01 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
async def handle_local_profile_change(
|
|
|
|
self, user_id: str, profile: ProfileInfo
|
|
|
|
) -> None:
|
2017-11-29 19:27:05 +01:00
|
|
|
"""Called to update index of our local user profiles when they change
|
|
|
|
irrespective of any rooms the user may be in.
|
|
|
|
"""
|
2018-08-17 16:30:31 +02:00
|
|
|
# FIXME(#3714): We should probably do this in the same worker as all
|
|
|
|
# the other changes.
|
2020-12-17 13:05:39 +01:00
|
|
|
|
2021-10-04 13:45:51 +02:00
|
|
|
if await self.store.should_include_local_user_in_dir(user_id):
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.update_profile_in_user_dir(
|
2019-03-18 18:50:24 +01:00
|
|
|
user_id, profile.display_name, profile.avatar_url
|
2018-12-14 19:20:59 +01:00
|
|
|
)
|
2017-11-29 19:27:05 +01:00
|
|
|
|
2021-09-10 11:54:38 +02:00
|
|
|
async def handle_local_user_deactivated(self, user_id: str) -> None:
|
2021-02-16 23:32:34 +01:00
|
|
|
"""Called when a user ID is deactivated"""
|
2018-08-17 16:30:31 +02:00
|
|
|
# FIXME(#3714): We should probably do this in the same worker as all
|
|
|
|
# the other changes.
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.remove_from_user_dir(user_id)
|
2018-05-24 16:59:58 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
    async def _unsafe_process(self) -> None:
        # Pump state deltas through `_handle_deltas` until the directory has
        # caught up with the rooms stream. "Unsafe" because it must not run
        # concurrently with itself; `notify_new_event` guards this with the
        # `_is_processing` flag.

        # If self.pos is None then means we haven't fetched it from DB
        if self.pos is None:
            self.pos = await self.store.get_user_directory_stream_pos()

        # If still None then the initial background update hasn't happened yet.
        if self.pos is None:
            return None

        room_max_stream_ordering = self.store.get_room_max_stream_ordering()
        if self.pos > room_max_stream_ordering:
            # apparently, we've processed more events than exist in the database!
            # this can happen if events are removed with history purge or similar.
            logger.warning(
                "Event stream ordering appears to have gone backwards (%i -> %i): "
                "rewinding user directory processor",
                self.pos,
                room_max_stream_ordering,
            )
            self.pos = room_max_stream_ordering

        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "user_dir_delta"):
                # Re-read the max position each iteration: new events may have
                # arrived while we were processing the previous batch.
                room_max_stream_ordering = self.store.get_room_max_stream_ordering()
                if self.pos == room_max_stream_ordering:
                    return

                logger.debug(
                    "Processing user stats %s->%s", self.pos, room_max_stream_ordering
                )
                (
                    max_pos,
                    deltas,
                ) = await self._storage_controllers.state.get_current_state_deltas(
                    self.pos, room_max_stream_ordering
                )

                logger.debug("Handling %d state deltas", len(deltas))
                await self._handle_deltas(deltas)

                # Only advance our position once the deltas have been handled.
                self.pos = max_pos

                # Expose current event processing position to prometheus
                synapse.metrics.event_processing_positions.labels("user_dir").set(
                    max_pos
                )

                # Persist the new position so we resume from here on restart.
                await self.store.update_user_directory_stream_pos(max_pos)
|
2017-05-31 12:51:01 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None:
|
2021-02-16 23:32:34 +01:00
|
|
|
"""Called with the state deltas to process"""
|
2017-05-31 12:51:01 +02:00
|
|
|
for delta in deltas:
|
|
|
|
typ = delta["type"]
|
|
|
|
state_key = delta["state_key"]
|
|
|
|
room_id = delta["room_id"]
|
|
|
|
event_id = delta["event_id"]
|
|
|
|
prev_event_id = delta["prev_event_id"]
|
|
|
|
|
2017-06-01 12:09:49 +02:00
|
|
|
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
|
|
|
|
|
2017-05-31 16:00:29 +02:00
|
|
|
# For join rule and visibility changes we need to check if the room
|
|
|
|
# may have become public or not and add/remove the users in said room
|
2017-05-31 18:00:24 +02:00
|
|
|
if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
|
2020-06-05 20:42:55 +02:00
|
|
|
await self._handle_room_publicity_change(
|
2019-02-13 13:05:32 +01:00
|
|
|
room_id, prev_event_id, event_id, typ
|
2017-05-31 14:30:12 +02:00
|
|
|
)
|
2017-05-31 12:51:01 +02:00
|
|
|
elif typ == EventTypes.Member:
|
2021-10-13 14:50:00 +02:00
|
|
|
await self._handle_room_membership_event(
|
|
|
|
room_id,
|
2019-02-13 13:05:32 +01:00
|
|
|
prev_event_id,
|
|
|
|
event_id,
|
2021-10-13 14:50:00 +02:00
|
|
|
state_key,
|
2017-05-31 12:51:01 +02:00
|
|
|
)
|
2017-06-01 12:09:49 +02:00
|
|
|
else:
|
|
|
|
logger.debug("Ignoring irrelevant type: %r", typ)
|
2017-05-31 12:51:01 +02:00
|
|
|
|
2020-06-05 20:42:55 +02:00
|
|
|
async def _handle_room_publicity_change(
|
2021-01-04 16:05:12 +01:00
|
|
|
self,
|
|
|
|
room_id: str,
|
|
|
|
prev_event_id: Optional[str],
|
|
|
|
event_id: Optional[str],
|
|
|
|
typ: str,
|
|
|
|
) -> None:
|
2020-09-01 19:02:41 +02:00
|
|
|
"""Handle a room having potentially changed from/to world_readable/publicly
|
2017-05-31 18:00:24 +02:00
|
|
|
joinable.
|
|
|
|
|
|
|
|
Args:
|
2021-01-04 16:05:12 +01:00
|
|
|
room_id: The ID of the room which changed.
|
|
|
|
prev_event_id: The previous event before the state change
|
|
|
|
event_id: The new event after the state change
|
|
|
|
typ: Type of the event
|
2017-05-31 18:00:24 +02:00
|
|
|
"""
|
2017-06-15 13:47:05 +02:00
|
|
|
logger.debug("Handling change for %s: %s", typ, room_id)
|
2017-06-01 12:09:49 +02:00
|
|
|
|
2017-05-31 18:00:24 +02:00
|
|
|
if typ == EventTypes.RoomHistoryVisibility:
|
2021-09-10 11:54:38 +02:00
|
|
|
publicness = await self._get_key_change(
|
2019-02-13 13:05:32 +01:00
|
|
|
prev_event_id,
|
|
|
|
event_id,
|
2017-05-31 18:00:24 +02:00
|
|
|
key_name="history_visibility",
|
2020-12-16 14:46:37 +01:00
|
|
|
public_value=HistoryVisibility.WORLD_READABLE,
|
2017-05-31 18:00:24 +02:00
|
|
|
)
|
|
|
|
elif typ == EventTypes.JoinRules:
|
2021-09-10 11:54:38 +02:00
|
|
|
publicness = await self._get_key_change(
|
2019-02-13 13:05:32 +01:00
|
|
|
prev_event_id,
|
|
|
|
event_id,
|
2017-06-01 12:09:49 +02:00
|
|
|
key_name="join_rule",
|
2017-05-31 18:00:24 +02:00
|
|
|
public_value=JoinRules.PUBLIC,
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
raise Exception("Invalid event type")
|
2021-09-10 11:54:38 +02:00
|
|
|
if publicness is MatchChange.no_change:
|
2017-06-01 12:09:49 +02:00
|
|
|
logger.debug("No change")
|
2017-05-31 18:00:24 +02:00
|
|
|
return
|
|
|
|
|
|
|
|
# There's been a change to or from being world readable.
|
|
|
|
|
2020-06-05 20:42:55 +02:00
|
|
|
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
|
2017-05-31 18:00:24 +02:00
|
|
|
room_id
|
|
|
|
)
|
|
|
|
|
2021-10-08 13:52:48 +02:00
|
|
|
logger.debug("Publicness change: %r, is_public: %r", publicness, is_public)
|
2017-06-01 12:09:49 +02:00
|
|
|
|
2021-09-10 11:54:38 +02:00
|
|
|
if publicness is MatchChange.now_true and not is_public:
|
2017-05-31 18:00:24 +02:00
|
|
|
# If we became world readable but room isn't currently public then
|
|
|
|
# we ignore the change
|
|
|
|
return
|
2021-09-10 11:54:38 +02:00
|
|
|
elif publicness is MatchChange.now_false and is_public:
|
2017-05-31 18:00:24 +02:00
|
|
|
# If we stopped being world readable but are still public,
|
|
|
|
# ignore the change
|
|
|
|
return
|
|
|
|
|
2021-10-08 13:52:48 +02:00
|
|
|
users_in_room = await self.store.get_users_in_room(room_id)
|
2019-03-07 10:22:53 +01:00
|
|
|
|
|
|
|
# Remove every user from the sharing tables for that room.
|
2021-10-08 13:52:48 +02:00
|
|
|
for user_id in users_in_room:
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.remove_user_who_share_room(user_id, room_id)
|
2019-03-07 10:22:53 +01:00
|
|
|
|
2021-10-15 16:53:05 +02:00
|
|
|
# Then, re-add all remote users and some local users to the tables.
|
2021-10-08 13:52:48 +02:00
|
|
|
# NOTE: this is not the most efficient method, as _track_user_joined_room sets
|
2019-03-07 10:22:53 +01:00
|
|
|
# up local_user -> other_user and other_user_whos_local -> local_user,
|
|
|
|
# which when ran over an entire room, will result in the same values
|
|
|
|
# being added multiple times. The batching upserts shouldn't make this
|
|
|
|
# too bad, though.
|
2021-10-08 13:52:48 +02:00
|
|
|
for user_id in users_in_room:
|
2021-10-15 16:53:05 +02:00
|
|
|
if not self.is_mine_id(
|
|
|
|
user_id
|
|
|
|
) or await self.store.should_include_local_user_in_dir(user_id):
|
|
|
|
await self._track_user_joined_room(room_id, user_id)
|
2017-11-29 19:27:05 +01:00
|
|
|
|
2021-10-13 14:50:00 +02:00
|
|
|
    async def _handle_room_membership_event(
        self,
        room_id: str,
        prev_event_id: Optional[str],
        event_id: str,
        state_key: str,
    ) -> None:
        """Process a single room membership event.

        We have to do two things:

        1. Update the room-sharing tables.
           This applies to remote users and non-excluded local users.
        2. Update the user_directory and user_directory_search tables.
           This applies to remote users only, because we only become aware of
           them (and any profile changes) by listening to these events.
           The rest of the application knows exactly when local users are
           created or their profile changed---it will directly call methods
           on this class.

        Args:
            room_id: The room the membership event belongs to.
            prev_event_id: The user's previous membership event in this room,
                or None if this is the first one we have seen.
            event_id: The new membership event.
            state_key: The user ID the membership event applies to.
        """
        joined = await self._get_key_change(
            prev_event_id,
            event_id,
            key_name="membership",
            public_value=Membership.JOIN,
        )

        # Both cases ignore excluded local users, so start by discarding them.
        is_remote = not self.is_mine_id(state_key)
        if not is_remote and not await self.store.should_include_local_user_in_dir(
            state_key
        ):
            return

        if joined is MatchChange.now_false:
            # Need to check if the server left the room entirely, if so
            # we might need to remove all the users in that room
            is_in_room = await self.store.is_host_joined(room_id, self.server_name)
            if not is_in_room:
                logger.debug("Server left room: %r", room_id)
                # Fetch all the users that we marked as being in user
                # directory due to being in the room and then check if
                # need to remove those users or not
                user_ids = await self.store.get_users_in_dir_due_to_room(room_id)

                for user_id in user_ids:
                    await self._handle_remove_user(room_id, user_id)
            else:
                logger.debug("Server is still in room: %r", room_id)
                await self._handle_remove_user(room_id, state_key)
        elif joined is MatchChange.no_change:
            # Handle any profile changes for remote users.
            # (For local users the rest of the application calls
            # `handle_local_profile_change`.)
            if is_remote:
                await self._handle_possible_remote_profile_change(
                    state_key, room_id, prev_event_id, event_id
                )
        elif joined is MatchChange.now_true:  # The user joined
            # This may be the first time we've seen a remote user. If
            # so, ensure we have a directory entry for them. (For local users,
            # the rest of the application calls `handle_local_profile_change`.)
            if is_remote:
                await self._upsert_directory_entry_for_remote_user(state_key, event_id)
            await self._track_user_joined_room(room_id, state_key)
|
|
|
|
|
2021-10-08 13:52:48 +02:00
|
|
|
async def _upsert_directory_entry_for_remote_user(
|
|
|
|
self, user_id: str, event_id: str
|
2021-01-04 16:05:12 +01:00
|
|
|
) -> None:
|
2021-10-08 13:52:48 +02:00
|
|
|
"""A remote user has just joined a room. Ensure they have an entry in
|
|
|
|
the user directory. The caller is responsible for making sure they're
|
|
|
|
remote.
|
2017-05-31 16:00:29 +02:00
|
|
|
"""
|
2021-10-08 13:52:48 +02:00
|
|
|
event = await self.store.get_event(event_id, allow_none=True)
|
|
|
|
# It isn't expected for this event to not exist, but we
|
|
|
|
# don't want the entire background process to break.
|
|
|
|
if event is None:
|
|
|
|
return
|
|
|
|
|
2017-11-29 19:27:05 +01:00
|
|
|
logger.debug("Adding new user to dir, %r", user_id)
|
2017-06-01 15:50:46 +02:00
|
|
|
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.update_profile_in_user_dir(
|
2021-10-08 13:52:48 +02:00
|
|
|
user_id, event.content.get("displayname"), event.content.get("avatar_url")
|
2019-03-18 18:50:24 +01:00
|
|
|
)
|
2017-06-01 15:50:46 +02:00
|
|
|
|
2021-10-08 13:52:48 +02:00
|
|
|
async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
|
|
|
|
"""Someone's just joined a room. Update `users_in_public_rooms` or
|
|
|
|
`users_who_share_private_rooms` as appropriate.
|
|
|
|
|
2021-10-15 16:53:05 +02:00
|
|
|
The caller is responsible for ensuring that the given user should be
|
|
|
|
included in the user directory.
|
2021-10-08 13:52:48 +02:00
|
|
|
"""
|
2020-06-05 20:42:55 +02:00
|
|
|
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
|
2017-06-01 15:50:46 +02:00
|
|
|
room_id
|
|
|
|
)
|
2019-03-12 11:47:14 +01:00
|
|
|
if is_public:
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.add_users_in_public_rooms(room_id, (user_id,))
|
2019-03-12 11:47:14 +01:00
|
|
|
else:
|
2021-10-21 18:48:59 +02:00
|
|
|
users_in_room = await self.store.get_users_in_room(room_id)
|
|
|
|
other_users_in_room = [
|
|
|
|
other
|
|
|
|
for other in users_in_room
|
|
|
|
if other != user_id
|
|
|
|
and (
|
|
|
|
not self.is_mine_id(other)
|
|
|
|
or await self.store.should_include_local_user_in_dir(other)
|
|
|
|
)
|
|
|
|
]
|
2019-03-12 11:47:14 +01:00
|
|
|
to_insert = set()
|
2017-06-21 13:00:41 +02:00
|
|
|
|
2019-03-12 11:47:14 +01:00
|
|
|
# First, if they're our user then we need to update for every user
|
|
|
|
if self.is_mine_id(user_id):
|
2021-10-21 18:48:59 +02:00
|
|
|
for other_user_id in other_users_in_room:
|
|
|
|
to_insert.add((user_id, other_user_id))
|
2017-06-15 11:15:00 +02:00
|
|
|
|
2019-03-12 11:47:14 +01:00
|
|
|
# Next we need to update for every local user in the room
|
2021-04-16 19:17:18 +02:00
|
|
|
for other_user_id in other_users_in_room:
|
2021-10-21 18:48:59 +02:00
|
|
|
if self.is_mine_id(other_user_id):
|
2019-03-12 11:47:14 +01:00
|
|
|
to_insert.add((other_user_id, user_id))
|
2017-06-15 11:15:00 +02:00
|
|
|
|
2019-03-12 11:47:14 +01:00
|
|
|
if to_insert:
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.add_users_who_share_private_room(room_id, to_insert)
|
2017-06-15 11:15:00 +02:00
|
|
|
|
2021-01-04 16:05:12 +01:00
|
|
|
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
|
2021-10-13 14:50:00 +02:00
|
|
|
"""Called when when someone leaves a room. The user may be local or remote.
|
|
|
|
|
|
|
|
(If the person who left was the last local user in this room, the server
|
|
|
|
is no longer in the room. We call this function to forget that the remaining
|
|
|
|
remote users are in the room, even though they haven't left. So the name is
|
|
|
|
a little misleading!)
|
2017-05-31 16:00:29 +02:00
|
|
|
|
|
|
|
Args:
|
2021-01-04 16:05:12 +01:00
|
|
|
room_id: The room ID that user left or stopped being public that
|
|
|
|
user_id
|
2017-05-31 16:00:29 +02:00
|
|
|
"""
|
2021-10-18 15:20:04 +02:00
|
|
|
logger.debug("Removing user %r from room %r", user_id, room_id)
|
2017-05-31 16:00:29 +02:00
|
|
|
|
2019-03-07 10:22:53 +01:00
|
|
|
# Remove user from sharing tables
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.remove_user_who_share_room(user_id, room_id)
|
2017-06-01 15:50:46 +02:00
|
|
|
|
2021-10-18 15:20:04 +02:00
|
|
|
# Additionally, if they're a remote user and we're no longer joined
|
|
|
|
# to any rooms they're in, remove them from the user directory.
|
|
|
|
if not self.is_mine_id(user_id):
|
|
|
|
rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
|
2017-06-01 15:50:46 +02:00
|
|
|
|
2021-10-18 15:20:04 +02:00
|
|
|
if len(rooms_user_is_in) == 0:
|
|
|
|
logger.debug("Removing user %r from directory", user_id)
|
|
|
|
await self.store.remove_from_user_dir(user_id)
|
2017-06-15 11:15:00 +02:00
|
|
|
|
2021-10-13 14:50:00 +02:00
|
|
|
async def _handle_possible_remote_profile_change(
|
2021-01-04 16:05:12 +01:00
|
|
|
self,
|
|
|
|
user_id: str,
|
|
|
|
room_id: str,
|
|
|
|
prev_event_id: Optional[str],
|
|
|
|
event_id: Optional[str],
|
|
|
|
) -> None:
|
2017-06-01 16:39:51 +02:00
|
|
|
"""Check member event changes for any profile changes and update the
|
2021-10-13 14:50:00 +02:00
|
|
|
database if there are. This is intended for remote users only. The caller
|
|
|
|
is responsible for checking that the given user is remote.
|
2017-06-01 16:39:51 +02:00
|
|
|
"""
|
|
|
|
if not prev_event_id or not event_id:
|
|
|
|
return
|
|
|
|
|
2020-06-05 20:42:55 +02:00
|
|
|
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
|
|
|
|
event = await self.store.get_event(event_id, allow_none=True)
|
2017-06-13 11:19:26 +02:00
|
|
|
|
|
|
|
if not prev_event or not event:
|
|
|
|
return
|
2017-06-01 16:39:51 +02:00
|
|
|
|
|
|
|
if event.membership != Membership.JOIN:
|
|
|
|
return
|
|
|
|
|
|
|
|
prev_name = prev_event.content.get("displayname")
|
|
|
|
new_name = event.content.get("displayname")
|
2020-09-01 19:02:41 +02:00
|
|
|
# If the new name is an unexpected form, do not update the directory.
|
|
|
|
if not isinstance(new_name, str):
|
|
|
|
new_name = prev_name
|
2017-06-01 16:39:51 +02:00
|
|
|
|
|
|
|
prev_avatar = prev_event.content.get("avatar_url")
|
|
|
|
new_avatar = event.content.get("avatar_url")
|
2020-09-01 19:02:41 +02:00
|
|
|
# If the new avatar is an unexpected form, do not update the directory.
|
|
|
|
if not isinstance(new_avatar, str):
|
|
|
|
new_avatar = prev_avatar
|
2017-06-01 16:39:51 +02:00
|
|
|
|
|
|
|
if prev_name != new_name or prev_avatar != new_avatar:
|
2020-06-05 20:42:55 +02:00
|
|
|
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
|