# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright (C) The Matrix.org Foundation C.I.C. 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Collection, Dict, FrozenSet, List, Optional, Tuple

from typing_extensions import Final

from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.events import EventBase
from synapse.events.utils import prune_event
from synapse.storage.controllers import StorageControllers
from synapse.storage.state import StateFilter
from synapse.types import RetentionPolicy, StateMap, get_domain_from_id

logger = logging.getLogger(__name__)


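# History visibility settings, ordered from least restrictive ("world_readable") to
# most restrictive ("joined"). Where two settings are in play (e.g. at a
# history_visibility boundary), the lower-indexed, less restrictive one wins.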
VISIBILITY_PRIORITY = (
    HistoryVisibility.WORLD_READABLE,
    HistoryVisibility.SHARED,
    HistoryVisibility.INVITED,
    HistoryVisibility.JOINED,
)


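# Membership states, ordered from "most joined" to "least joined". At the user's own
# membership boundaries, the lower-indexed ("most joined") state is used.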
MEMBERSHIP_PRIORITY = (
    Membership.JOIN,
    Membership.INVITE,
    Membership.KNOCK,
    Membership.LEAVE,
    Membership.BAN,
)

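# The (event type, state key) pair for the room's m.room.history_visibility event.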
_HISTORY_VIS_KEY: Final[Tuple[str, str]] = (EventTypes.RoomHistoryVisibility, "")


async def filter_events_for_client(
    storage: StorageControllers,
    user_id: str,
    events: List[EventBase],
    is_peeking: bool = False,
    always_include_ids: FrozenSet[str] = frozenset(),
    filter_send_to_client: bool = True,
) -> List[EventBase]:
    """
    Check which events a user is allowed to see. If the user can see the event but its
    sender asked for their data to be erased, prune the content of the event.

    Args:
        storage: the storage controllers
        user_id: user id to be checked
        events: sequence of events to be checked
        is_peeking: should be True if:
          * the user is not currently a member of the room, and
          * the user has not been a member of the room since the given events
        always_include_ids: set of event ids to specifically
            include, unless the sender is ignored
        filter_send_to_client: Whether we're checking an event that's going to be
            sent to a client. This might not always be the case since this function can
            also be called to check whether a user can see the state at a given point.

    Returns:
        The filtered events.
    """
    # Filter out events that have been soft failed so that we don't relay them
    # to clients.
    events = [e for e in events if not e.internal_metadata.is_soft_failed()]

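    # We only need two pieces of state for each event: the room's history
    # visibility setting and the requesting user's own membership.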
    types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id))

    # we exclude outliers at this point, and then handle them separately later
    event_id_to_state = await storage.state.get_state_for_events(
        frozenset(e.event_id for e in events if not e.internal_metadata.outlier),
        state_filter=StateFilter.from_types(types),
    )

    # Get the users who are ignored by the requesting user.
    ignore_list = await storage.main.ignored_users(user_id)

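    # Find which senders have asked for their data to be erased, so that their
    # events can be pruned where necessary.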
    erased_senders = await storage.main.are_users_erased(e.sender for e in events)

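    # When filtering events to send to clients, fetch each room's retention policy
    # up front: events older than the policy's max_lifetime must not be shown.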
    if filter_send_to_client:
        room_ids = {e.room_id for e in events}
        retention_policies: Dict[str, RetentionPolicy] = {}

        for room_id in room_ids:
            retention_policies[
                room_id
            ] = await storage.main.get_retention_policy_for_room(room_id)

    def allowed(event: EventBase) -> Optional[EventBase]:
        """
        Args:
            event: event to check

        Returns:
            None if the user cannot see this event at all

            a redacted copy of the event if they can only see a redacted
            version

            the original event if they can see it as normal.
        """
        # Some checks only apply if the events are about to be sent to clients:
        # otherwise we're probably just checking whether the user could see events
        # in the room at that point in the DAG, and that shouldn't be decided by
        # these checks.
        if filter_send_to_client:
            if event.type == EventTypes.Dummy:
                return None

            if not event.is_state() and event.sender in ignore_list:
                return None

            # Until MSC2261 has landed we can't redact malicious alias events, so for
            # now we temporarily filter out m.room.aliases entirely to mitigate
            # abuse, while we spec a better solution to advertising aliases
            # on rooms.
            if event.type == EventTypes.Aliases:
                return None

            # Don't try to apply the room's retention policy if the event is a state
            # event, as MSC1763 states that retention is only considered for non-state
            # events.
            if not event.is_state():
                retention_policy = retention_policies[event.room_id]
                max_lifetime = retention_policy.max_lifetime

                if max_lifetime is not None:
                    oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime

                    if event.origin_server_ts < oldest_allowed_ts:
                        return None

        if event.event_id in always_include_ids:
            return event

        # we need to handle outliers separately, since we don't have the room state.
        if event.internal_metadata.outlier:
            # Normally these can't be seen by clients, but we make an exception
            # for out-of-band membership events (eg, incoming invites, or rejections of
            # said invite) for the user themselves.
            if event.type == EventTypes.Member and event.state_key == user_id:
                logger.debug("Returning out-of-band-membership event %s", event)
                return event

            return None

        state = event_id_to_state[event.event_id]

        # get the room_visibility at the time of the event.
        visibility = get_effective_room_visibility_from_state(state)

        # Always allow history visibility events on boundaries. This is done
        # by setting the effective visibility to the least restrictive
        # of the old vs new.
        if event.type == EventTypes.RoomHistoryVisibility:
            prev_content = event.unsigned.get("prev_content", {})
            prev_visibility = prev_content.get("history_visibility", None)

            if prev_visibility not in VISIBILITY_PRIORITY:
                prev_visibility = HistoryVisibility.SHARED

            new_priority = VISIBILITY_PRIORITY.index(visibility)
            old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
            if old_priority < new_priority:
                visibility = prev_visibility

        # likewise, if the event is the user's own membership event, use
        # the 'most joined' membership
        membership = None
        if event.type == EventTypes.Member and event.state_key == user_id:
            membership = event.content.get("membership", None)
            if membership not in MEMBERSHIP_PRIORITY:
                membership = "leave"

            prev_content = event.unsigned.get("prev_content", {})
            prev_membership = prev_content.get("membership", None)
            if prev_membership not in MEMBERSHIP_PRIORITY:
                prev_membership = "leave"

            # Always allow the user to see their own leave events, otherwise
            # they won't see the room disappear if they reject the invite
            #
            # (Note this doesn't work for out-of-band invite rejections, which don't
            # have prev_state populated. They are handled above in the outlier code.)
            if membership == "leave" and (
                prev_membership == "join" or prev_membership == "invite"
            ):
                return event

            new_priority = MEMBERSHIP_PRIORITY.index(membership)
            old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
            if old_priority < new_priority:
                membership = prev_membership

        # otherwise, get the user's membership at the time of the event.
        if membership is None:
            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                membership = membership_event.membership

        # if the user was a member of the room at the time of the event,
        # they can see it.
        if membership == Membership.JOIN:
            return event

        # otherwise, it depends on the room visibility.

        if visibility == HistoryVisibility.JOINED:
            # we weren't a member at the time of the event, so we can't
            # see this event.
            return None

        elif visibility == HistoryVisibility.INVITED:
            # user can also see the event if they were *invited* at the time
            # of the event.
            return event if membership == Membership.INVITE else None

        elif visibility == HistoryVisibility.SHARED and is_peeking:
            # if the visibility is shared, users cannot see the event unless
            # they have *subsequently* joined the room (or were members at the
            # time, of course)
            #
            # XXX: if the user has subsequently joined and then left again,
            # ideally we would share history up to the point they left. But
            # we don't know when they left. We just treat it as though they
            # never joined, and restrict access.
            return None

        # the visibility is either shared or world_readable, and the user was
        # not a member at the time. We allow it, unless the original sender has
        # asked for their data to be erased, in which case we return a redacted
        # version.
        if erased_senders[event.sender]:
            return prune_event(event)

        return event

    # Check each event: gives an iterable of None or (a potentially modified)
    # EventBase.
    filtered_events = map(allowed, events)

    # Turn it into a list and remove None entries before returning.
    return [ev for ev in filtered_events if ev]


def get_effective_room_visibility_from_state(state: StateMap[EventBase]) -> str:
    """Get the effective history visibility from a state map which includes the
    history_visibility event.

    Handles missing and invalid history visibility events.
    """
    visibility_event = state.get(_HISTORY_VIS_KEY, None)
    if not visibility_event:
        return HistoryVisibility.SHARED

    visibility = visibility_event.content.get(
        "history_visibility", HistoryVisibility.SHARED
    )
    if visibility not in VISIBILITY_PRIORITY:
        visibility = HistoryVisibility.SHARED
    return visibility


async def filter_events_for_server(
    storage: StorageControllers,
    server_name: str,
    events: List[EventBase],
    redact: bool = True,
    check_history_visibility_only: bool = False,
) -> List[EventBase]:
    """Filter a list of events based on whether the given server is allowed to
    see them.

    Args:
        storage: the storage controllers
        server_name: name of the server whose visibility is being checked
        events: the events to check
        redact: Whether to return a redacted version of events the server is not
            allowed to see, or to filter them out entirely.
        check_history_visibility_only: Whether to only check the
            history visibility, rather than things like whether the sender has been
            erased. This is used e.g. during pagination to decide whether to
            backfill or not.

    Returns:
        The filtered events.
    """

    def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool:
        if erased_senders and erased_senders[event.sender]:
            logger.info("Sender of %s has been erased, redacting", event.event_id)
            return True
        return False

    def check_event_is_visible(
        visibility: str, memberships: StateMap[EventBase]
    ) -> bool:
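        # A history visibility of "shared" or "world_readable" means the event is
        # visible without checking the server's membership in the room.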
        if visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED):
            return True

        # We now loop through all membership events looking for
        # membership states for the requesting server to determine
        # if the server is either in the room or has been invited
        # into the room.
        for ev in memberships.values():
            assert get_domain_from_id(ev.state_key) == server_name

            memtype = ev.membership
            if memtype == Membership.JOIN:
                return True
            elif memtype == Membership.INVITE:
                if visibility == HistoryVisibility.INVITED:
                    return True

        # server has no users in the room: redact
        return False

    if not check_history_visibility_only:
        erased_senders = await storage.main.are_users_erased(e.sender for e in events)
    else:
        # We don't want to check whether users are erased, which is equivalent
        # to no users having been erased.
        erased_senders = {}

    # Let's check to see if all the events have a history visibility
    # of "shared" or "world_readable". If that's the case then we don't
    # need to check membership (as we know the server is in the room).
    event_to_history_vis = await _event_to_history_vis(storage, events)

    # for any with restricted vis, we also need the memberships
    event_to_memberships = await _event_to_memberships(
        storage,
        [
            e
            for e in events
            if event_to_history_vis[e.event_id]
            not in (HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
        ],
        server_name,
    )

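    # Keep events that are visible and whose sender has not been erased; optionally
    # replace the rest with redacted (pruned) copies.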
    to_return = []
    for e in events:
        erased = is_sender_erased(e, erased_senders)
        visible = check_event_is_visible(
            event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
        )
        if visible and not erased:
            to_return.append(e)
        elif redact:
            to_return.append(prune_event(e))

    return to_return


async def _event_to_history_vis(
    storage: StorageControllers, events: Collection[EventBase]
) -> Dict[str, str]:
    """Get the history visibility at each of the given events

    Returns a map from event id to history_visibility setting
    """

    # outliers get special treatment here. We don't have the state at that point in the
    # room (and attempting to look it up will raise an exception), so all we can really
    # do is assume that the requesting server is allowed to see the event. That's
    # equivalent to there not being a history_visibility event, so we just exclude
    # any outliers from the query.
    event_to_state_ids = await storage.state.get_state_ids_for_events(
        frozenset(e.event_id for e in events if not e.internal_metadata.is_outlier()),
        state_filter=StateFilter.from_types(types=(_HISTORY_VIS_KEY,)),
    )

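    # Collect the distinct history_visibility event ids referenced by those state
    # maps, then fetch the corresponding events.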
    visibility_ids = {
        vis_event_id
        for vis_event_id in (
            state_ids.get(_HISTORY_VIS_KEY) for state_ids in event_to_state_ids.values()
        )
        if vis_event_id
    }
    vis_events = await storage.main.get_events(visibility_ids)

    result: Dict[str, str] = {}
    for event in events:
        vis = HistoryVisibility.SHARED
        state_ids = event_to_state_ids.get(event.event_id)

        # if we didn't find any state for this event, it's an outlier, and we assume
        # it's open
        visibility_id = None
        if state_ids:
            visibility_id = state_ids.get(_HISTORY_VIS_KEY)

        if visibility_id:
            vis_event = vis_events[visibility_id]
            vis = vis_event.content.get("history_visibility", HistoryVisibility.SHARED)
            assert isinstance(vis, str)

        result[event.event_id] = vis
    return result


async def _event_to_memberships(
    storage: StorageControllers, events: Collection[EventBase], server_name: str
) -> Dict[str, StateMap[EventBase]]:
    """Get the remote membership list at each of the given events

    Returns a map from event id to state map, which will contain only membership events
    for the given server.
    """

    if not events:
        return {}

    # for each event, get the event_ids of the membership state at those events.
    #
    # TODO: this means that we request the entire membership list. If there are only
    # one or two users on this server, and the room is huge, this is very wasteful
    # (it means more db work, and churns the *stateGroupMembersCache*).
    # It might be that we could extend StateFilter to specify "give me keys matching
    # *:<server_name>", to avoid this.

    event_to_state_ids = await storage.state.get_state_ids_for_events(
        frozenset(e.event_id for e in events),
        state_filter=StateFilter.from_types(types=((EventTypes.Member, None),)),
    )

    # We only want to pull out member events that correspond to the
    # server's domain.
    #
    # event_to_state_ids contains lots of duplicates, so it turns out to be
    # cheaper to build a complete event_id => (type, state_key) dict, and then
    # filter out the ones we don't want
    #
    event_id_to_state_key = {
        event_id: key
        for key_to_eid in event_to_state_ids.values()
        for key, event_id in key_to_eid.items()
    }

    def include(state_key: str) -> bool:
        # we avoid using get_domain_from_id here for efficiency.
        idx = state_key.find(":")
        if idx == -1:
            return False
        return state_key[idx + 1 :] == server_name

    event_map = await storage.main.get_events(
        [
            e_id
            for e_id, (_, state_key) in event_id_to_state_key.items()
            if include(state_key)
        ]
    )

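    # Rebuild the per-event state maps, keeping only the membership events for this
    # server that were fetched above.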
    return {
        e_id: {
            key: event_map[inner_e_id]
            for key, inner_e_id in key_to_eid.items()
            if inner_e_id in event_map
        }
        for e_id, key_to_eid in event_to_state_ids.items()
    }