2015-01-26 19:53:31 +01:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# Copyright 2015 OpenMarket Ltd
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
from ._base import BaseHandler
|
|
|
|
|
|
|
|
from synapse.streams.config import PaginationConfig
|
2015-01-30 14:33:41 +01:00
|
|
|
from synapse.api.constants import Membership, EventTypes
|
2015-01-26 19:53:31 +01:00
|
|
|
|
|
|
|
from twisted.internet import defer
|
|
|
|
|
2015-01-26 16:46:31 +01:00
|
|
|
import collections
|
2015-01-26 19:53:31 +01:00
|
|
|
import logging
|
|
|
|
|
|
|
|
# Module-level logger for this file (standard logging pattern).
logger = logging.getLogger(__name__)
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
# Immutable record of the per-request parameters controlling a sync:
# the requesting user, the event limit, whether a gap is allowed, the
# sort order, whether to backfill, and the client's event filter.
SyncConfig = collections.namedtuple(
    "SyncConfig",
    ["user", "limit", "gap", "sort", "backfill", "filter"],
)
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
|
2015-01-27 17:24:22 +01:00
|
|
|
class RoomSyncResult(collections.namedtuple("RoomSyncResult", [
    "room_id",      # The room these results are for.
    "limited",      # True if the event window was truncated.
    "published",    # True if the room is in the published room list.
    "events",       # List of recent events in the room.
    "state",        # List of state events for the room.
    "prev_batch",   # Token for paginating further back in the room.
    "ephemeral",    # Non-persisted events, e.g. typing notifications.
])):
    """The sync result for a single room."""
    __slots__ = []

    def __nonzero__(self):
        """Make the result appear empty if there are no updates. This is used
        to tell if room needs to be part of the sync result.
        """
        return bool(self.events or self.state or self.ephemeral)

    # Python 3 consults __bool__ rather than __nonzero__; without this alias
    # a non-empty tuple would always be truthy there and empty room results
    # would never be filtered out.
    __bool__ = __nonzero__
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
class SyncResult(collections.namedtuple("SyncResult", [
    "next_batch",  # Token for the next sync
    "private_user_data",  # List of private events for the user.
    "public_user_data",  # List of public events for all users.
    "rooms",  # RoomSyncResult for each room.
])):
    """The overall result of a sync request."""
    __slots__ = []

    def __nonzero__(self):
        """Make the result appear empty if there are no updates. This is used
        to tell if the notifier needs to wait for more events when polling for
        events.
        """
        return bool(
            self.private_user_data or self.public_user_data or self.rooms
        )

    # Python 3 consults __bool__ rather than __nonzero__; alias it so the
    # "empty result" check works on both interpreter versions.
    __bool__ = __nonzero__
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
class SyncHandler(BaseHandler):
|
|
|
|
|
|
|
|
def __init__(self, hs):
|
|
|
|
super(SyncHandler, self).__init__(hs)
|
|
|
|
self.event_sources = hs.get_event_sources()
|
2015-01-26 19:53:31 +01:00
|
|
|
self.clock = hs.get_clock()
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2015-01-27 21:09:52 +01:00
|
|
|
    @defer.inlineCallbacks
    def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0):
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.

        Args:
            sync_config (SyncConfig): parameters for this sync request.
            since_token: stream token from the client's previous sync, or
                None for an initial sync.
            timeout: how long to wait for new events before returning; 0
                means respond immediately.

        Returns:
            A Deferred SyncResult.
        """
        if timeout == 0 or since_token is None:
            # No long-polling requested (or nothing to diff against), so
            # compute and return the sync immediately.
            result = yield self.current_sync_for_user(sync_config, since_token)
            defer.returnValue(result)
        else:
            # Callback invoked by the notifier once something may have
            # changed; it recomputes the sync from the client's since_token.
            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            rm_handler = self.hs.get_handlers().room_member_handler

            # Application services watch their whole namespace of rooms
            # rather than just the rooms the user has joined.
            app_service = yield self.store.get_app_service_by_user_id(
                sync_config.user.to_string()
            )
            if app_service:
                rooms = yield self.store.get_app_service_rooms(app_service)
                room_ids = set(r.room_id for r in rooms)
            else:
                room_ids = yield rm_handler.get_joined_rooms_for_user(
                    sync_config.user
                )

            # Block until an event arrives in one of the rooms (or for the
            # user), the timeout fires, or the callback yields a non-empty
            # result.
            result = yield self.notifier.wait_for_events(
                sync_config.user, room_ids,
                sync_config.filter, timeout, current_sync_callback
            )
            defer.returnValue(result)
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
def current_sync_for_user(self, sync_config, since_token=None):
|
2015-01-27 17:24:22 +01:00
|
|
|
"""Get the sync for client needed to match what the server has now.
|
|
|
|
Returns:
|
|
|
|
A Deferred SyncResult.
|
|
|
|
"""
|
2015-01-26 16:46:31 +01:00
|
|
|
if since_token is None:
|
2015-01-26 19:53:31 +01:00
|
|
|
return self.initial_sync(sync_config)
|
2015-01-26 16:46:31 +01:00
|
|
|
else:
|
2015-01-27 17:24:22 +01:00
|
|
|
if sync_config.gap:
|
|
|
|
return self.incremental_sync_with_gap(sync_config, since_token)
|
|
|
|
else:
|
2015-02-10 18:58:36 +01:00
|
|
|
# TODO(mjark): Handle gapless sync
|
2015-01-30 16:52:05 +01:00
|
|
|
raise NotImplementedError()
|
2015-01-26 16:46:31 +01:00
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    def initial_sync(self, sync_config):
        """Get a sync for a client which is starting without any state.

        Args:
            sync_config (SyncConfig): parameters for this sync request.

        Returns:
            A Deferred SyncResult.
        """
        if sync_config.sort == "timeline,desc":
            # TODO(mjark): Handle going through events in reverse order?.
            # What does "most recent events" mean when applying the limits mean
            # in this case?
            raise NotImplementedError()

        # Snapshot the stream position now; it becomes the client's
        # next_batch token.
        now_token = yield self.event_sources.get_current_token()

        presence_stream = self.event_sources.sources["presence"]
        # TODO (mjark): This looks wrong, shouldn't we be getting the presence
        # UP to the present rather than after the present?
        pagination_config = PaginationConfig(from_token=now_token)
        presence, _ = yield presence_stream.get_pagination_rows(
            user=sync_config.user,
            pagination_config=pagination_config.get_source_config("presence"),
            key=None
        )
        # All rooms the user is joined to or invited to appear in an
        # initial sync.
        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=sync_config.user.to_string(),
            membership_list=[Membership.INVITE, Membership.JOIN]
        )

        # TODO (mjark): Does public mean "published"?
        published_rooms = yield self.store.get_rooms(is_public=True)
        published_room_ids = set(r["room_id"] for r in published_rooms)

        rooms = []
        for event in room_list:
            # One RoomSyncResult per room, each paginated back from
            # now_token.
            room_sync = yield self.initial_sync_for_room(
                event.room_id, sync_config, now_token, published_room_ids
            )
            rooms.append(room_sync)

        defer.returnValue(SyncResult(
            public_user_data=presence,
            private_user_data=[],
            rooms=rooms,
            next_batch=now_token,
        ))
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
2015-01-29 17:23:03 +01:00
|
|
|
def initial_sync_for_room(self, room_id, sync_config, now_token,
|
2015-01-29 17:41:49 +01:00
|
|
|
published_room_ids):
|
2015-01-27 17:24:22 +01:00
|
|
|
"""Sync a room for a client which is starting without any state
|
|
|
|
Returns:
|
|
|
|
A Deferred RoomSyncResult.
|
|
|
|
"""
|
2015-01-30 12:35:20 +01:00
|
|
|
|
2015-01-30 12:42:09 +01:00
|
|
|
recents, prev_batch_token, limited = yield self.load_filtered_recents(
|
2015-01-30 12:35:20 +01:00
|
|
|
room_id, sync_config, now_token,
|
2015-01-27 17:24:22 +01:00
|
|
|
)
|
2015-01-30 12:35:20 +01:00
|
|
|
|
2015-02-09 18:41:29 +01:00
|
|
|
current_state = yield self.state_handler.get_current_state(
|
2015-01-27 17:24:22 +01:00
|
|
|
room_id
|
|
|
|
)
|
2015-02-09 18:41:29 +01:00
|
|
|
current_state_events = current_state.values()
|
2015-01-27 17:24:22 +01:00
|
|
|
|
|
|
|
defer.returnValue(RoomSyncResult(
|
|
|
|
room_id=room_id,
|
|
|
|
published=room_id in published_room_ids,
|
2015-01-30 12:35:20 +01:00
|
|
|
events=recents,
|
2015-01-27 17:24:22 +01:00
|
|
|
prev_batch=prev_batch_token,
|
|
|
|
state=current_state_events,
|
2015-01-30 12:35:20 +01:00
|
|
|
limited=limited,
|
2015-01-29 17:41:21 +01:00
|
|
|
ephemeral=[],
|
2015-01-27 17:24:22 +01:00
|
|
|
))
|
|
|
|
|
|
|
|
    @defer.inlineCallbacks
    def incremental_sync_with_gap(self, sync_config, since_token):
        """ Get the incremental delta needed to bring the client up to
        date with the server.

        Args:
            sync_config (SyncConfig): parameters for this sync request.
            since_token: the stream token of the client's previous sync.

        Returns:
            A Deferred SyncResult.
        """
        if sync_config.sort == "timeline,desc":
            # TODO(mjark): Handle going through events in reverse order?.
            # What does "most recent events" mean when applying the limits mean
            # in this case?
            raise NotImplementedError()

        now_token = yield self.event_sources.get_current_token()

        # Advance each source's key in now_token as we consume it, so the
        # returned next_batch reflects exactly what the client was given.
        presence_source = self.event_sources.sources["presence"]
        presence, presence_key = yield presence_source.get_new_events_for_user(
            user=sync_config.user,
            from_key=since_token.presence_key,
            limit=sync_config.limit,
        )
        now_token = now_token.copy_and_replace("presence_key", presence_key)

        typing_source = self.event_sources.sources["typing"]
        typing, typing_key = yield typing_source.get_new_events_for_user(
            user=sync_config.user,
            from_key=since_token.typing_key,
            limit=sync_config.limit,
        )
        now_token = now_token.copy_and_replace("typing_key", typing_key)

        # NOTE(review): this keeps only a single typing event per room —
        # a later event for the same room_id overwrites the earlier one.
        # Confirm whether the typing source can emit more than one event
        # per room per poll.
        typing_by_room = {event["room_id"]: [event] for event in typing}
        for event in typing:
            # The room is implied by the bucket, so strip the key from the
            # event body sent to the client.
            event.pop("room_id")
        logger.debug("Typing %r", typing_by_room)

        rm_handler = self.hs.get_handlers().room_member_handler
        # Application services sync their whole room namespace, not just
        # joined rooms.
        app_service = yield self.store.get_app_service_by_user_id(
            sync_config.user.to_string()
        )
        if app_service:
            rooms = yield self.store.get_app_service_rooms(app_service)
            room_ids = set(r.room_id for r in rooms)
        else:
            room_ids = yield rm_handler.get_joined_rooms_for_user(
                sync_config.user
            )

        # TODO (mjark): Does public mean "published"?
        published_rooms = yield self.store.get_rooms(is_public=True)
        published_room_ids = set(r["room_id"] for r in published_rooms)

        # Fetch limit + 1 events across all rooms so we can tell whether
        # the window was truncated (i.e. whether there is a gap).
        room_events, _ = yield self.store.get_room_events_stream(
            sync_config.user.to_string(),
            from_key=since_token.room_key,
            to_key=now_token.room_key,
            room_id=None,
            limit=sync_config.limit + 1,
        )

        rooms = []
        if len(room_events) <= sync_config.limit:
            # There is no gap in any of the rooms. Therefore we can just
            # partition the new events by room and return them.
            events_by_room_id = {}
            for event in room_events:
                events_by_room_id.setdefault(event.room_id, []).append(event)

            for room_id in room_ids:
                recents = events_by_room_id.get(room_id, [])
                state = [event for event in recents if event.is_state()]
                if recents:
                    # Paginating backwards from the oldest event returned.
                    prev_batch = now_token.copy_and_replace(
                        "room_key", recents[0].internal_metadata.before
                    )
                else:
                    prev_batch = now_token

                # If the user just joined, replace the delta with the full
                # current state of the room.
                state = yield self.check_joined_room(
                    sync_config, room_id, state
                )

                room_sync = RoomSyncResult(
                    room_id=room_id,
                    published=room_id in published_room_ids,
                    events=recents,
                    prev_batch=prev_batch,
                    state=state,
                    limited=False,
                    ephemeral=typing_by_room.get(room_id, [])
                )
                # RoomSyncResult is falsy when it carries no updates; skip
                # empty rooms.
                if room_sync:
                    rooms.append(room_sync)
        else:
            # Too many events for a single window: sync each room
            # individually, leaving a gap the client must backfill.
            for room_id in room_ids:
                room_sync = yield self.incremental_sync_with_gap_for_room(
                    room_id, sync_config, since_token, now_token,
                    published_room_ids, typing_by_room
                )
                if room_sync:
                    rooms.append(room_sync)

        defer.returnValue(SyncResult(
            public_user_data=presence,
            private_user_data=[],
            rooms=rooms,
            next_batch=now_token,
        ))
|
2015-01-26 16:46:31 +01:00
|
|
|
|
2015-07-03 15:51:01 +02:00
|
|
|
@defer.inlineCallbacks
|
|
|
|
def _filter_events_for_client(self, user_id, room_id, events):
|
2015-08-11 11:41:40 +02:00
|
|
|
event_id_to_state = yield self.store.get_state_for_events(
|
2015-08-04 10:32:23 +02:00
|
|
|
room_id, frozenset(e.event_id for e in events),
|
|
|
|
types=(
|
|
|
|
(EventTypes.RoomHistoryVisibility, ""),
|
|
|
|
(EventTypes.Member, user_id),
|
|
|
|
)
|
2015-07-03 15:51:01 +02:00
|
|
|
)
|
|
|
|
|
2015-08-12 18:05:24 +02:00
|
|
|
def allowed(event, state):
|
2015-07-03 15:51:01 +02:00
|
|
|
if event.type == EventTypes.RoomHistoryVisibility:
|
|
|
|
return True
|
|
|
|
|
2015-07-06 14:05:52 +02:00
|
|
|
membership_ev = state.get((EventTypes.Member, user_id), None)
|
|
|
|
if membership_ev:
|
|
|
|
membership = membership_ev.membership
|
|
|
|
else:
|
|
|
|
membership = Membership.LEAVE
|
|
|
|
|
|
|
|
if membership == Membership.JOIN:
|
2015-07-03 15:51:01 +02:00
|
|
|
return True
|
|
|
|
|
|
|
|
history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
|
2015-07-06 14:05:52 +02:00
|
|
|
if history:
|
|
|
|
visibility = history.content.get("history_visibility", "shared")
|
|
|
|
else:
|
|
|
|
visibility = "shared"
|
2015-07-03 15:51:01 +02:00
|
|
|
|
2015-07-06 14:05:52 +02:00
|
|
|
if visibility == "public":
|
|
|
|
return True
|
|
|
|
elif visibility == "shared":
|
|
|
|
return True
|
|
|
|
elif visibility == "joined":
|
|
|
|
return membership == Membership.JOIN
|
|
|
|
elif visibility == "invited":
|
|
|
|
return membership == Membership.INVITE
|
2015-07-03 15:51:01 +02:00
|
|
|
|
2015-07-06 14:05:52 +02:00
|
|
|
return True
|
2015-08-11 11:41:40 +02:00
|
|
|
|
2015-07-03 15:51:01 +02:00
|
|
|
defer.returnValue([
|
2015-08-12 17:01:10 +02:00
|
|
|
event
|
|
|
|
for event in events
|
|
|
|
if allowed(event, event_id_to_state[event.event_id])
|
2015-07-03 15:51:01 +02:00
|
|
|
])
|
|
|
|
|
2015-01-30 12:32:35 +01:00
|
|
|
    @defer.inlineCallbacks
    def load_filtered_recents(self, room_id, sync_config, now_token,
                              since_token=None):
        """Load the most recent events for a room and apply the client's
        filter, re-fetching older batches until either enough filtered
        events are collected or the retry budget is spent.

        Returns:
            A Deferred (events, prev_batch_token, limited) tuple, where
            ``limited`` is True when there may be more matching history
            that was not returned.
        """
        limited = True
        recents = []
        # Over-fetch, since filtering may discard a large share of events.
        filtering_factor = 2
        load_limit = max(sync_config.limit * filtering_factor, 100)
        max_repeat = 3  # Only try a few times per room, otherwise
        room_key = now_token.room_key
        end_key = room_key

        while limited and len(recents) < sync_config.limit and max_repeat:
            events, keys = yield self.store.get_recent_events_for_room(
                room_id,
                limit=load_limit + 1,
                from_token=since_token.room_key if since_token else None,
                end_token=end_key,
            )
            (room_key, _) = keys
            # Next iteration paginates from where this batch started.
            # NOTE(review): rebuilding the key as "s" + last "-"-separated
            # component assumes a specific stream-token format — confirm
            # against the storage layer.
            end_key = "s" + room_key.split('-')[-1]
            loaded_recents = sync_config.filter.filter_room_events(events)
            # Also drop events this user isn't allowed to see.
            loaded_recents = yield self._filter_events_for_client(
                sync_config.user.to_string(), room_id, loaded_recents,
            )
            # Older events are prepended to what we've already collected.
            loaded_recents.extend(recents)
            recents = loaded_recents
            if len(events) <= load_limit:
                # The store returned fewer events than requested, so we've
                # reached the start of the room's history.
                limited = False
            max_repeat -= 1

        if len(recents) > sync_config.limit:
            # Trim to the newest `limit` events; paginate back from the
            # oldest event we're returning.
            recents = recents[-sync_config.limit:]
            room_key = recents[0].internal_metadata.before

        prev_batch_token = now_token.copy_and_replace(
            "room_key", room_key
        )

        defer.returnValue((recents, prev_batch_token, limited))
|
|
|
|
|
2015-01-26 16:46:31 +01:00
|
|
|
@defer.inlineCallbacks
|
2015-01-27 17:24:22 +01:00
|
|
|
def incremental_sync_with_gap_for_room(self, room_id, sync_config,
|
|
|
|
since_token, now_token,
|
2015-01-29 04:33:51 +01:00
|
|
|
published_room_ids, typing_by_room):
|
2015-01-27 17:24:22 +01:00
|
|
|
""" Get the incremental delta needed to bring the client up to date for
|
|
|
|
the room. Gives the client the most recent events and the changes to
|
|
|
|
state.
|
|
|
|
Returns:
|
|
|
|
A Deferred RoomSyncResult
|
|
|
|
"""
|
2015-01-30 12:32:35 +01:00
|
|
|
|
2015-01-27 17:24:22 +01:00
|
|
|
# TODO(mjark): Check for redactions we might have missed.
|
2015-01-30 12:32:35 +01:00
|
|
|
|
2015-01-30 12:42:09 +01:00
|
|
|
recents, prev_batch_token, limited = yield self.load_filtered_recents(
|
2015-01-30 12:35:20 +01:00
|
|
|
room_id, sync_config, now_token, since_token,
|
2015-01-27 17:24:22 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
logging.debug("Recents %r", recents)
|
|
|
|
|
|
|
|
# TODO(mjark): This seems racy since this isn't being passed a
|
|
|
|
# token to indicate what point in the stream this is
|
2015-02-09 18:41:29 +01:00
|
|
|
current_state = yield self.state_handler.get_current_state(
|
2015-01-27 17:24:22 +01:00
|
|
|
room_id
|
|
|
|
)
|
2015-02-09 18:41:29 +01:00
|
|
|
current_state_events = current_state.values()
|
2015-01-27 17:24:22 +01:00
|
|
|
|
|
|
|
state_at_previous_sync = yield self.get_state_at_previous_sync(
|
|
|
|
room_id, since_token=since_token
|
|
|
|
)
|
|
|
|
|
|
|
|
state_events_delta = yield self.compute_state_delta(
|
|
|
|
since_token=since_token,
|
|
|
|
previous_state=state_at_previous_sync,
|
|
|
|
current_state=current_state_events,
|
|
|
|
)
|
|
|
|
|
2015-01-30 14:33:41 +01:00
|
|
|
state_events_delta = yield self.check_joined_room(
|
|
|
|
sync_config, room_id, state_events_delta
|
|
|
|
)
|
|
|
|
|
2015-01-27 17:24:22 +01:00
|
|
|
room_sync = RoomSyncResult(
|
|
|
|
room_id=room_id,
|
|
|
|
published=room_id in published_room_ids,
|
|
|
|
events=recents,
|
|
|
|
prev_batch=prev_batch_token,
|
|
|
|
state=state_events_delta,
|
|
|
|
limited=limited,
|
2015-01-30 17:24:40 +01:00
|
|
|
ephemeral=typing_by_room.get(room_id, [])
|
2015-01-27 17:24:22 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
logging.debug("Room sync: %r", room_sync)
|
|
|
|
|
|
|
|
defer.returnValue(room_sync)
|
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
|
|
|
def get_state_at_previous_sync(self, room_id, since_token):
|
|
|
|
""" Get the room state at the previous sync the client made.
|
|
|
|
Returns:
|
|
|
|
A Deferred list of Events.
|
|
|
|
"""
|
|
|
|
last_events, token = yield self.store.get_recent_events_for_room(
|
|
|
|
room_id, end_token=since_token.room_key, limit=1,
|
|
|
|
)
|
|
|
|
|
|
|
|
if last_events:
|
|
|
|
last_event = last_events[0]
|
|
|
|
last_context = yield self.state_handler.compute_event_context(
|
|
|
|
last_event
|
|
|
|
)
|
|
|
|
if last_event.is_state():
|
|
|
|
state = [last_event] + last_context.current_state.values()
|
|
|
|
else:
|
|
|
|
state = last_context.current_state.values()
|
|
|
|
else:
|
|
|
|
state = ()
|
|
|
|
defer.returnValue(state)
|
|
|
|
|
|
|
|
def compute_state_delta(self, since_token, previous_state, current_state):
|
|
|
|
""" Works out the differnce in state between the current state and the
|
|
|
|
state the client got when it last performed a sync.
|
|
|
|
Returns:
|
|
|
|
A list of events.
|
|
|
|
"""
|
|
|
|
# TODO(mjark) Check if the state events were received by the server
|
|
|
|
# after the previous sync, since we need to include those state
|
|
|
|
# updates even if they occured logically before the previous event.
|
|
|
|
# TODO(mjark) Check for new redactions in the state events.
|
2015-01-27 21:19:36 +01:00
|
|
|
previous_dict = {event.event_id: event for event in previous_state}
|
2015-01-27 17:24:22 +01:00
|
|
|
state_delta = []
|
|
|
|
for event in current_state:
|
|
|
|
if event.event_id not in previous_dict:
|
|
|
|
state_delta.append(event)
|
|
|
|
return state_delta
|
2015-01-30 14:33:41 +01:00
|
|
|
|
|
|
|
@defer.inlineCallbacks
|
|
|
|
def check_joined_room(self, sync_config, room_id, state_delta):
|
|
|
|
joined = False
|
|
|
|
for event in state_delta:
|
|
|
|
if (
|
|
|
|
event.type == EventTypes.Member
|
|
|
|
and event.state_key == sync_config.user.to_string()
|
|
|
|
):
|
|
|
|
if event.content["membership"] == Membership.JOIN:
|
|
|
|
joined = True
|
|
|
|
|
|
|
|
if joined:
|
2015-02-09 18:41:29 +01:00
|
|
|
res = yield self.state_handler.get_current_state(room_id)
|
|
|
|
state_delta = res.values()
|
2015-01-30 14:33:41 +01:00
|
|
|
|
|
|
|
defer.returnValue(state_delta)
|