2014-08-12 16:10:52 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2015-01-06 14:21:39 +01:00
|
|
|
# Copyright 2014, 2015 OpenMarket Ltd
|
2014-08-12 16:10:52 +02:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2014-08-19 17:40:25 +02:00
|
|
|
""" This module is responsible for getting events from the DB for pagination
|
|
|
|
and event streaming.
|
|
|
|
|
|
|
|
The order it returns events in depends on whether we are streaming forwards or
|
|
|
|
are paginating backwards. We do this because we want to handle out of order
|
|
|
|
messages nicely, while still returning them in the correct order when we
|
|
|
|
paginate backwards.
|
|
|
|
|
|
|
|
This is implemented by keeping two ordering columns: stream_ordering and
|
|
|
|
topological_ordering. Stream ordering is basically insertion/received order
|
|
|
|
(except for events from backfill requests). The topological_ordering is a
|
|
|
|
weak ordering of events based on the pdu graph.
|
|
|
|
|
|
|
|
This means that we have to have two different types of tokens, depending on
|
|
|
|
what sort order was used:
|
|
|
|
- stream tokens are of the form: "s%d", which maps directly to the column
|
|
|
|
 - topological tokens: "t%d-%d", where the integers map to the topological
|
|
|
|
and stream ordering columns respectively.
|
|
|
|
"""
|
|
|
|
|
2014-08-14 19:40:50 +02:00
|
|
|
from twisted.internet import defer
|
2014-08-13 04:14:34 +02:00
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
from ._base import SQLBaseStore
|
2014-08-19 15:19:48 +02:00
|
|
|
from synapse.api.errors import SynapseError
|
|
|
|
from synapse.util.logutils import log_function
|
2014-08-14 19:01:39 +02:00
|
|
|
|
2015-01-13 15:14:21 +01:00
|
|
|
from collections import namedtuple
|
|
|
|
|
2014-08-12 16:10:52 +02:00
|
|
|
import logging
|
|
|
|
|
|
|
|
|
2014-08-14 19:01:39 +02:00
|
|
|
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)


# Hard cap on the number of events a single stream query will return.
MAX_STREAM_SIZE = 1000


# Names for the two token orderings (see the module docstring).
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
|
|
|
|
|
|
|
|
|
2015-01-13 15:14:21 +01:00
|
|
|
class _StreamToken(namedtuple("_StreamToken", "topological stream")):
|
|
|
|
"""Tokens are positions between events. The token "s1" comes after event 1.
|
|
|
|
|
|
|
|
s0 s1
|
|
|
|
| |
|
|
|
|
[0] V [1] V [2]
|
|
|
|
|
|
|
|
Tokens can either be a point in the live event stream or a cursor going
|
|
|
|
through historic events.
|
|
|
|
|
|
|
|
When traversing the live event stream events are ordered by when they
|
|
|
|
arrived at the homeserver.
|
|
|
|
|
|
|
|
When traversing historic events the events are ordered by their depth in
|
|
|
|
the event graph "topological_ordering" and then by when they arrived at the
|
|
|
|
homeserver "stream_ordering".
|
|
|
|
|
|
|
|
Live tokens start with an "s" followed by the "stream_ordering" id of the
|
|
|
|
event it comes after. Historic tokens start with a "t" followed by the
|
|
|
|
"topological_ordering" id of the event it comes after, follewed by "-",
|
|
|
|
followed by the "stream_ordering" id of the event it comes after.
|
|
|
|
"""
|
|
|
|
__slots__ = []
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def parse(cls, string):
|
|
|
|
try:
|
|
|
|
if string[0] == 's':
|
2015-01-29 02:48:36 +01:00
|
|
|
return cls(topological=None, stream=int(string[1:]))
|
2015-01-13 15:14:21 +01:00
|
|
|
if string[0] == 't':
|
|
|
|
parts = string[1:].split('-', 1)
|
2015-01-29 02:48:36 +01:00
|
|
|
return cls(topological=int(parts[0]), stream=int(parts[1]))
|
2015-01-13 15:14:21 +01:00
|
|
|
except:
|
|
|
|
pass
|
|
|
|
raise SynapseError(400, "Invalid token %r" % (string,))
|
|
|
|
|
|
|
|
@classmethod
|
|
|
|
def parse_stream_token(cls, string):
|
|
|
|
try:
|
|
|
|
if string[0] == 's':
|
2015-01-29 02:48:36 +01:00
|
|
|
return cls(topological=None, stream=int(string[1:]))
|
2015-01-13 15:14:21 +01:00
|
|
|
except:
|
|
|
|
pass
|
|
|
|
raise SynapseError(400, "Invalid token %r" % (string,))
|
|
|
|
|
|
|
|
def __str__(self):
|
|
|
|
if self.topological is not None:
|
|
|
|
return "t%d-%d" % (self.topological, self.stream)
|
|
|
|
else:
|
|
|
|
return "s%d" % (self.stream,)
|
|
|
|
|
|
|
|
def lower_bound(self):
|
|
|
|
if self.topological is None:
|
|
|
|
return "(%d < %s)" % (self.stream, "stream_ordering")
|
|
|
|
else:
|
|
|
|
return "(%d < %s OR (%d == %s AND %d < %s))" % (
|
|
|
|
self.topological, "topological_ordering",
|
|
|
|
self.topological, "topological_ordering",
|
|
|
|
self.stream, "stream_ordering",
|
|
|
|
)
|
2014-08-19 15:19:48 +02:00
|
|
|
|
2015-01-13 15:14:21 +01:00
|
|
|
def upper_bound(self):
|
|
|
|
if self.topological is None:
|
|
|
|
return "(%d >= %s)" % (self.stream, "stream_ordering")
|
|
|
|
else:
|
|
|
|
return "(%d > %s OR (%d == %s AND %d >= %s))" % (
|
|
|
|
self.topological, "topological_ordering",
|
|
|
|
self.topological, "topological_ordering",
|
|
|
|
self.stream, "stream_ordering",
|
|
|
|
)
|
2014-08-19 15:19:48 +02:00
|
|
|
|
|
|
|
|
2014-08-14 19:01:39 +02:00
|
|
|
class StreamStore(SQLBaseStore):
    """Storage methods for reading events in stream / topological order."""

    @log_function
    def get_room_events_stream(self, user_id, from_key, to_key, room_id,
                               limit=0, with_feedback=False):
        """Fetch new events on the live stream for rooms the user is in,
        plus any membership events about the user (e.g. invites).

        Args:
            user_id (str): the user to fetch the stream for.
            from_key (str): stream token to start after (exclusive).
            to_key (str): stream token to stop at (inclusive).
            room_id: unused here; kept for interface compatibility.
            limit (int): max events to return; 0 means the default cap.
            with_feedback (bool): unused (see TODO below).
        Returns:
            A deferred (events, next_stream_key) pair.
        """
        # TODO (erikj): Handle compressed feedback

        current_room_membership_sql = (
            "SELECT m.room_id FROM room_memberships as m "
            "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
            "WHERE m.user_id = ? AND m.membership = 'join'"
        )

        # We also want to get any membership events about that user, e.g.
        # invites or leave notifications.
        membership_sql = (
            "SELECT m.event_id FROM room_memberships as m "
            "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
            "WHERE m.user_id = ? "
        )

        # Cap the requested limit at MAX_STREAM_SIZE. (This previously used
        # max(), which *inflated* small limits to 1000 instead of capping.)
        if limit:
            limit = min(limit, MAX_STREAM_SIZE)
        else:
            limit = MAX_STREAM_SIZE

        # From and to keys should be integers from ordering.
        from_id = _StreamToken.parse_stream_token(from_key)
        to_id = _StreamToken.parse_stream_token(to_key)

        if from_key == to_key:
            # Nothing can have happened between identical positions.
            return defer.succeed(([], to_key))

        sql = (
            "SELECT e.event_id, e.stream_ordering FROM events AS e WHERE "
            "(e.outlier = 0 AND (room_id IN (%(current)s)) OR "
            "(event_id IN (%(invites)s))) "
            "AND e.stream_ordering > ? AND e.stream_ordering <= ? "
            "ORDER BY stream_ordering ASC LIMIT %(limit)d "
        ) % {
            "current": current_room_membership_sql,
            "invites": membership_sql,
            "limit": limit
        }

        def f(txn):
            txn.execute(sql, (user_id, user_id, from_id.stream, to_id.stream,))

            rows = self.cursor_to_dict(txn)

            ret = self._get_events_txn(
                txn,
                [r["event_id"] for r in rows],
                get_prev_content=True
            )

            self._set_before_and_after(ret, rows)

            if rows:
                # Advance the key to just after the newest row we saw.
                key = "s%d" % max([r["stream_ordering"] for r in rows])
            else:
                # Assume we didn't get anything because there was nothing to
                # get.
                key = to_key

            return ret, key

        return self.runInteraction("get_room_events_stream", f)

    @log_function
    def paginate_room_events(self, room_id, from_key, to_key=None,
                             direction='b', limit=-1,
                             with_feedback=False):
        """Page through a room's events in topological order.

        Args:
            room_id (str): the room to paginate.
            from_key (str): token (stream or topological) to start from.
            to_key (str|None): optional token bounding the other end.
            direction (str): 'b' to paginate backwards, anything else forwards.
            limit (int): max rows; values <= 0 mean no limit.
            with_feedback (bool): unused (see TODO below).
        Returns:
            A deferred (events, next_token) pair.
        """
        # TODO (erikj): Handle compressed feedback

        # Tokens really represent positions between elements, but we use
        # the convention of pointing to the event before the gap. Hence
        # we have a bit of asymmetry when it comes to equalities.
        args = [room_id]
        if direction == 'b':
            order = "DESC"
            bounds = _StreamToken.parse(from_key).upper_bound()
            if to_key:
                bounds = "%s AND %s" % (
                    bounds, _StreamToken.parse(to_key).lower_bound()
                )
        else:
            order = "ASC"
            bounds = _StreamToken.parse(from_key).lower_bound()
            if to_key:
                bounds = "%s AND %s" % (
                    bounds, _StreamToken.parse(to_key).upper_bound()
                )

        if int(limit) > 0:
            args.append(int(limit))
            limit_str = " LIMIT ?"
        else:
            limit_str = ""

        sql = (
            "SELECT * FROM events"
            " WHERE outlier = 0 AND room_id = ? AND %(bounds)s"
            " ORDER BY topological_ordering %(order)s,"
            " stream_ordering %(order)s %(limit)s"
        ) % {
            "bounds": bounds,
            "order": order,
            "limit": limit_str
        }

        def f(txn):
            txn.execute(sql, args)

            rows = self.cursor_to_dict(txn)

            if rows:
                topo = rows[-1]["topological_ordering"]
                toke = rows[-1]["stream_ordering"]
                if direction == 'b':
                    # Tokens are positions between events.
                    # This token points *after* the last event in the chunk.
                    # We need it to point to the event before it in the chunk
                    # when we are going backwards so we subtract one from the
                    # stream part.
                    toke -= 1
                next_token = str(_StreamToken(topo, toke))
            else:
                # TODO (erikj): We should work out what to do here instead.
                next_token = to_key if to_key else from_key

            events = self._get_events_txn(
                txn,
                [r["event_id"] for r in rows],
                get_prev_content=True
            )

            self._set_before_and_after(events, rows)

            return events, next_token,

        return self.runInteraction("paginate_room_events", f)

    def get_recent_events_for_room(self, room_id, limit, end_token,
                                   with_feedback=False, from_token=None):
        """Fetch the most recent events in a room, newest last.

        Args:
            room_id (str): the room to fetch from.
            limit (int): max number of events to return.
            end_token (str): stream token marking the newest position.
            with_feedback (bool): unused (see TODO below).
            from_token (str|None): optional stream token to start after.
        Returns:
            A deferred (events, (start_token, end_token)) pair.
        """
        # TODO (erikj): Handle compressed feedback

        end_token = _StreamToken.parse_stream_token(end_token)

        if from_token is None:
            sql = (
                "SELECT stream_ordering, topological_ordering, event_id"
                " FROM events"
                " WHERE room_id = ? AND stream_ordering <= ? AND outlier = 0"
                " ORDER BY topological_ordering DESC, stream_ordering DESC"
                " LIMIT ?"
            )
        else:
            from_token = _StreamToken.parse_stream_token(from_token)
            sql = (
                "SELECT stream_ordering, topological_ordering, event_id"
                " FROM events"
                " WHERE room_id = ? AND stream_ordering > ?"
                " AND stream_ordering <= ? AND outlier = 0"
                " ORDER BY topological_ordering DESC, stream_ordering DESC"
                " LIMIT ?"
            )

        def get_recent_events_for_room_txn(txn):
            if from_token is None:
                txn.execute(sql, (room_id, end_token.stream, limit,))
            else:
                txn.execute(sql, (
                    room_id, from_token.stream, end_token.stream, limit
                ))

            rows = self.cursor_to_dict(txn)

            rows.reverse()  # As we selected with reverse ordering

            if rows:
                # Tokens are positions between events.
                # This token points *after* the last event in the chunk.
                # We need it to point to the event before it in the chunk
                # since we are going backwards so we subtract one from the
                # stream part.
                topo = rows[0]["topological_ordering"]
                toke = rows[0]["stream_ordering"] - 1
                start_token = str(_StreamToken(topo, toke))

                token = (start_token, str(end_token))
            else:
                token = (str(end_token), str(end_token))

            events = self._get_events_txn(
                txn,
                [r["event_id"] for r in rows],
                get_prev_content=True
            )

            self._set_before_and_after(events, rows)

            return events, token

        return self.runInteraction(
            "get_recent_events_for_room", get_recent_events_for_room_txn
        )

    def get_room_events_max_id(self):
        """Return a deferred for the current maximum stream token, "s<n>"."""
        return self.runInteraction(
            "get_room_events_max_id",
            self._get_room_events_max_id_txn
        )

    def _get_room_events_max_id_txn(self, txn):
        # Transaction body for get_room_events_max_id.
        txn.execute(
            "SELECT MAX(stream_ordering) as m FROM events"
        )

        res = self.cursor_to_dict(txn)

        logger.debug("get_room_events_max_id: %s", res)

        if not res or not res[0] or not res[0]["m"]:
            # No events at all yet: the stream starts at position 0.
            return "s0"

        key = res[0]["m"]
        return "s%d" % (key,)

    @staticmethod
    def _set_before_and_after(events, rows):
        # Annotate each event's internal metadata with the tokens that come
        # immediately before and after it, using the event's depth as the
        # topological part and the row's stream_ordering as the stream part.
        for event, row in zip(events, rows):
            stream = row["stream_ordering"]
            topo = event.depth
            internal = event.internal_metadata
            internal.before = str(_StreamToken(topo, stream - 1))
            internal.after = str(_StreamToken(topo, stream))