MatrixSynapse/synapse/storage/__init__.py

# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from twisted.internet import defer

from synapse.util.logutils import log_function
from synapse.api.constants import EventTypes

from .appservice import ApplicationServiceStore
from .directory import DirectoryStore
from .feedback import FeedbackStore
from .presence import PresenceStore
from .profile import ProfileStore
from .registration import RegistrationStore
from .room import RoomStore
from .roommember import RoomMemberStore
from .stream import StreamStore
from .transactions import TransactionStore
from .keys import KeyStore
from .event_federation import EventFederationStore
from .pusher import PusherStore
from .push_rule import PushRuleStore
from .media_repository import MediaRepositoryStore
from .rejections import RejectionsStore
from .state import StateStore
from .signatures import SignatureStore
from .filtering import FilteringStore

from syutil.base64util import decode_base64
from syutil.jsonutil import encode_canonical_json

from synapse.crypto.event_signing import compute_event_reference_hash

import fnmatch
import imp
import logging
import os
import re


logger = logging.getLogger(__name__)


# Remember to update this number every time an incompatible change is made to
# database schema files, so the users will be informed on server restarts.
SCHEMA_VERSION = 14

dir_path = os.path.abspath(os.path.dirname(__file__))


class _RollbackButIsFineException(Exception):
    """ This exception is used to rollback a transaction without implying
    something went wrong.
    """
    pass


class DataStore(RoomMemberStore, RoomStore,
                RegistrationStore, StreamStore, ProfileStore, FeedbackStore,
                PresenceStore, TransactionStore,
                DirectoryStore, KeyStore, StateStore, SignatureStore,
                ApplicationServiceStore,
                EventFederationStore,
                MediaRepositoryStore,
                RejectionsStore,
                FilteringStore,
                PusherStore,
                PushRuleStore
                ):

    def __init__(self, hs):
        super(DataStore, self).__init__(hs)
        self.hs = hs

        self.min_token_deferred = self._get_min_token()
        self.min_token = None

    @defer.inlineCallbacks
    @log_function
    def persist_event(self, event, context, backfilled=False,
                      is_new_state=True, current_state=None):
        stream_ordering = None
        if backfilled:
            if not self.min_token_deferred.called:
                yield self.min_token_deferred
            self.min_token -= 1
            stream_ordering = self.min_token

        try:
            yield self.runInteraction(
                "persist_event",
                self._persist_event_txn,
                event=event,
                context=context,
                backfilled=backfilled,
                stream_ordering=stream_ordering,
                is_new_state=is_new_state,
                current_state=current_state,
            )
        except _RollbackButIsFineException:
            pass
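
    # Illustrative usage, not part of the original module: a sketch of how a
    # caller might persist a freshly received (non-backfilled) event. The
    # names `store`, `event` and `context` are hypothetical, standing in for
    # a wired-up DataStore and the output of the event builder/state handler.
    #
    #     @defer.inlineCallbacks
    #     def _example_persist(store, event, context):
    #         yield store.persist_event(event, context, backfilled=False)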

    @defer.inlineCallbacks
    def get_event(self, event_id, check_redacted=True,
                  get_prev_content=False, allow_rejected=False,
                  allow_none=False):
        """Get an event from the database by event_id.

        Args:
            event_id (str): The event_id of the event to fetch
            check_redacted (bool): If True, check if the event has been
                redacted and redact it.
            get_prev_content (bool): If True and the event is a state event,
                include the previous state's content in the unsigned field.
            allow_rejected (bool): If True, return rejected events.
            allow_none (bool): If True, return None if no event is found; if
                False, throw an exception.

        Returns:
            Deferred: A FrozenEvent.
        """
        event = yield self.runInteraction(
            "get_event", self._get_event_txn,
            event_id,
            check_redacted=check_redacted,
            get_prev_content=get_prev_content,
            allow_rejected=allow_rejected,
        )

        if not event and not allow_none:
            raise RuntimeError("Could not find event %s" % (event_id,))

        defer.returnValue(event)
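
    # A minimal usage sketch (the event id below is made up for illustration;
    # `store` is assumed to be a DataStore instance):
    #
    #     @defer.inlineCallbacks
    #     def _example_get(store):
    #         event = yield store.get_event(
    #             "$1423668245:example.com", allow_none=True,
    #         )
    #         if event is None:
    #             pass  # this server has never seen the event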

    @log_function
    def _persist_event_txn(self, txn, event, context, backfilled,
                           stream_ordering=None, is_new_state=True,
                           current_state=None):

        # Remove any existing cache entries for the event_id
        self._get_event_cache.pop(event.event_id)

        # We purposefully do this first since if we include a `current_state`
        # key, we *want* to update the `current_state_events` table
        if current_state:
            txn.execute(
                "DELETE FROM current_state_events WHERE room_id = ?",
                (event.room_id,)
            )

            for s in current_state:
                self._simple_insert_txn(
                    txn,
                    "current_state_events",
                    {
                        "event_id": s.event_id,
                        "room_id": s.room_id,
                        "type": s.type,
                        "state_key": s.state_key,
                    },
                    or_replace=True,
                )

        if event.is_state() and is_new_state:
            if not backfilled and not context.rejected:
                self._simple_insert_txn(
                    txn,
                    table="state_forward_extremities",
                    values={
                        "event_id": event.event_id,
                        "room_id": event.room_id,
                        "type": event.type,
                        "state_key": event.state_key,
                    },
                    or_replace=True,
                )

                for prev_state_id, _ in event.prev_state:
                    self._simple_delete_txn(
                        txn,
                        table="state_forward_extremities",
                        keyvalues={
                            "event_id": prev_state_id,
                        }
                    )

        outlier = event.internal_metadata.is_outlier()

        if not outlier:
            self._store_state_groups_txn(txn, event, context)

            self._update_min_depth_for_room_txn(
                txn,
                event.room_id,
                event.depth
            )

        self._handle_prev_events(
            txn,
            outlier=outlier,
            event_id=event.event_id,
            prev_events=event.prev_events,
            room_id=event.room_id,
        )

        have_persisted = self._simple_select_one_onecol_txn(
            txn,
            table="event_json",
            keyvalues={"event_id": event.event_id},
            retcol="event_id",
            allow_none=True,
        )

        metadata_json = encode_canonical_json(
            event.internal_metadata.get_dict()
        )

        # If we have already persisted this event, we don't need to do any
        # more processing.
        # The processing above must be done on every call to persist event,
        # since they might not have happened on previous calls. For example,
        # if we are persisting an event that we had persisted as an outlier,
        # but is no longer one.
        if have_persisted:
            if not outlier:
                sql = (
                    "UPDATE event_json SET internal_metadata = ?"
                    " WHERE event_id = ?"
                )
                txn.execute(
                    sql,
                    (metadata_json.decode("UTF-8"), event.event_id,)
                )

                sql = (
                    "UPDATE events SET outlier = 0"
                    " WHERE event_id = ?"
                )
                txn.execute(
                    sql,
                    (event.event_id,)
                )
            return

        if event.type == EventTypes.Member:
            self._store_room_member_txn(txn, event)
        elif event.type == EventTypes.Feedback:
            self._store_feedback_txn(txn, event)
        elif event.type == EventTypes.Name:
            self._store_room_name_txn(txn, event)
        elif event.type == EventTypes.Topic:
            self._store_room_topic_txn(txn, event)
        elif event.type == EventTypes.Redaction:
            self._store_redaction(txn, event)

        event_dict = {
            k: v
            for k, v in event.get_dict().items()
            if k not in [
                "redacted",
                "redacted_because",
            ]
        }

        self._simple_insert_txn(
            txn,
            table="event_json",
            values={
                "event_id": event.event_id,
                "room_id": event.room_id,
                "internal_metadata": metadata_json.decode("UTF-8"),
                "json": encode_canonical_json(event_dict).decode("UTF-8"),
            },
            or_replace=True,
        )

        content = encode_canonical_json(
            event.content
        ).decode("UTF-8")

        vals = {
            "topological_ordering": event.depth,
            "event_id": event.event_id,
            "type": event.type,
            "room_id": event.room_id,
            "content": content,
            "processed": True,
            "outlier": outlier,
            "depth": event.depth,
        }

        if stream_ordering is not None:
            vals["stream_ordering"] = stream_ordering

        unrec = {
            k: v
            for k, v in event.get_dict().items()
            if k not in vals.keys() and k not in [
                "redacted",
                "redacted_because",
                "signatures",
                "hashes",
                "prev_events",
            ]
        }
        vals["unrecognized_keys"] = encode_canonical_json(
            unrec
        ).decode("UTF-8")

        try:
            self._simple_insert_txn(
                txn,
                "events",
                vals,
                or_replace=(not outlier),
                or_ignore=bool(outlier),
            )
        except:
            logger.warn(
                "Failed to persist, probably duplicate: %s",
                event.event_id,
                exc_info=True,
            )
            raise _RollbackButIsFineException("_persist_event")

        if context.rejected:
            self._store_rejections_txn(txn, event.event_id, context.rejected)

        if event.is_state():
            vals = {
                "event_id": event.event_id,
                "room_id": event.room_id,
                "type": event.type,
                "state_key": event.state_key,
            }

            # TODO: How does this work with backfilling?
            if hasattr(event, "replaces_state"):
                vals["prev_state"] = event.replaces_state

            self._simple_insert_txn(
                txn,
                "state_events",
                vals,
                or_replace=True,
            )

            if is_new_state and not context.rejected:
                self._simple_insert_txn(
                    txn,
                    "current_state_events",
                    {
                        "event_id": event.event_id,
                        "room_id": event.room_id,
                        "type": event.type,
                        "state_key": event.state_key,
                    },
                    or_replace=True,
                )

            for e_id, h in event.prev_state:
                self._simple_insert_txn(
                    txn,
                    table="event_edges",
                    values={
                        "event_id": event.event_id,
                        "prev_event_id": e_id,
                        "room_id": event.room_id,
                        "is_state": 1,
                    },
                    or_ignore=True,
                )

        for hash_alg, hash_base64 in event.hashes.items():
            hash_bytes = decode_base64(hash_base64)
            self._store_event_content_hash_txn(
                txn, event.event_id, hash_alg, hash_bytes,
            )

        for prev_event_id, prev_hashes in event.prev_events:
            for alg, hash_base64 in prev_hashes.items():
                hash_bytes = decode_base64(hash_base64)
                self._store_prev_event_hash_txn(
                    txn, event.event_id, prev_event_id, alg, hash_bytes
                )

        for auth_id, _ in event.auth_events:
            self._simple_insert_txn(
                txn,
                table="event_auth",
                values={
                    "event_id": event.event_id,
                    "room_id": event.room_id,
                    "auth_id": auth_id,
                },
                or_ignore=True,
            )

        (ref_alg, ref_hash_bytes) = compute_event_reference_hash(event)
        self._store_event_reference_hash_txn(
            txn, event.event_id, ref_alg, ref_hash_bytes
        )

    def _store_redaction(self, txn, event):
        # invalidate the cache for the redacted event
        self._get_event_cache.pop(event.redacts)
        txn.execute(
            "INSERT OR IGNORE INTO redactions "
            "(event_id, redacts) VALUES (?,?)",
            (event.event_id, event.redacts)
        )

    @defer.inlineCallbacks
    def get_current_state(self, room_id, event_type=None, state_key=""):
        del_sql = (
            "SELECT event_id FROM redactions WHERE redacts = e.event_id "
            "LIMIT 1"
        )

        sql = (
            "SELECT e.*, (%(redacted)s) AS redacted FROM events as e "
            "INNER JOIN current_state_events as c ON e.event_id = c.event_id "
            "INNER JOIN state_events as s ON e.event_id = s.event_id "
            "WHERE c.room_id = ? "
        ) % {
            "redacted": del_sql,
        }

        if event_type and state_key is not None:
            sql += " AND s.type = ? AND s.state_key = ? "
            args = (room_id, event_type, state_key)
        elif event_type:
            sql += " AND s.type = ?"
            args = (room_id, event_type)
        else:
            args = (room_id, )

        results = yield self._execute_and_decode(sql, *args)

        events = yield self._parse_events(results)

        defer.returnValue(events)
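
    # Example (a sketch; `store` is assumed to be a DataStore and the room id
    # is hypothetical): fetch the current m.room.name state event of a room.
    #
    #     @defer.inlineCallbacks
    #     def _example_state(store):
    #         events = yield store.get_current_state(
    #             "!abcdefg:example.com",
    #             event_type="m.room.name", state_key="",
    #         )
    #         # `events` is a list of the matching current state events.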

    @defer.inlineCallbacks
    def get_room_name_and_aliases(self, room_id):
        del_sql = (
            "SELECT event_id FROM redactions WHERE redacts = e.event_id "
            "LIMIT 1"
        )

        sql = (
            "SELECT e.*, (%(redacted)s) AS redacted FROM events as e "
            "INNER JOIN current_state_events as c ON e.event_id = c.event_id "
            "INNER JOIN state_events as s ON e.event_id = s.event_id "
            "WHERE c.room_id = ? "
        ) % {
            "redacted": del_sql,
        }

        sql += " AND ((s.type = 'm.room.name' AND s.state_key = '')"
        sql += " OR s.type = 'm.room.aliases')"
        args = (room_id,)

        results = yield self._execute_and_decode(sql, *args)

        events = yield self._parse_events(results)

        name = None
        aliases = []

        for e in events:
            if e.type == 'm.room.name':
                name = e.content['name']
            elif e.type == 'm.room.aliases':
                aliases.extend(e.content['aliases'])

        defer.returnValue((name, aliases))
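
    # Usage sketch (room id is hypothetical):
    #
    #     @defer.inlineCallbacks
    #     def _example_name(store):
    #         name, aliases = yield store.get_room_name_and_aliases(
    #             "!abcdefg:example.com"
    #         )
    #         # `name` is None if the room has no m.room.name event.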

    @defer.inlineCallbacks
    def _get_min_token(self):
        row = yield self._execute(
            None,
            "SELECT MIN(stream_ordering) FROM events"
        )

        self.min_token = row[0][0] if row and row[0] and row[0][0] else -1
        self.min_token = min(self.min_token, -1)

        logger.debug("min_token is: %s", self.min_token)

        defer.returnValue(self.min_token)

    def insert_client_ip(self, user, access_token, device_id, ip, user_agent):
        return self._simple_insert(
            "user_ips",
            {
                "user": user.to_string(),
                "access_token": access_token,
                "device_id": device_id,
                "ip": ip,
                "user_agent": user_agent,
                "last_seen": int(self._clock.time_msec()),
            }
        )

    def get_user_ip_and_agents(self, user):
        return self._simple_select_list(
            table="user_ips",
            keyvalues={"user": user.to_string()},
            retcols=[
                "device_id", "access_token", "ip", "user_agent", "last_seen"
            ],
        )
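
    # Sketch of the client-IP bookkeeping pair above. All values are
    # hypothetical, and `user` is a synapse UserID object (it must support
    # `to_string()`), not a plain string:
    #
    #     yield store.insert_client_ip(
    #         user, access_token="some_access_token", device_id="ABCDEF",
    #         ip="203.0.113.1", user_agent="Mozilla/5.0",
    #     )
    #     entries = yield store.get_user_ip_and_agents(user)
    #     # -> [{"device_id": ..., "ip": "203.0.113.1", ...}, ...]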

    def have_events(self, event_ids):
        """Given a list of event ids, check if we have already processed them.

        Returns:
            dict: Has an entry for each event id we already have seen. Maps to
            the rejected reason string if we rejected the event, else maps to
            None.
        """
        if not event_ids:
            return defer.succeed({})

        def f(txn):
            sql = (
                "SELECT e.event_id, reason FROM events as e "
                "LEFT JOIN rejections as r ON e.event_id = r.event_id "
                "WHERE e.event_id = ?"
            )
            res = {}
            for event_id in event_ids:
                txn.execute(sql, (event_id,))
                row = txn.fetchone()
                if row:
                    _, rejected = row
                    res[event_id] = rejected

            return res

        return self.runInteraction(
            "have_events", f,
        )
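
    # Usage sketch: `have_events` maps each *known* event id to its rejection
    # reason (or None if it was accepted); unknown ids are simply absent.
    # The event ids below are made up:
    #
    #     seen = yield store.have_events(["$a:example.com", "$b:example.com"])
    #     if "$a:example.com" not in seen:
    #         pass  # we have never processed $a
    #     elif seen["$a:example.com"] is not None:
    #         pass  # we processed it, but rejected it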


def read_schema(path):
    """ Read the named database schema.

    Args:
        path: Path of the database schema.

    Returns:
        A string containing the database schema.
    """
    with open(path) as schema_file:
        return schema_file.read()


class PrepareDatabaseException(Exception):
    pass


class UpgradeDatabaseException(PrepareDatabaseException):
    pass


def prepare_database(db_conn):
    """Prepares a database for usage. Will either create all necessary tables
    or upgrade from an older schema version.
    """
    cur = db_conn.cursor()
    version_info = get_or_create_schema_state(cur)

    if version_info:
        user_version, delta_files, upgraded = version_info
        _upgrade_existing_database(cur, user_version, delta_files, upgraded)
    else:
        _setup_new_database(cur)

    cur.execute("PRAGMA user_version = %d" % (SCHEMA_VERSION,))

    db_conn.commit()
    cur.close()
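

# For reference, a sketch of the bookkeeping this leaves behind (assuming
# SCHEMA_VERSION = 14): `PRAGMA user_version` reports 14, `schema_version`
# records which version was reached and whether deltas got it there, and
# `schema_deltas` lists every delta file already applied. A combined startup
# example lives at the bottom of this module.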


def _setup_new_database(cur):
    current_dir = os.path.join(dir_path, "schema", "current")
    directory_entries = os.listdir(current_dir)

    valid_dirs = []
    pattern = re.compile(r"^\d+(\.sql)?$")
    for filename in directory_entries:
        match = pattern.match(filename)
        abs_path = os.path.join(current_dir, filename)
        if match and os.path.isdir(abs_path):
            ver = int(match.group(0))
            if ver < SCHEMA_VERSION:
                valid_dirs.append((ver, abs_path))

    if not valid_dirs:
        raise RuntimeError("Could not find a suitable current.sql")

    max_current_ver, sql_dir = max(valid_dirs, key=lambda x: x[0])

    logger.debug("Initialising schema v%d", max_current_ver)

    directory_entries = os.listdir(sql_dir)

    sql_script = "BEGIN TRANSACTION;\n"
    for filename in fnmatch.filter(directory_entries, "*.sql"):
        sql_loc = os.path.join(sql_dir, filename)
        logger.debug("Applying schema %r", sql_loc)
        sql_script += read_schema(sql_loc)
        sql_script += "\n"
    sql_script += "COMMIT TRANSACTION;"
    cur.executescript(sql_script)

    cur.execute(
        "INSERT INTO schema_version (version, upgraded)"
        " VALUES (?,?)",
        (max_current_ver, False)
    )

    _upgrade_existing_database(
        cur,
        current_version=max_current_ver,
        delta_files=[],
        upgraded=False
    )


def _upgrade_existing_database(cur, current_version, delta_files, upgraded):
    """Upgrades an existing database.

    Delta files can either be SQL stored in *.sql files, or python modules
    in *.py.

    There can be multiple delta files per version. Synapse will keep track of
    which delta files have been applied, and will apply any that haven't been
    even if there has been no version bump. This is useful for development
    where orthogonal schema changes may happen on separate branches.

    Args:
        cur (Cursor)
        current_version (int): The current version of the schema
        delta_files (list): A list of deltas that have already been applied
        upgraded (bool): Whether the current version was generated by having
            applied deltas or from full schema file. If `True` the function
            will never apply delta files for the given `current_version`,
            since the current_version wasn't generated by applying those
            delta files.
    """
    if current_version > SCHEMA_VERSION:
        raise ValueError(
            "Cannot use this database as it is too " +
            "new for the server to understand"
        )

    start_ver = current_version
    if not upgraded:
        start_ver += 1

    for v in range(start_ver, SCHEMA_VERSION + 1):
        logger.debug("Upgrading schema to v%d", v)

        delta_dir = os.path.join(dir_path, "schema", "delta", str(v))

        try:
            directory_entries = os.listdir(delta_dir)
        except OSError:
            logger.exception("Could not open delta dir for version %d", v)
            raise

        directory_entries.sort()
        for file_name in directory_entries:
            relative_path = os.path.join(str(v), file_name)
            if relative_path in delta_files:
                continue

            absolute_path = os.path.join(
                dir_path, "schema", "delta", relative_path,
            )
            root_name, ext = os.path.splitext(file_name)
            if ext == ".py":
                module_name = "synapse.storage.v%d_%s" % (
                    v, root_name
                )
                with open(absolute_path) as schema_file:
                    module = imp.load_source(
                        module_name, absolute_path, schema_file
                    )
                logger.debug("Running script %s", relative_path)
                module.run_upgrade(cur)
            elif ext == ".sql":
                with open(absolute_path) as schema_file:
                    delta_schema = schema_file.read()
                logger.debug("Applying schema %s", relative_path)
                cur.executescript(delta_schema)
            else:
                # Not a valid delta file.
                logger.warn(
                    "Found directory entry that did not end in .py or"
                    " .sql: %s",
                    relative_path,
                )
                continue

            # Mark as done.
            cur.execute(
                "INSERT INTO schema_deltas (version, file)"
                " VALUES (?,?)",
                (v, relative_path)
            )

        cur.execute(
            "INSERT INTO schema_version (version, upgraded)"
            " VALUES (?,?)",
            (v, True)
        )
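

# For orientation, the delta layout consumed above looks roughly like this.
# The version numbers and file names are examples, not a listing of the real
# tree; the only hard requirement the code imposes is that a .py delta must
# expose a run_upgrade(cur) function:
#
#     synapse/storage/schema/delta/
#         13/
#             0001_some_change.sql
#         14/
#             0001_more_changes.sql
#             0002_backfill_data.py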


def get_or_create_schema_state(txn):
    schema_path = os.path.join(
        dir_path, "schema", "schema_version.sql",
    )
    create_schema = read_schema(schema_path)
    txn.executescript(create_schema)

    txn.execute("SELECT version, upgraded FROM schema_version")
    row = txn.fetchone()
    current_version = int(row[0]) if row else None
    upgraded = bool(row[1]) if row else None

    if current_version:
        txn.execute(
            "SELECT file FROM schema_deltas WHERE version >= ?",
            (current_version,)
        )
        # fetchall() drains the cursor, so call it exactly once; unpack the
        # single `file` column so callers get a list of relative paths.
        res = [file_name for file_name, in txn.fetchall()]
        return current_version, res, upgraded

    return None


def prepare_sqlite3_database(db_conn):
    """This function should be called before `prepare_database` on sqlite3
    databases.

    Since we changed the way we store the current schema version and handle
    updates to schemas, we need a way to upgrade from the old method to the
    new. This only affects sqlite databases since they were the only ones
    supported at the time.
    """
    with db_conn:
        schema_path = os.path.join(
            dir_path, "schema", "schema_version.sql",
        )
        create_schema = read_schema(schema_path)
        db_conn.executescript(create_schema)

        c = db_conn.execute("SELECT * FROM schema_version")
        rows = c.fetchall()
        c.close()

        if not rows:
            c = db_conn.execute("PRAGMA user_version")
            row = c.fetchone()
            c.close()

            if row and row[0]:
                db_conn.execute(
                    "INSERT INTO schema_version (version, upgraded)"
                    " VALUES (?,?)",
                    (row[0], False)
                )