Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes

pull/2587/head
Erik Johnston 2017-05-03 09:49:25 +01:00
commit 02928332d5
21 changed files with 179 additions and 90 deletions

View File

@@ -354,8 +354,11 @@ ArchLinux
---------
The quickest way to get up and running with ArchLinux is probably with the community package
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in all
the necessary dependencies.
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
the necessary dependencies. If the default web client is to be served (enabled by default in
the generated config),
https://www.archlinux.org/packages/community/any/python2-matrix-angular-sdk/ will also need to
be installed.
Alternatively, to install using pip, a few changes may be needed, as ArchLinux
defaults to python 3 while synapse currently assumes python 2.7 by default:

View File

@@ -25,7 +25,7 @@ import synapse.config.logger
from synapse.config._base import ConfigError
from synapse.python_dependencies import (
check_requirements, DEPENDENCY_LINKS
check_requirements, CONDITIONAL_REQUIREMENTS
)
from synapse.rest import ClientRestResource
@@ -92,7 +92,7 @@ def build_resource_for_web_client(hs):
"\n"
"You can also disable hosting of the webclient via the\n"
"configuration option `web_client`\n"
% {"dep": DEPENDENCY_LINKS["matrix-angular-sdk"]}
% {"dep": CONDITIONAL_REQUIREMENTS["web_client"].keys()[0]}
)
syweb_path = os.path.dirname(syweb.__file__)
webclient_path = os.path.join(syweb_path, "webclient")

View File

@@ -234,6 +234,9 @@ def main():
if action == "start" or action == "restart":
if start_stop_synapse:
# Check if synapse is already running
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
abort("synapse.app.homeserver already running")
start(configfile)
for worker in workers:
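
The start guard above depends on a pid_running helper; a minimal sketch of how such a liveness check is commonly written (an assumption for illustration, not code lifted from synctl) is to send signal 0 to the pid:

import errno
import os

def pid_running(pid):
    # Signal 0 delivers nothing but still performs existence/permission checks.
    try:
        os.kill(pid, 0)
        return True
    except OSError as err:
        # EPERM means the process exists but is owned by someone else.
        return err.errno == errno.EPERM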

View File

@@ -144,6 +144,12 @@ class ServerConfig(Config):
# Whether to serve a web client from the HTTP/HTTPS root resource.
web_client: True
# The root directory to serve for the above web client.
# If left undefined, synapse will serve the matrix-angular-sdk web client.
# Make sure matrix-angular-sdk is installed with pip if web_client is True
# and web_client_location is undefined
# web_client_location: "/path/to/web/root"
# The public-facing base URL for the client API (not including _matrix/...)
# public_baseurl: https://example.com:8448/

View File

@@ -50,6 +50,7 @@ class EventContext(object):
"prev_group",
"delta_ids",
"prev_state_events",
"app_service",
]
def __init__(self):
@@ -68,3 +69,5 @@ class EventContext(object):
self.delta_ids = None
self.prev_state_events = None
self.app_service = None

View File

@@ -225,7 +225,22 @@ def format_event_for_client_v2_without_room_id(d):
def serialize_event(e, time_now_ms, as_client_event=True,
event_format=format_event_for_client_v1,
token_id=None, only_event_fields=None):
token_id=None, only_event_fields=None, is_invite=False):
"""Serialize event for clients
Args:
e (EventBase)
time_now_ms (int)
as_client_event (bool)
event_format
token_id
only_event_fields
is_invite (bool): Whether this is an invite that is being sent to the
invitee
Returns:
dict
"""
# FIXME(erikj): To handle the case of presence events and the like
if not isinstance(e, EventBase):
return e
@@ -251,6 +266,12 @@ def serialize_event(e, time_now_ms, as_client_event=True,
if txn_id is not None:
d["unsigned"]["transaction_id"] = txn_id
# If this is an invite for somebody else, then we don't care about the
# invite_room_state as that's meant solely for the invitee. Other clients
# will already have the state since they're in the room.
if not is_invite:
d["unsigned"].pop("invite_room_state", None)
if as_client_event:
d = event_format(d)
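
Taken in isolation, the new is_invite handling reduces to the following sketch (hypothetical helper, not the synapse function itself):

def strip_invite_room_state(serialized, is_invite=False):
    # invite_room_state is meant solely for the invitee; other clients are
    # already in the room and have the state, so drop it from their copies.
    if not is_invite:
        serialized.get("unsigned", {}).pop("invite_room_state", None)
    return serialized

invitee_copy = strip_invite_room_state(
    {"unsigned": {"invite_room_state": [{"type": "m.room.name"}]}},
    is_invite=True,
)
assert "invite_room_state" in invitee_copy["unsigned"]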

View File

@@ -171,6 +171,12 @@ class FederationHandler(BaseHandler):
yield self._get_missing_events_for_pdu(
origin, pdu, prevs, min_depth
)
elif prevs - seen:
logger.info(
"Not fetching %d missing events for room %r,event %s: %r...",
len(prevs - seen), pdu.room_id, pdu.event_id,
list(prevs - seen)[:5],
)
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
@@ -232,8 +238,8 @@ class FederationHandler(BaseHandler):
latest |= seen
logger.info(
"Missing %d events for room %r: %r...",
len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
"Missing %d events for room %r pdu %s: %r...",
len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5]
)
# XXX: we set timeout to 10s to help workaround
@@ -265,11 +271,17 @@ class FederationHandler(BaseHandler):
timeout=10000,
)
logger.info(
"Got %d events: %r...",
len(missing_events), [e.event_id for e in missing_events[:5]]
)
# We want to sort these by depth so we process them and
# tell clients about them in order.
missing_events.sort(key=lambda x: x.depth)
for e in missing_events:
logger.info("Handling found event %s", e.event_id)
yield self.on_receive_pdu(
origin,
e,
@@ -279,6 +291,14 @@ class FederationHandler(BaseHandler):
have_seen = yield self.store.have_events(
[ev for ev, _ in pdu.prev_events]
)
seen = set(have_seen.keys())
if prevs - seen:
logger.info(
"Still missing %d prev events for %s: %r...",
len(prevs - seen), pdu.event_id, list(prevs - seen)[:5]
)
else:
logger.info("Found all missing prev events for %s", pdu.event_id)
defer.returnValue(have_seen)
@log_function
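
The bookkeeping in this handler is set arithmetic over event ids plus a depth sort before processing; a toy model with made-up ids:

prevs = {"$a:hs", "$b:hs", "$c:hs"}   # prev_events referenced by the new PDU
seen = {"$a:hs"}                      # events already in the store
missing = prevs - seen                # what still has to be fetched
assert missing == {"$b:hs", "$c:hs"}

# Fetched events are handled in depth order so clients hear about them in order.
fetched = [{"event_id": "$c:hs", "depth": 7}, {"event_id": "$b:hs", "depth": 5}]
fetched.sort(key=lambda e: e["depth"])
assert [e["event_id"] for e in fetched] == ["$b:hs", "$c:hs"]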

View File

@@ -175,7 +175,8 @@ class MessageHandler(BaseHandler):
defer.returnValue(chunk)
@defer.inlineCallbacks
def create_event(self, event_dict, token_id=None, txn_id=None, prev_event_ids=None):
def create_event(self, requester, event_dict, token_id=None, txn_id=None,
prev_event_ids=None):
"""
Given a dict from a client, create a new event.
@@ -185,6 +186,7 @@ class MessageHandler(BaseHandler):
Adds display names to Join membership events.
Args:
requester
event_dict (dict): An entire event
token_id (str)
txn_id (str)
@@ -226,6 +228,7 @@ class MessageHandler(BaseHandler):
event, context = yield self._create_new_client_event(
builder=builder,
requester=requester,
prev_event_ids=prev_event_ids,
)
@@ -319,6 +322,7 @@ class MessageHandler(BaseHandler):
See self.create_event and self.send_nonmember_event.
"""
event, context = yield self.create_event(
requester,
event_dict,
token_id=requester.access_token_id,
txn_id=txn_id
@@ -416,7 +420,7 @@ class MessageHandler(BaseHandler):
@measure_func("_create_new_client_event")
@defer.inlineCallbacks
def _create_new_client_event(self, builder, prev_event_ids=None):
def _create_new_client_event(self, builder, requester=None, prev_event_ids=None):
if prev_event_ids:
prev_events = yield self.store.add_event_hashes(prev_event_ids)
prev_max_depth = yield self.store.get_max_depth_of_events(prev_event_ids)
@@ -456,6 +460,8 @@ class MessageHandler(BaseHandler):
state_handler = self.state_handler
context = yield state_handler.compute_event_context(builder)
if requester:
context.app_service = requester.app_service
if builder.is_state():
builder.prev_state = yield self.store.add_event_hashes(
@@ -531,9 +537,9 @@ class MessageHandler(BaseHandler):
state_to_include_ids = [
e_id
for k, e_id in context.current_state_ids.items()
for k, e_id in context.current_state_ids.iteritems()
if k[0] in self.hs.config.room_invite_state_types
or k[0] == EventTypes.Member and k[1] == event.sender
or k == (EventTypes.Member, event.sender)
]
state_to_include = yield self.store.get_events(state_to_include_ids)
@@ -545,7 +551,7 @@ class MessageHandler(BaseHandler):
"content": e.content,
"sender": e.sender,
}
for e in state_to_include.values()
for e in state_to_include.itervalues()
]
invitee = UserID.from_string(event.state_key)
@@ -618,6 +624,3 @@ class MessageHandler(BaseHandler):
)
preserve_fn(_notify)()
# If invite, remove room_state from unsigned before sending.
event.unsigned.pop("invite_room_state", None)
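
In the room_invite_state_types hunk above, current_state_ids is keyed by (event_type, state_key) tuples; a stripped-down illustration with example config values (not synapse's actual defaults):

current_state_ids = {
    ("m.room.name", ""): "$name:hs",
    ("m.room.member", "@alice:hs"): "$alice_join:hs",
    ("m.room.member", "@bob:hs"): "$bob_join:hs",
}
room_invite_state_types = ["m.room.name", "m.room.join_rules"]  # example config
sender = "@alice:hs"

state_to_include_ids = [
    e_id
    for k, e_id in current_state_ids.items()
    if k[0] in room_invite_state_types
    or k == ("m.room.member", sender)   # always include the sender's membership
]
assert sorted(state_to_include_ids) == ["$alice_join:hs", "$name:hs"]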

View File

@@ -70,6 +70,7 @@ class RoomMemberHandler(BaseHandler):
content["kind"] = "guest"
event, context = yield msg_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,

View File

@@ -20,7 +20,6 @@ from twisted.internet import defer
from .push_rule_evaluator import PushRuleEvaluatorForEvent
from synapse.api.constants import EventTypes
from synapse.visibility import filter_events_for_clients_context
logger = logging.getLogger(__name__)
@@ -67,17 +66,6 @@ class BulkPushRuleEvaluator:
def action_for_event_by_user(self, event, context):
actions_by_user = {}
# None of these users can be peeking since this list of users comes
# from the set of users in the room, so we know for sure they're all
# actually in the room.
user_tuples = [
(u, False) for u in self.rules_by_user.keys()
]
filtered_by_user = yield filter_events_for_clients_context(
self.store, user_tuples, [event], {event.event_id: context}
)
room_members = yield self.store.get_joined_users_from_context(
event, context
)
@@ -87,6 +75,14 @@ class BulkPushRuleEvaluator:
condition_cache = {}
for uid, rules in self.rules_by_user.items():
if event.sender == uid:
continue
if not event.is_state():
is_ignored = yield self.store.is_ignored_by(event.sender, uid)
if is_ignored:
continue
display_name = None
profile_info = room_members.get(uid)
if profile_info:
@@ -98,13 +94,6 @@ class BulkPushRuleEvaluator:
if event.type == EventTypes.Member and event.state_key == uid:
display_name = event.content.get("displayname", None)
filtered = filtered_by_user[uid]
if len(filtered) == 0:
continue
if filtered[0].sender == uid:
continue
for rule in rules:
if 'enabled' in rule and not rule['enabled']:
continue
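
The net effect of this hunk is that push actions are no longer computed for users who ignore the sender; a condensed, synchronous model of the per-user loop (stub callables, not the real store or rule evaluation):

def users_to_evaluate(event_sender, rules_by_user, is_ignored_by):
    evaluated_for = []
    for uid in rules_by_user:
        if uid == event_sender:
            continue                       # never notify the sender themselves
        if is_ignored_by(event_sender, uid):
            continue                       # uid has the sender on their ignore list
        evaluated_for.append(uid)
    return evaluated_for

ignores = {"@bob:hs": {"@spammer:hs"}}     # ignorer -> set of ignored users
users = ["@alice:hs", "@bob:hs", "@spammer:hs"]
result = users_to_evaluate(
    "@spammer:hs",
    {u: [] for u in users},
    lambda sender, uid: sender in ignores.get(uid, set()),
)
assert result == ["@alice:hs"]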

View File

@@ -39,6 +39,7 @@ class ClientDirectoryServer(ClientV1RestServlet):
def __init__(self, hs):
super(ClientDirectoryServer, self).__init__(hs)
self.store = hs.get_datastore()
self.handlers = hs.get_handlers()
@defer.inlineCallbacks
@@ -70,7 +71,10 @@ class ClientDirectoryServer(ClientV1RestServlet):
logger.debug("Got servers: %s", servers)
# TODO(erikj): Check types.
# TODO(erikj): Check that room exists
room = yield self.store.get_room(room_id)
if room is None:
raise SynapseError(400, "Room does not exist")
dir_handler = self.handlers.directory_handler

View File

@@ -164,6 +164,7 @@ class RoomStateEventRestServlet(ClientV1RestServlet):
else:
msg_handler = self.handlers.message_handler
event, context = yield msg_handler.create_event(
requester,
event_dict,
token_id=requester.access_token_id,
txn_id=txn_id,

View File

@@ -253,6 +253,7 @@ class SyncRestServlet(RestServlet):
invite = serialize_event(
room.invite, time_now, token_id=token_id,
event_format=format_event_for_client_v2_without_room_id,
is_invite=True,
)
unsigned = dict(invite.get("unsigned", {}))
invite["unsigned"] = unsigned

View File

@@ -308,3 +308,16 @@ class AccountDataStore(SQLBaseStore):
" WHERE stream_id < ?"
)
txn.execute(update_max_id_sql, (next_id, next_id))
@cachedInlineCallbacks(num_args=2, cache_context=True, max_entries=5000)
def is_ignored_by(self, ignored_user_id, ignorer_user_id, cache_context):
ignored_account_data = yield self.get_global_account_data_by_type_for_user(
"m.ignored_user_list", ignorer_user_id,
on_invalidate=cache_context.invalidate,
)
if not ignored_account_data:
defer.returnValue(False)
defer.returnValue(
ignored_user_id in ignored_account_data.get("ignored_users", {})
)
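
The account data consulted here is the global m.ignored_user_list event, whose content maps ignored user ids to empty objects; a plain-dict illustration of the lookup:

ignored_user_list = {"ignored_users": {"@spammer:hs": {}}}

def is_ignored(ignored_user_id, account_data):
    if not account_data:
        return False
    return ignored_user_id in account_data.get("ignored_users", {})

assert is_ignored("@spammer:hs", ignored_user_list)
assert not is_ignored("@friend:hs", ignored_user_list)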

View File

@@ -228,46 +228,69 @@ class BackgroundUpdateStore(SQLBaseStore):
columns (list[str]): columns/expressions to include in index
"""
# if this is postgres, we add the indexes concurrently. Otherwise
# we fall back to doing it inline
if isinstance(self.database_engine, engines.PostgresEngine):
conc = True
else:
conc = False
# We don't use partial indices on SQLite as it wasn't introduced
# until 3.8, and wheezy has 3.7
where_clause = None
sql = (
"CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)"
" %(where_clause)s"
) % {
"conc": "CONCURRENTLY" if conc else "",
"name": index_name,
"table": table,
"columns": ", ".join(columns),
"where_clause": "WHERE " + where_clause if where_clause else ""
}
def create_index_concurrently(conn):
def create_index_psql(conn):
conn.rollback()
# postgres insists on autocommit for the index
conn.set_session(autocommit=True)
c = conn.cursor()
c.execute(sql)
conn.set_session(autocommit=False)
def create_index(conn):
try:
c = conn.cursor()
# If a previous attempt to create the index was interrupted,
# we may already have a half-built index. Let's just drop it
# before trying to create it again.
sql = "DROP INDEX IF EXISTS %s" % (index_name,)
logger.debug("[SQL] %s", sql)
c.execute(sql)
sql = (
"CREATE INDEX CONCURRENTLY %(name)s ON %(table)s"
" (%(columns)s) %(where_clause)s"
) % {
"name": index_name,
"table": table,
"columns": ", ".join(columns),
"where_clause": "WHERE " + where_clause if where_clause else ""
}
logger.debug("[SQL] %s", sql)
c.execute(sql)
finally:
conn.set_session(autocommit=False)
def create_index_sqlite(conn):
# Sqlite doesn't support concurrent creation of indexes.
#
# We don't use partial indices on SQLite as it wasn't introduced
# until 3.8, and wheezy has 3.7
#
# We assume that sqlite doesn't give us invalid indices; however
# we may still end up with the index existing but the
# background_updates not having been recorded if synapse got shut
# down at the wrong moment - hence we use IF NOT EXISTS. (SQLite
# has supported CREATE TABLE|INDEX IF NOT EXISTS since 3.3.0.)
sql = (
"CREATE INDEX IF NOT EXISTS %(name)s ON %(table)s"
" (%(columns)s)"
) % {
"name": index_name,
"table": table,
"columns": ", ".join(columns),
}
c = conn.cursor()
logger.debug("[SQL] %s", sql)
c.execute(sql)
if isinstance(self.database_engine, engines.PostgresEngine):
runner = create_index_psql
else:
runner = create_index_sqlite
@defer.inlineCallbacks
def updater(progress, batch_size):
logger.info("Adding index %s to %s", index_name, table)
if conc:
yield self.runWithConnection(create_index_concurrently)
else:
yield self.runWithConnection(create_index)
yield self.runWithConnection(runner)
yield self._end_background_update(update_name)
defer.returnValue(1)
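
A standalone sketch of the two SQL shapes built above (illustrative table and column names): Postgres gets CREATE INDEX CONCURRENTLY, which cannot run inside a transaction and hence needs the autocommit dance, while SQLite leans on IF NOT EXISTS so a rerun after an untimely shutdown is harmless:

def index_sql(index_name, table, columns, where_clause=None, postgres=True):
    if postgres:
        # Concurrent creation avoids holding a long write lock on the table.
        return (
            "CREATE INDEX CONCURRENTLY %(name)s ON %(table)s (%(columns)s)"
            " %(where)s"
        ) % {
            "name": index_name,
            "table": table,
            "columns": ", ".join(columns),
            "where": "WHERE " + where_clause if where_clause else "",
        }
    # SQLite: no CONCURRENTLY and no partial indices (wheezy ships 3.7).
    return "CREATE INDEX IF NOT EXISTS %(name)s ON %(table)s (%(columns)s)" % {
        "name": index_name,
        "table": table,
        "columns": ", ".join(columns),
    }

print(index_sql("evts_room_idx", "events", ["room_id"], postgres=False))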

View File

@@ -29,6 +29,7 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.state import resolve_events
from synapse.util.caches.descriptors import cached
from synapse.types import get_domain_from_id
from canonicaljson import encode_canonical_json
from collections import deque, namedtuple, OrderedDict
@@ -49,6 +50,9 @@ logger = logging.getLogger(__name__)
metrics = synapse.metrics.get_metrics_for(__name__)
persist_event_counter = metrics.register_counter("persisted_events")
event_counter = metrics.register_counter(
"persisted_events_sep", labels=["type", "origin_type", "origin_entity"]
)
def encode_json(json_object):
@@ -370,6 +374,18 @@ class EventsStore(SQLBaseStore):
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc_by(len(chunk))
for event, context in chunk:
if context.app_service:
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
origin_entity = get_domain_from_id(event.sender)
event_counter.inc(event.type, origin_type, origin_entity)
@defer.inlineCallbacks
def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids):
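
The new counter labels come down to a three-way classification of an event's origin; a small self-contained sketch of that decision (toy inputs, and a rough stand-in for get_domain_from_id):

def classify_origin(sender, app_service_id=None, is_mine_id=lambda s: False):
    if app_service_id is not None:
        return "local", app_service_id     # created via an application service
    if is_mine_id(sender):
        return "local", "*client*"         # one of this server's own users
    return "remote", sender.split(":", 1)[1]   # remote: label by origin domain

assert classify_origin("@bot:hs", app_service_id="irc_bridge") == ("local", "irc_bridge")
assert classify_origin("@alice:hs", is_mine_id=lambda s: s.endswith(":hs")) == ("local", "*client*")
assert classify_origin("@bob:other.org") == ("remote", "other.org")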

View File

@@ -188,7 +188,7 @@ class PushRuleStore(SQLBaseStore):
user_ids, on_invalidate=cache_context.invalidate,
)
rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
rules_by_user = {k: v for k, v in rules_by_user.iteritems() if v is not None}
defer.returnValue(rules_by_user)
@@ -398,7 +398,8 @@ class PushRuleStore(SQLBaseStore):
with self._push_rules_stream_id_gen.get_next() as ids:
stream_id, event_stream_ordering = ids
yield self.runInteraction(
"delete_push_rule", delete_push_rule_txn, stream_id, event_stream_ordering
"delete_push_rule", delete_push_rule_txn, stream_id,
event_stream_ordering,
)
@defer.inlineCallbacks

View File

@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)
class DeferredTimedOutError(SynapseError):
def __init__(self):
super(SynapseError, self).__init__(504, "Timed out")
super(DeferredTimedOutError, self).__init__(504, "Timed out")
def unwrapFirstError(failure):
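
The one-line fix matters because super(SynapseError, self) starts the method lookup after SynapseError in the MRO, so SynapseError's own __init__ never runs; a toy illustration of the difference (not the synapse classes):

class Base(object):
    def __init__(self, code, msg):
        self.code, self.msg = code, msg

class SpecificError(Base):
    def __init__(self, code, msg):
        super(SpecificError, self).__init__(code, msg)
        self.errcode = "M_UNKNOWN"          # extra attribute the subclass relies on

class TimedOut(SpecificError):
    def __init__(self):
        # Correct: start the lookup at TimedOut so SpecificError.__init__ runs.
        # super(SpecificError, self) here would jump straight to Base.__init__
        # and leave errcode unset.
        super(TimedOut, self).__init__(504, "Timed out")

assert TimedOut().errcode == "M_UNKNOWN"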

View File

@@ -188,25 +188,6 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state):
})
@defer.inlineCallbacks
def filter_events_for_clients_context(store, user_tuples, events, event_id_to_context):
user_ids = set(u[0] for u in user_tuples)
event_id_to_state = {}
for event_id, context in event_id_to_context.items():
state = yield store.get_events([
e_id
for key, e_id in context.current_state_ids.iteritems()
if key == (EventTypes.RoomHistoryVisibility, "")
or (key[0] == EventTypes.Member and key[1] in user_ids)
])
event_id_to_state[event_id] = state
res = yield filter_events_for_clients(
store, user_tuples, events, event_id_to_state
)
defer.returnValue(res)
@defer.inlineCallbacks
def filter_events_for_client(store, user_id, events, is_peeking=False):
"""

View File

@@ -27,10 +27,10 @@ class EventInjector:
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
def create_room(self, room):
def create_room(self, room, user):
builder = self.event_builder_factory.new({
"type": EventTypes.Create,
"sender": "",
"sender": user.to_string(),
"room_id": room.to_string(),
"content": {},
})

View File

@@ -50,7 +50,7 @@ class EventsStoreTestCase(unittest.TestCase):
# Create something to report
room = RoomID.from_string("!abc123:test")
user = UserID.from_string("@raccoonlover:test")
yield self.event_injector.create_room(room)
yield self.event_injector.create_room(room, user)
self.base_event = yield self._get_last_stream_token()