Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes

pull/2587/head
Erik Johnston 2017-05-03 10:54:24 +01:00
commit dfc7bf2e84
4 changed files with 39 additions and 30 deletions

synapse/handlers/federation.py

@@ -171,6 +171,16 @@ class FederationHandler(BaseHandler):
yield self._get_missing_events_for_pdu(
origin, pdu, prevs, min_depth
)
# Update the set of things we've seen after trying to
# fetch the missing stuff
have_seen = yield self.store.have_events(prevs)
seen = set(have_seen.iterkeys())
if not prevs - seen:
logger.info(
"Found all missing prev events for %s", pdu.event_id
)
elif prevs - seen:
logger.info(
"Not fetching %d missing events for room %r,event %s: %r...",
@@ -178,8 +188,6 @@ class FederationHandler(BaseHandler):
list(prevs - seen)[:5],
)
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
if prevs - seen:
logger.info(
"Still missing %d events for room %r: %r...",
@@ -214,19 +222,15 @@ class FederationHandler(BaseHandler):
Args:
origin (str): Origin of the pdu. Will be called to get the missing events
pdu: received pdu
prevs (str[]): List of event ids which we are missing
prevs (set(str)): List of event ids which we are missing
min_depth (int): Minimum depth of events to return.
Returns:
Deferred<dict(str, str?)>: updated have_seen dictionary
"""
# We recalculate seen, since it may have changed.
have_seen = yield self.store.have_events(prevs)
seen = set(have_seen.keys())
if not prevs - seen:
# nothing left to do
defer.returnValue(have_seen)
return
latest = yield self.store.get_latest_event_ids_in_room(
pdu.room_id
@@ -288,19 +292,6 @@ class FederationHandler(BaseHandler):
get_missing=False
)
have_seen = yield self.store.have_events(
[ev for ev, _ in pdu.prev_events]
)
seen = set(have_seen.keys())
if prevs - seen:
logger.info(
"Still missing %d prev events for %s: %r...",
len(prevs - seen), pdu.event_id, list(prevs - seen)[:5]
)
else:
logger.info("Found all missing prev events for %s", pdu.event_id)
defer.returnValue(have_seen)
@log_function
@defer.inlineCallbacks
def _process_received_pdu(self, origin, pdu, state, auth_chain):
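The federation.py changes above come down to set arithmetic over prev_event ids: after trying to fetch the missing events, recompute which prev events the store already has and log either "Found all missing prev events" or how many are still outstanding. A minimal sketch of that pattern, assuming a have_events-style lookup that returns a dict keyed by the event ids already persisted; the helper name missing_prev_events is illustrative, not from the diff:

def missing_prev_events(pdu_prev_events, have_events):
    """Return the prev_event ids that are not yet in the store.

    pdu_prev_events: iterable of (event_id, hashes) pairs, as on a PDU.
    have_events: callable taking a collection of event ids and returning
        a dict whose keys are the ids we already have (illustrative
        stand-in for self.store.have_events in the diff above).
    """
    prevs = {e_id for e_id, _ in pdu_prev_events}
    seen = set(have_events(prevs).keys())
    return prevs - seen

# An empty result is the "Found all missing prev events" case; a non-empty
# one is what the "Still missing %d events" log line reports.
prev_events = [("$a:example.org", {}), ("$b:example.org", {})]
still_missing = missing_prev_events(prev_events, lambda ids: {"$a:example.org": True})
assert still_missing == {"$b:example.org"}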

synapse/storage/_base.py

@@ -60,12 +60,12 @@ class LoggingTransaction(object):
object.__setattr__(self, "database_engine", database_engine)
object.__setattr__(self, "after_callbacks", after_callbacks)
def call_after(self, callback, *args):
def call_after(self, callback, *args, **kwargs):
"""Call the given callback on the main twisted thread after the
transaction has finished. Used to invalidate the caches on the
correct thread.
"""
self.after_callbacks.append((callback, args))
self.after_callbacks.append((callback, args, kwargs))
def __getattr__(self, name):
return getattr(self.txn, name)
@@ -319,8 +319,8 @@ class SQLBaseStore(object):
inner_func, *args, **kwargs
)
finally:
for after_callback, after_args in after_callbacks:
after_callback(*after_args)
for after_callback, after_args, after_kwargs in after_callbacks:
after_callback(*after_args, **after_kwargs)
defer.returnValue(result)
@defer.inlineCallbacks
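The storage/_base.py change extends call_after from (callback, args) pairs to (callback, args, kwargs) triples so keyword arguments can be forwarded when the callbacks run. A minimal sketch of the queue-then-flush pattern, using a hypothetical AfterCallbackQueue class; in the real code the flush happens on the main twisted thread once the transaction has finished, as the docstring above says:

class AfterCallbackQueue(object):
    """Illustrative stand-in for the after_callbacks list on a transaction."""

    def __init__(self):
        self._after_callbacks = []

    def call_after(self, callback, *args, **kwargs):
        # Nothing runs yet; the call is queued until the transaction is done.
        self._after_callbacks.append((callback, args, kwargs))

    def flush(self):
        # Invoke everything in order, forwarding both args and kwargs.
        for callback, args, kwargs in self._after_callbacks:
            callback(*args, **kwargs)
        del self._after_callbacks[:]

def invalidate(cache_name, key=None, full=False):
    print("invalidate %s key=%r full=%r" % (cache_name, key, full))

queue = AfterCallbackQueue()
queue.call_after(invalidate, "get_current_state_ids", key=("!room:example.org",), full=True)
queue.flush()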

synapse/storage/events.py

@@ -374,6 +374,12 @@ class EventsStore(SQLBaseStore):
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc_by(len(chunk))
for room_id, (_, _, new_state) in current_state_for_room.iteritems():
self.get_current_state_ids.prefill(
(room_id, ), new_state
)
for event, context in chunk:
if context.app_service:
origin_type = "local"
@@ -435,10 +441,10 @@
Assumes that we are only persisting events for one room at a time.
Returns:
2-tuple (to_delete, to_insert) where both are state dicts, i.e.
(type, state_key) -> event_id. `to_delete` are the entries to
3-tuple (to_delete, to_insert, new_state) where both are state dicts,
i.e. (type, state_key) -> event_id. `to_delete` are the entries to
first be deleted from current_state_events, `to_insert` are entries
to insert.
to insert. `new_state` is the full set of state.
May return None if there are no changes to be applied.
"""
# Now we need to work out the different state sets for
@@ -545,7 +551,7 @@ class EventsStore(SQLBaseStore):
if ev_id in events_to_insert
}
defer.returnValue((to_delete, to_insert))
defer.returnValue((to_delete, to_insert, current_state))
@defer.inlineCallbacks
def get_event(self, event_id, check_redacted=True,
@@ -698,7 +704,7 @@ class EventsStore(SQLBaseStore):
def _update_current_state_txn(self, txn, state_delta_by_room):
for room_id, current_state_tuple in state_delta_by_room.iteritems():
to_delete, to_insert = current_state_tuple
to_delete, to_insert, _ = current_state_tuple
txn.executemany(
"DELETE FROM current_state_events WHERE event_id = ?",
[(ev_id,) for ev_id in to_delete.itervalues()],
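The events.py change threads the full post-persist state out of _calculate_state_delta as a third tuple element (new_state) purely so the persist path can prefill the current-state-ids cache; _update_current_state_txn just ignores it. A rough sketch of that prefill idea, using a hypothetical SimpleCache rather than Synapse's cache descriptor:

class SimpleCache(object):
    """Illustrative stand-in for the get_current_state_ids cache."""

    def __init__(self):
        self._entries = {}

    def prefill(self, key, value):
        # Eagerly store a value we have just computed, so the first reader
        # gets a hit instead of recomputing it from the database.
        self._entries[key] = value

    def get(self, key):
        return self._entries.get(key)

current_state_ids_cache = SimpleCache()

def after_persist(current_state_for_room):
    # room_id -> (to_delete, to_insert, new_state), mirroring the 3-tuple
    # now returned by _calculate_state_delta.
    for room_id, (_, _, new_state) in current_state_for_room.items():
        current_state_ids_cache.prefill((room_id,), new_state)

after_persist({
    "!room:example.org": ({}, {}, {("m.room.create", ""): "$create:example.org"}),
})
assert current_state_ids_cache.get(("!room:example.org",)) is not None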

synapse/storage/state.py

@@ -227,6 +227,18 @@ class StateStore(SQLBaseStore):
],
)
# Prefill the state group cache with this group.
# It's fine to use the sequence like this as the state group map
# is immutable. (If the map wasn't immutable then this prefill could
# race with another update)
txn.call_after(
self._state_group_cache.update,
self._state_group_cache.sequence,
key=context.state_group,
value=context.current_state_ids,
full=True,
)
self._simple_insert_many_txn(
txn,
table="event_to_state_groups",