Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes
commit ce32d1eed2

CHANGES.rst
@@ -1,3 +1,65 @@
+Changes in synapse v0.21.0-rc2 (2017-05-08)
+===========================================
+
+Changes:
+
+* Always mark remotes as up if we receive a signed request from them (PR #2190)
+
+
+Bug fixes:
+
+* Fix bug where users got pushed for rooms they had muted (PR #2200)
+
+
+Changes in synapse v0.21.0-rc1 (2017-05-08)
+===========================================
+
+Features:
+
+* Add username availability checker API (PR #2183)
+* Add read marker API (PR #2120)
+
+
+Changes:
+
+* Enable guest access for the 3pl/3pid APIs (PR #1986)
+* Add setting to support TURN for guests (PR #2011)
+* Various performance improvements (PR #2075, #2076, #2080, #2083, #2108,
+  #2158, #2176, #2185)
+* Make synctl a bit more user friendly (PR #2078, #2127) Thanks @APwhitehat!
+* Replace HTTP replication with TCP replication (PR #2082, #2097, #2098,
+  #2099, #2103, #2014, #2016, #2115, #2116, #2117)
+* Support authenticated SMTP (PR #2102) Thanks @DanielDent!
+* Add a counter metric for successfully-sent transactions (PR #2121)
+* Propagate errors sensibly from proxied IS requests (PR #2147)
+* Add more granular event send metrics (PR #2178)
+
+
+Bug fixes:
+
+* Fix nuke-room script to work with current schema (PR #1927) Thanks
+  @zuckschwerdt!
+* Fix db port script to not assume postgres tables are in the public schema
+  (PR #2024) Thanks @jerrykan!
+* Fix getting latest device IP for user with no devices (PR #2118)
+* Fix rejection of invites to unreachable servers (PR #2145)
+* Fix code for reporting old verify keys in synapse (PR #2156)
+* Fix invite state to always include all events (PR #2163)
+* Fix bug where synapse would always fetch state for any missing event (PR #2170)
+* Fix a leak with timed out HTTP connections (PR #2180)
+* Fix bug where we didn't time out HTTP requests to ASes (PR #2192)
+
+
+Docs:
+
+* Clarify doc for SQLite to PostgreSQL port (PR #1961) Thanks @benhylau!
+* Fix typo in synctl help (PR #2107) Thanks @HarHarLinks!
+* ``web_client_location`` documentation fix (PR #2131) Thanks @matthewjwolff!
+* Update README.rst with FreeBSD changes (PR #2132) Thanks @feld!
+* Clarify setting up metrics (PR #2149) Thanks @encks!
+
+
 Changes in synapse v0.20.0 (2017-04-11)
 =======================================

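For context, the username availability checker added in PR #2183 is exercised with a plain HTTP GET. A minimal sketch in Python (homeserver name hypothetical; endpoint path as exposed under the client-server API prefix):

    import requests  # third-party HTTP client, assumed installed

    # Hypothetical homeserver; expected to return {"available": true},
    # or an M_USER_IN_USE error if the localpart is already taken.
    resp = requests.get(
        "https://homeserver.example/_matrix/client/r0/register/available",
        params={"username": "alice"},
    )
    print(resp.status_code, resp.json())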

@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.20.0"
+__version__ = "0.21.0-rc2"
@@ -24,6 +24,7 @@ from synapse.http.servlet import (
 )
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.versionstring import get_version_string
+from synapse.util.logcontext import preserve_fn
 from synapse.types import ThirdPartyInstanceID

 import functools

@@ -79,6 +80,7 @@ class Authenticator(object):
     def __init__(self, hs):
         self.keyring = hs.get_keyring()
         self.server_name = hs.hostname
+        self.store = hs.get_datastore()

     # A method just so we can pass 'self' as the authenticator to the Servlets
     @defer.inlineCallbacks

@@ -138,6 +140,13 @@ class Authenticator(object):
         logger.info("Request from %s", origin)
         request.authenticated_entity = origin

+        # If we get a valid signed request from the other side, it's probably
+        # alive
+        retry_timings = yield self.store.get_destination_retry_timings(origin)
+        if retry_timings and retry_timings["retry_last_ts"]:
+            logger.info("Marking origin %r as up", origin)
+            preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0)
+
         defer.returnValue(origin)
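The `preserve_fn(...)(origin, 0, 0)` call above fires the store update without yielding on it, so authentication is not blocked on the write. A minimal sketch of the same fire-and-forget shape with stock Twisted (helper name hypothetical; this is not Synapse's `preserve_fn`, which also preserves log contexts):

    from twisted.internet import defer

    def fire_and_forget(f, *args, **kwargs):
        # Start f immediately but do not wait for its Deferred; attach an
        # errback so a failure is logged instead of silently dropped.
        d = defer.maybeDeferred(f, *args, **kwargs)
        d.addErrback(lambda failure: print("background call failed:", failure))
        return d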

@@ -17,6 +17,7 @@ from synapse.api.constants import EventTypes
 from synapse.util import stringutils
 from synapse.util.async import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.retryutils import NotRetryingDestination
 from synapse.util.metrics import measure_func
 from synapse.types import get_domain_from_id, RoomStreamToken
 from twisted.internet import defer

@@ -425,12 +426,38 @@ class DeviceListEduUpdater(object):
             # This can happen since we batch updates
             return

         # Given a list of updates we check if we need to resync. This
         # happens if we've missed updates.
         resync = yield self._need_to_do_resync(user_id, pending_updates)

         if resync:
             # Fetch all devices for the user.
             origin = get_domain_from_id(user_id)
-            result = yield self.federation.query_user_devices(origin, user_id)
+            try:
+                result = yield self.federation.query_user_devices(origin, user_id)
+            except NotRetryingDestination:
+                # TODO: Remember that we are now out of sync and try again
+                # later
+                logger.warn(
+                    "Failed to handle device list update for %s,"
+                    " we're not retrying the remote",
+                    user_id,
+                )
+                # We abort on exceptions rather than accepting the update
+                # as otherwise synapse will 'forget' that its device list
+                # is out of date. If we bail then we will retry the resync
+                # next time we get a device list update for this user_id.
+                # This makes it more likely that the device lists will
+                # eventually become consistent.
+                return
+            except Exception:
+                # TODO: Remember that we are now out of sync and try again
+                # later
+                logger.exception(
+                    "Failed to handle device list update for %s", user_id
+                )
+                return

             stream_id = result["stream_id"]
             devices = result["devices"]
             yield self.store.update_remote_device_list_cache(
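The abort-on-failure comments above carry the key invariant: failing loudly leaves the handler marked out of sync, so the next incoming update retries the resync. A standalone toy model of that behaviour (class and names hypothetical):

    class DeviceListSyncer:
        # Toy model of the resync-or-abort behaviour in the hunk above.

        def __init__(self, fetch):
            self.fetch = fetch    # callable that may raise on network failure
            self.devices = None   # None means "known to be out of date"

        def on_update(self, user_id):
            if self.devices is None:  # we missed updates: full resync needed
                try:
                    self.devices = self.fetch(user_id)
                except Exception:
                    # Bail without marking ourselves up to date; the next
                    # update for this user triggers another resync attempt.
                    return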

@@ -380,13 +380,6 @@ class FederationHandler(BaseHandler):
                 affected=event.event_id,
             )

-        # if we're receiving valid events from an origin,
-        # it's probably a good idea to mark it as not in retry-state
-        # for sending (although this is a bit of a leap)
-        retry_timings = yield self.store.get_destination_retry_timings(origin)
-        if retry_timings and retry_timings["retry_last_ts"]:
-            self.store.set_destination_retry_timings(origin, 0, 0)
-
         room = yield self.store.get_room(event.room_id)

         if not room:

@@ -19,6 +19,7 @@ from synapse.api.errors import (
     CodeMessageException, MatrixCodeMessageException, SynapseError, Codes,
 )
 from synapse.util.logcontext import preserve_context_over_fn
+from synapse.util import logcontext
 import synapse.metrics
 from synapse.http.endpoint import SpiderEndpoint

@@ -72,39 +73,45 @@ class SimpleHttpClient(object):
             contextFactory=hs.get_http_client_context_factory()
         )
         self.user_agent = hs.version_string
+        self.clock = hs.get_clock()
         if hs.config.user_agent_suffix:
             self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)

+    @defer.inlineCallbacks
     def request(self, method, uri, *args, **kwargs):
         # A small wrapper around self.agent.request() so we can easily attach
         # counters to it
         outgoing_requests_counter.inc(method)
-        d = preserve_context_over_fn(
-            self.agent.request,
-            method, uri, *args, **kwargs
-        )

+        def send_request():
+            request_deferred = self.agent.request(
+                method, uri, *args, **kwargs
+            )
+
+            return self.clock.time_bound_deferred(
+                request_deferred,
+                time_out=60,
+            )
+
         logger.info("Sending request %s %s", method, uri)

-        def _cb(response):
+        try:
+            with logcontext.PreserveLoggingContext():
+                response = yield send_request()
+
             incoming_responses_counter.inc(method, response.code)
             logger.info(
                 "Received response to %s %s: %s",
                 method, uri, response.code
             )
-            return response

-        def _eb(failure):
+            defer.returnValue(response)
+        except Exception as e:
             incoming_responses_counter.inc(method, "ERR")
             logger.info(
                 "Error sending request to %s %s: %s %s",
-                method, uri, failure.type, failure.getErrorMessage()
+                method, uri, type(e).__name__, e.message
             )
-            return failure
-
-        d.addCallbacks(_cb, _eb)
-
-        return d
+            raise e

     @defer.inlineCallbacks
     def post_urlencoded_get_json(self, uri, args={}):
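The rewritten `request` bounds each call with `self.clock.time_bound_deferred`, which is what stops timed-out connections leaking (PR #2180 in the changelog). A simplified stand-in for that helper using only stock Twisted (this is not Synapse's actual `Clock` implementation):

    from twisted.internet import reactor

    def time_bound(d, timeout, clock=reactor):
        # Cancel `d` if it has not fired within `timeout` seconds.
        delayed_call = clock.callLater(timeout, d.cancel)

        def finished(result):
            if delayed_call.active():
                delayed_call.cancel()  # fired in time: disarm the timer
            return result

        d.addBoth(finished)
        return d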

@@ -33,6 +33,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         self.client_ip_last_seen = Cache(
             name="client_ip_last_seen",
             keylen=4,
+            max_entries=5000,
         )

         super(ClientIpStore, self).__init__(hs)

@@ -18,7 +18,7 @@ import ujson as json
 from twisted.internet import defer

 from synapse.api.errors import StoreError
-from ._base import SQLBaseStore
+from ._base import SQLBaseStore, Cache
 from synapse.util.caches.descriptors import cached, cachedList, cachedInlineCallbacks

@@ -29,6 +29,14 @@ class DeviceStore(SQLBaseStore):
     def __init__(self, hs):
         super(DeviceStore, self).__init__(hs)

+        # Map of (user_id, device_id) -> bool. If there is an entry, that
+        # implies the device exists.
+        self.device_id_exists_cache = Cache(
+            name="device_id_exists",
+            keylen=2,
+            max_entries=10000,
+        )
+
         self._clock.looping_call(
             self._prune_old_outbound_device_pokes, 60 * 60 * 1000
         )

@@ -54,6 +62,10 @@ class DeviceStore(SQLBaseStore):
             defer.Deferred: boolean whether the device was inserted or an
                 existing device existed with that ID.
         """
+        key = (user_id, device_id)
+        if self.device_id_exists_cache.get(key, None):
+            defer.returnValue(False)
+
         try:
             inserted = yield self._simple_insert(
                 "devices",

@@ -65,6 +77,7 @@ class DeviceStore(SQLBaseStore):
                 desc="store_device",
                 or_ignore=True,
             )
+            self.device_id_exists_cache.prefill(key, True)
             defer.returnValue(inserted)
         except Exception as e:
             logger.error("store_device with device_id=%s(%r) user_id=%s(%r)"

@@ -93,6 +106,7 @@ class DeviceStore(SQLBaseStore):
             desc="get_device",
         )

+    @defer.inlineCallbacks
     def delete_device(self, user_id, device_id):
         """Delete a device.

@@ -102,12 +116,15 @@ class DeviceStore(SQLBaseStore):
         Returns:
             defer.Deferred
         """
-        return self._simple_delete_one(
+        yield self._simple_delete_one(
             table="devices",
             keyvalues={"user_id": user_id, "device_id": device_id},
             desc="delete_device",
         )
+
+        self.device_id_exists_cache.invalidate((user_id, device_id))

+    @defer.inlineCallbacks
     def delete_devices(self, user_id, device_ids):
         """Deletes several devices.

@@ -117,13 +134,15 @@ class DeviceStore(SQLBaseStore):
         Returns:
             defer.Deferred
         """
-        return self._simple_delete_many(
+        yield self._simple_delete_many(
             table="devices",
             column="device_id",
             iterable=device_ids,
             keyvalues={"user_id": user_id},
             desc="delete_devices",
         )
+        for device_id in device_ids:
+            self.device_id_exists_cache.invalidate((user_id, device_id))

     def update_device(self, user_id, device_id, new_display_name=None):
         """Update a device.
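The DeviceStore hunks above form one write-through existence cache: check before insert, `prefill` after a successful write, `invalidate` on delete. A toy version with a plain dict standing in for Synapse's `Cache` (class and names hypothetical):

    class ExistenceCachedStore:
        def __init__(self, db):
            self.db = db        # backing store: a set of (user_id, device_id)
            self._exists = {}   # (user_id, device_id) -> True

        def store_device(self, user_id, device_id):
            key = (user_id, device_id)
            if self._exists.get(key):
                return False    # known to exist: skip the DB write entirely
            inserted = key not in self.db
            self.db.add(key)
            self._exists[key] = True  # prefill so later writes short-circuit
            return inserted

        def delete_device(self, user_id, device_id):
            self.db.discard((user_id, device_id))
            # Invalidate rather than caching False: a racing insert could
            # otherwise be masked by a stale negative entry.
            self._exists.pop((user_id, device_id), None)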

@@ -15,6 +15,7 @@
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
+from synapse.util.caches.descriptors import cached

 from canonicaljson import encode_canonical_json
 import ujson as json

@@ -177,10 +178,14 @@ class EndToEndKeyStore(SQLBaseStore):
                     for algorithm, key_id, json_bytes in new_keys
                 ],
             )
+            txn.call_after(
+                self.count_e2e_one_time_keys.invalidate, (user_id, device_id,)
+            )
         yield self.runInteraction(
             "add_e2e_one_time_keys_insert", _add_e2e_one_time_keys
         )

+    @cached(max_entries=10000)
     def count_e2e_one_time_keys(self, user_id, device_id):
         """ Count the number of one time keys the server has for a device
         Returns:

@@ -225,6 +230,9 @@ class EndToEndKeyStore(SQLBaseStore):
             )
             for user_id, device_id, algorithm, key_id in delete:
                 txn.execute(sql, (user_id, device_id, algorithm, key_id))
+                txn.call_after(
+                    self.count_e2e_one_time_keys.invalidate, (user_id, device_id,)
+                )
             return result
         return self.runInteraction(
             "claim_e2e_one_time_keys", _claim_e2e_one_time_keys

@@ -242,3 +250,4 @@ class EndToEndKeyStore(SQLBaseStore):
             keyvalues={"user_id": user_id, "device_id": device_id},
             desc="delete_e2e_one_time_keys_by_device"
         )
+        self.count_e2e_one_time_keys.invalidate((user_id, device_id,))
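In the hunks above, `txn.call_after` queues the cache invalidation to run only once the transaction commits, so a rolled-back transaction never evicts a still-valid entry. A toy model of that ordering (API shape assumed, not Synapse's actual transaction wrapper):

    class Txn:
        def __init__(self):
            self._after = []      # callbacks to run on successful commit

        def call_after(self, fn, *args):
            self._after.append((fn, args))

        def commit(self):
            for fn, args in self._after:
                fn(*args)         # e.g. cache.invalidate(key)

        def rollback(self):
            self._after.clear()   # nothing fires; the cache stays intact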

@@ -1342,11 +1342,26 @@ class EventsStore(SQLBaseStore):
     def _invalidate_get_event_cache(self, event_id):
         self._get_event_cache.invalidate((event_id,))

-    def _get_events_from_cache(self, events, allow_rejected):
+    def _get_events_from_cache(self, events, allow_rejected, update_metrics=True):
+        """Fetch events from the caches
+
+        Args:
+            events (list(str)): list of event_ids to fetch
+            allow_rejected (bool): Whether to return events that were rejected
+            update_metrics (bool): Whether to update the cache hit ratio metrics
+
+        Returns:
+            dict of event_id -> _EventCacheEntry for each event_id in cache. If
+            allow_rejected is `False` then there will still be an entry but it
+            will be `None`
+        """
         event_map = {}

         for event_id in events:
-            ret = self._get_event_cache.get((event_id,), None)
+            ret = self._get_event_cache.get(
+                (event_id,), None,
+                update_metrics=update_metrics,
+            )
             if not ret:
                 continue


@@ -421,9 +421,13 @@ class RoomMemberStore(SQLBaseStore):
         # We check if we have any of the member event ids in the event cache
         # before we ask the DB

+        # We don't update the event cache hit ratio as it completely throws off
+        # the hit ratio counts. After all, we don't populate the cache if we
+        # miss it here
         event_map = self._get_events_from_cache(
             member_event_ids,
             allow_rejected=False,
+            update_metrics=False,
         )

         missing_member_event_ids = []

@@ -96,7 +96,7 @@ class Cache(object):
                 "Cache objects can only be accessed from the main thread"
             )

-    def get(self, key, default=_CacheSentinel, callback=None):
+    def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True):
         """Looks the key up in the caches.

         Args:

@@ -104,6 +104,7 @@ class Cache(object):
             default: What is returned if key is not in the caches. If not
                 specified then function throws KeyError instead
             callback(fn): Gets called when the entry in the cache is invalidated
+            update_metrics (bool): whether to update the cache hit rate metrics

         Returns:
             Either a Deferred or the raw result

@@ -113,7 +114,8 @@ class Cache(object):
         if val is not _CacheSentinel:
             if val.sequence == self.sequence:
                 val.callbacks.update(callbacks)
-                self.metrics.inc_hits()
+                if update_metrics:
+                    self.metrics.inc_hits()
                 return val.deferred

         val = self.cache.get(key, _CacheSentinel, callbacks=callbacks)

@@ -121,7 +123,8 @@ class Cache(object):
             self.metrics.inc_hits()
             return val

-        self.metrics.inc_misses()
+        if update_metrics:
+            self.metrics.inc_misses()

         if default is _CacheSentinel:
             raise KeyError()
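With `update_metrics=False`, a caller can probe the cache speculatively without dragging down the reported hit rate, which is exactly how RoomMemberStore uses it above. A small usage sketch (function and loader hypothetical):

    def lookup(cache, key, load_from_db):
        # A miss here is expected and will not populate the cache, so skip
        # the hit/miss accounting to keep the gauge honest.
        entry = cache.get(key, None, update_metrics=False)
        if entry is None:
            entry = load_from_db(key)
        return entry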