Merge branch 'keyclient_retry_scheme' of github.com:matrix-org/synapse into develop

pull/80/head
Erik Johnston 2015-02-18 10:34:40 +00:00
commit 1be67eca8a
4 changed files with 246 additions and 101 deletions


@@ -22,6 +22,8 @@ from syutil.crypto.signing_key import (
 from syutil.base64util import decode_base64, encode_base64
 from synapse.api.errors import SynapseError, Codes
+from synapse.util.retryutils import get_retry_limiter
+
 from OpenSSL import crypto

 import logging
@@ -87,12 +89,18 @@ class Keyring(object):
             return

         # Try to fetch the key from the remote server.
-        # TODO(markjh): Ratelimit requests to a given server.
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_context_factory
+
+        limiter = yield get_retry_limiter(
+            server_name,
+            self.clock,
+            self.store,
         )

+        with limiter:
+            (response, tls_certificate) = yield fetch_server_key(
+                server_name, self.hs.tls_context_factory
+            )
+
         # Check the response.

         x509_certificate_bytes = crypto.dump_certificate(
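The change above is the pattern this branch introduces wherever an outbound federation request is made: ask get_retry_limiter for a context manager (it raises NotRetryingDestination while the host is still inside its backoff window), then run the network call inside it so that a failure pushes the destination's retry timings back. A minimal sketch of the shape, with do_request standing in for the real call and clock/store for the homeserver's clock and datastore:

    from twisted.internet import defer

    from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination


    @defer.inlineCallbacks
    def fetch_with_backoff(destination, clock, store, do_request):
        # do_request, clock and store are placeholders for whatever the caller
        # has to hand (here fetch_server_key and the homeserver's clock and
        # datastore); only the calling pattern matters.
        try:
            limiter = yield get_retry_limiter(destination, clock, store)
            with limiter:
                response = yield do_request()
        except NotRetryingDestination:
            # Still inside the backoff window for this destination.
            defer.returnValue(None)
        defer.returnValue(response)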


@@ -24,6 +26,8 @@ from synapse.util.expiringcache import ExpiringCache
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
+from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+
 import logging
@@ -183,24 +185,32 @@ class FederationClient(FederationBase):
         pdu = None
         for destination in destinations:
             try:
-                transaction_data = yield self.transport_layer.get_event(
-                    destination, event_id
-                )
-
-                logger.debug("transaction_data %r", transaction_data)
-
-                pdu_list = [
-                    self.event_from_pdu_json(p, outlier=outlier)
-                    for p in transaction_data["pdus"]
-                ]
-
-                if pdu_list:
-                    pdu = pdu_list[0]
-
-                    # Check signatures are correct.
-                    pdu = yield self._check_sigs_and_hash(pdu)
-
-                    break
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self._clock,
+                    self.store,
+                )
+
+                with limiter:
+                    transaction_data = yield self.transport_layer.get_event(
+                        destination, event_id
+                    )
+
+                    logger.debug("transaction_data %r", transaction_data)
+
+                    pdu_list = [
+                        self.event_from_pdu_json(p, outlier=outlier)
+                        for p in transaction_data["pdus"]
+                    ]
+
+                    if pdu_list:
+                        pdu = pdu_list[0]
+
+                        # Check signatures are correct.
+                        pdu = yield self._check_sigs_and_hash(pdu)
+
+                        break
+
             except SynapseError:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",
@@ -216,6 +226,9 @@ class FederationClient(FederationBase):
                     event_id, destination, e,
                 )
                 continue
+            except NotRetryingDestination as e:
+                logger.info(e.message)
+                continue
             except Exception as e:
                 logger.info(
                     "Failed to get PDU %s from %s because %s",


@@ -22,6 +22,9 @@ from .units import Transaction
 from synapse.api.errors import HttpResponseException
 from synapse.util.logutils import log_function
 from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.retryutils import (
+    get_retry_limiter, NotRetryingDestination,
+)

 import logging
@@ -147,25 +150,6 @@ class TransactionQueue(object):
     @defer.inlineCallbacks
     @log_function
     def _attempt_new_transaction(self, destination):
-
-        (retry_last_ts, retry_interval) = (0, 0)
-        retry_timings = yield self.store.get_destination_retry_timings(
-            destination
-        )
-        if retry_timings:
-            (retry_last_ts, retry_interval) = (
-                retry_timings.retry_last_ts, retry_timings.retry_interval
-            )
-            if retry_last_ts + retry_interval > int(self._clock.time_msec()):
-                logger.info(
-                    "TX [%s] not ready for retry yet - "
-                    "dropping transaction for now",
-                    destination,
-                )
-                return
-            else:
-                logger.info("TX [%s] is ready for retry", destination)
-
         if destination in self.pending_transactions:
             # XXX: pending_transactions can get stuck on by a never-ending
             # request at which point pending_pdus_by_dest just keeps growing.
@@ -192,15 +176,6 @@ class TransactionQueue(object):
             logger.info("TX [%s] Nothing to send", destination)
             return

-        logger.debug(
-            "TX [%s] Attempting new transaction"
-            " (pdus: %d, edus: %d, failures: %d)",
-            destination,
-            len(pending_pdus),
-            len(pending_edus),
-            len(pending_failures)
-        )
-
         # Sort based on the order field
         pending_pdus.sort(key=lambda t: t[2])
@@ -213,6 +188,21 @@ class TransactionQueue(object):
         ]

         try:
+            limiter = yield get_retry_limiter(
+                destination,
+                self._clock,
+                self.store,
+            )
+
+            logger.debug(
+                "TX [%s] Attempting new transaction"
+                " (pdus: %d, edus: %d, failures: %d)",
+                destination,
+                len(pending_pdus),
+                len(pending_edus),
+                len(pending_failures)
+            )
+
             self.pending_transactions[destination] = 1

             logger.debug("TX [%s] Persisting transaction...", destination)
@@ -238,61 +228,57 @@ class TransactionQueue(object):
                 transaction.transaction_id,
             )

-            # Actually send the transaction
-
-            # FIXME (erikj): This is a bit of a hack to make the Pdu age
-            # keys work
-            def json_data_cb():
-                data = transaction.get_dict()
-                now = int(self._clock.time_msec())
-                if "pdus" in data:
-                    for p in data["pdus"]:
-                        if "age_ts" in p:
-                            unsigned = p.setdefault("unsigned", {})
-                            unsigned["age"] = now - int(p["age_ts"])
-                            del p["age_ts"]
-                return data
-
-            try:
-                response = yield self.transport_layer.send_transaction(
-                    transaction, json_data_cb
-                )
-                code = 200
-
-                if response:
-                    for e_id, r in getattr(response, "pdus", {}).items():
-                        if "error" in r:
-                            logger.warn(
-                                "Transaction returned error for %s: %s",
-                                e_id, r,
-                            )
-
-            except HttpResponseException as e:
-                code = e.code
-                response = e.response
-
-            logger.info("TX [%s] got %d response", destination, code)
-
-            logger.debug("TX [%s] Sent transaction", destination)
-            logger.debug("TX [%s] Marking as delivered...", destination)
-
-            yield self.transaction_actions.delivered(
-                transaction, code, response
-            )
-
-            logger.debug("TX [%s] Marked as delivered", destination)
+            with limiter:
+                # Actually send the transaction
+
+                # FIXME (erikj): This is a bit of a hack to make the Pdu age
+                # keys work
+                def json_data_cb():
+                    data = transaction.get_dict()
+                    now = int(self._clock.time_msec())
+                    if "pdus" in data:
+                        for p in data["pdus"]:
+                            if "age_ts" in p:
+                                unsigned = p.setdefault("unsigned", {})
+                                unsigned["age"] = now - int(p["age_ts"])
+                                del p["age_ts"]
+                    return data
+
+                try:
+                    response = yield self.transport_layer.send_transaction(
+                        transaction, json_data_cb
+                    )
+                    code = 200
+
+                    if response:
+                        for e_id, r in getattr(response, "pdus", {}).items():
+                            if "error" in r:
+                                logger.warn(
+                                    "Transaction returned error for %s: %s",
+                                    e_id, r,
+                                )
+                except HttpResponseException as e:
+                    code = e.code
+                    response = e.response
+
+                logger.info("TX [%s] got %d response", destination, code)
+
+                logger.debug("TX [%s] Sent transaction", destination)
+                logger.debug("TX [%s] Marking as delivered...", destination)
+
+                yield self.transaction_actions.delivered(
+                    transaction, code, response
+                )
+
+                logger.debug("TX [%s] Marked as delivered", destination)
+
             logger.debug("TX [%s] Yielding to callbacks...", destination)

             for deferred in deferreds:
                 if code == 200:
-                    if retry_last_ts:
-                        # this host is alive! reset retry schedule
-                        yield self.store.set_destination_retry_timings(
-                            destination, 0, 0
-                        )
                     deferred.callback(None)
                 else:
-                    self.set_retrying(destination, retry_interval)
                     deferred.errback(RuntimeError("Got status %d" % code))

             # Ensures we don't continue until all callbacks on that
@@ -303,6 +289,12 @@ class TransactionQueue(object):
                 pass

             logger.debug("TX [%s] Yielded to callbacks", destination)

+        except NotRetryingDestination:
+            logger.info(
+                "TX [%s] not ready for retry yet - "
+                "dropping transaction for now",
+                destination,
+            )
         except RuntimeError as e:
             # We capture this here as there as nothing actually listens
             # for this finishing functions deferred.
@@ -320,8 +312,6 @@ class TransactionQueue(object):
                 e,
             )

-            self.set_retrying(destination, retry_interval)
-
             for deferred in deferreds:
                 if not deferred.called:
                     deferred.errback(e)
@@ -332,22 +322,3 @@ class TransactionQueue(object):

             # Check to see if there is anything else to send.
             self._attempt_new_transaction(destination)
-
-    @defer.inlineCallbacks
-    def set_retrying(self, destination, retry_interval):
-        # track that this destination is having problems and we should
-        # give it a chance to recover before trying it again
-
-        if retry_interval:
-            retry_interval *= 2
-            # plateau at hourly retries for now
-            if retry_interval >= 60 * 60 * 1000:
-                retry_interval = 60 * 60 * 1000
-        else:
-            retry_interval = 2000  # try again at first after 2 seconds
-
-        yield self.store.set_destination_retry_timings(
-            destination,
-            int(self._clock.time_msec()),
-            retry_interval
-        )
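The deleted set_retrying helper is subsumed by RetryDestinationLimiter in the new retryutils module below; the schedule itself only changes its starting point, from 2 seconds to the limiter's 5 second default. As a sketch (not synapse code) of the interval stored after each failed request:

    def next_retry_interval(current_ms, minimum_ms=5000, multiplier=2,
                            maximum_ms=60 * 60 * 1000):
        """Return the retry interval to store after another failed request.

        minimum_ms mirrors RetryDestinationLimiter's default (the old
        set_retrying used 2000 ms); a zero current interval means the
        previous request succeeded, so backoff restarts from the minimum.
        """
        if not current_ms:
            return minimum_ms
        return min(current_ms * multiplier, maximum_ms)


    # e.g. successive failures: 5000, 10000, 20000, ... up to 3600000 ms (1 hour)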

synapse/util/retryutils.py (new file, 153 lines)

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.errors import CodeMessageException
import logging
logger = logging.getLogger(__name__)
class NotRetryingDestination(Exception):
    def __init__(self, retry_last_ts, retry_interval, destination):
        msg = "Not retrying server %s." % (destination,)
        super(NotRetryingDestination, self).__init__(msg)

        self.retry_last_ts = retry_last_ts
        self.retry_interval = retry_interval
        self.destination = destination
@defer.inlineCallbacks
def get_retry_limiter(destination, clock, store, **kwargs):
    """For a given destination check if we have previously failed to
    send a request there and are waiting before retrying the destination.
    If we are not ready to retry the destination, this will raise a
    NotRetryingDestination exception. Otherwise, will return a Context Manager
    that will mark the destination as down if an exception is thrown (excluding
    CodeMessageException with code < 500)

    Example usage:

        try:
            limiter = yield get_retry_limiter(destination, clock, store)
            with limiter:
                response = yield do_request()
        except NotRetryingDestination:
            # We aren't ready to retry that destination.
            raise
    """
    retry_last_ts, retry_interval = (0, 0)

    retry_timings = yield store.get_destination_retry_timings(
        destination
    )

    if retry_timings:
        retry_last_ts, retry_interval = (
            retry_timings.retry_last_ts, retry_timings.retry_interval
        )

        now = int(clock.time_msec())

        if retry_last_ts + retry_interval > now:
            raise NotRetryingDestination(
                retry_last_ts=retry_last_ts,
                retry_interval=retry_interval,
                destination=destination,
            )

    defer.returnValue(
        RetryDestinationLimiter(
            destination,
            clock,
            store,
            retry_interval,
            **kwargs
        )
    )
class RetryDestinationLimiter(object):
    def __init__(self, destination, clock, store, retry_interval,
                 min_retry_interval=5000, max_retry_interval=60 * 60 * 1000,
                 multiplier_retry_interval=2,):
        """Marks the destination as "down" if an exception is thrown in the
        context, except for CodeMessageException with code < 500.

        If no exception is raised, marks the destination as "up".

        Args:
            destination (str)
            clock (Clock)
            store (DataStore)
            retry_interval (int): The next retry interval taken from the
                database in milliseconds, or zero if the last request was
                successful.
            min_retry_interval (int): The minimum retry interval to use after
                a failed request, in milliseconds.
            max_retry_interval (int): The maximum retry interval to use after
                a failed request, in milliseconds.
            multiplier_retry_interval (int): The multiplier to use to increase
                the retry interval after a failed request.
        """
        self.clock = clock
        self.store = store
        self.destination = destination

        self.retry_interval = retry_interval
        self.min_retry_interval = min_retry_interval
        self.max_retry_interval = max_retry_interval
        self.multiplier_retry_interval = multiplier_retry_interval

    def __enter__(self):
        pass
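    # With the defaults above, a run of failed requests backs the retry
    # interval off at 5s, 10s, 20s, ... up to the one hour cap, while a
    # success (or a CodeMessageException with a code below 500) resets the
    # stored interval to zero.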
    def __exit__(self, exc_type, exc_val, exc_tb):
        def err(failure):
            logger.exception(
                "Failed to store set_destination_retry_timings: %s",
                failure.value
            )

        valid_err_code = False
        if exc_type is CodeMessageException:
            valid_err_code = 0 <= exc_val.code < 500

        if exc_type is None or valid_err_code:
            # We connected successfully.
            if not self.retry_interval:
                return

            retry_last_ts = 0
            self.retry_interval = 0
        else:
            # We couldn't connect.
            if self.retry_interval:
                self.retry_interval *= self.multiplier_retry_interval
                if self.retry_interval >= self.max_retry_interval:
                    self.retry_interval = self.max_retry_interval
            else:
                self.retry_interval = self.min_retry_interval

            retry_last_ts = int(self.clock.time_msec())

        self.store.set_destination_retry_timings(
            self.destination, retry_last_ts, self.retry_interval
        ).addErrback(err)
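A self-contained sketch of how the limiter behaves end to end, using stub objects that implement only the pieces of the real Clock and DataStore interfaces this module touches (time_msec, get_destination_retry_timings, set_destination_retry_timings); the stub classes and the demo flow are illustrative, not part of the commit:

    from collections import namedtuple

    from twisted.internet import defer

    from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

    RetryTimings = namedtuple("RetryTimings", ["retry_last_ts", "retry_interval"])


    class StubClock(object):
        def __init__(self):
            self.now = 0

        def time_msec(self):
            return self.now


    class StubStore(object):
        def __init__(self):
            self.timings = {}

        def get_destination_retry_timings(self, destination):
            return defer.succeed(self.timings.get(destination))

        def set_destination_retry_timings(self, destination, last_ts, interval):
            self.timings[destination] = RetryTimings(last_ts, interval)
            return defer.succeed(None)


    @defer.inlineCallbacks
    def demo():
        clock, store = StubClock(), StubStore()

        # A failed request marks the destination down with the minimum
        # 5 second retry interval.
        limiter = yield get_retry_limiter("remote.example.com", clock, store)
        try:
            with limiter:
                raise RuntimeError("connection refused")
        except RuntimeError:
            pass
        print(store.timings["remote.example.com"].retry_interval)  # 5000

        # Asking again before the interval has elapsed is refused outright.
        try:
            yield get_retry_limiter("remote.example.com", clock, store)
        except NotRetryingDestination:
            print("still backing off")

        # Once the interval has passed, a successful request resets it to zero.
        clock.now += 5001
        limiter = yield get_retry_limiter("remote.example.com", clock, store)
        with limiter:
            pass
        print(store.timings["remote.example.com"].retry_interval)  # 0


    if __name__ == "__main__":
        demo()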