# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
|
import abc
import logging
import re

# six is used for Python 2/3 compatibility (exception chaining and urllib).
from six import raise_from
from six.moves import urllib

from twisted.internet import defer

from synapse.api.errors import (
    CodeMessageException,
    HttpResponseException,
    RequestSendFailed,
    SynapseError,
)
from synapse.logging.opentracing import (
    inject_active_span_byte_dict,
    trace,
    trace_servlet,
)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string

# Module-level logger, named after this module per project convention.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class ReplicationEndpoint(object):
    """Helper base class for defining new replication HTTP endpoints.

    This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
    (with an `/:txn_id` suffix for cached requests), where NAME is a name,
    PATH_ARGS are a tuple of parameters to be encoded in the URL.

    For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
    with `CACHE` set to true then this generates an endpoint:

        /_synapse/replication/send_event/:event_id/:txn_id

    For POST/PUT requests the payload is serialized to json and sent as the
    body, while for GET requests the payload is added as query parameters. See
    `_serialize_payload` for details.

    Incoming requests are handled by overriding `_handle_request`. Servers
    must call `register` to register the path with the HTTP server.

    Requests can be sent by calling the client returned by `make_client`.

    Attributes:
        NAME (str): A name for the endpoint, added to the path as well as used
            in logging and metrics.
        PATH_ARGS (tuple[str]): A list of parameters to be added to the path.
            Adding parameters to the path (rather than payload) can make it
            easier to follow along in the log files.
        METHOD (str): The method of the HTTP request, defaults to POST. Can be
            one of POST, PUT or GET. If GET then the payload is sent as query
            parameters rather than a JSON body.
        CACHE (bool): Whether server should cache the result of the request.
            If true then transparently adds a txn_id to all requests, and
            `_handle_request` must return a Deferred.
        RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504
            is received.
    """

    # NOTE(review): `__metaclass__` is the Python 2 spelling and is ignored on
    # Python 3, so abstract members are not enforced there — confirm whether
    # Python 2 support is still required before changing this.
    __metaclass__ = abc.ABCMeta

    # Subclasses must provide concrete values for these (see class docstring).
    NAME = abc.abstractproperty()
    PATH_ARGS = abc.abstractproperty()

    METHOD = "POST"

    CACHE = True
    RETRY_ON_TIMEOUT = True

    def __init__(self, hs):
        """
        Args:
            hs: server object; used here to construct the response cache
                when `CACHE` is enabled.
        """
        if self.CACHE:
            # Cache responses for 30 minutes, keyed by txn_id, so retried
            # requests (see make_client) get the original result back.
            self.response_cache = ResponseCache(
                hs, "repl." + self.NAME, timeout_ms=30 * 60 * 1000
            )

        # Fail fast on misconfigured subclasses; make_client relies on this.
        assert self.METHOD in ("PUT", "POST", "GET")

    @abc.abstractmethod
    def _serialize_payload(**kwargs):
        """Static method that is called when creating a request.

        Note: invoked as `cls._serialize_payload(...)` without a `self`/`cls`
        argument, so implementations are effectively static methods.

        Concrete implementations should have explicit parameters (rather than
        kwargs) so that an appropriate exception is raised if the client is
        called with unexpected parameters. All PATH_ARGS must appear in
        argument list.

        Returns:
            Deferred[dict]|dict: If POST/PUT request then dictionary must be
            JSON serialisable, otherwise must be appropriate for adding as
            query args.
        """
        return {}

    @abc.abstractmethod
    async def _handle_request(self, request, **kwargs):
        """Handle incoming request.

        This is called with the request object and PATH_ARGS.

        Returns:
            tuple[int, dict]: HTTP status code and a JSON serialisable dict
            to be used as response body of request.
        """
        pass

    @classmethod
    def make_client(cls, hs):
        """Create a client that makes requests.

        Returns a callable that accepts the same parameters as `_serialize_payload`.
        """
        clock = hs.get_clock()
        host = hs.config.worker_replication_host
        port = hs.config.worker_replication_http_port

        client = hs.get_simple_http_client()

        # Each outgoing request gets its own tracing span; @trace wraps the
        # inlineCallbacks-decorated function so the span covers the whole call.
        @trace(opname="outgoing_replication_request")
        @defer.inlineCallbacks
        def send_request(**kwargs):
            # May return a plain dict or a Deferred; yield handles both.
            data = yield cls._serialize_payload(**kwargs)

            # URL-encode each path argument; safe="" also escapes "/" so a
            # value cannot introduce extra path segments.
            url_args = [
                urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS
            ]

            if cls.CACHE:
                # Random txn_id lets the server dedupe retries of this request
                # via its response cache (see _cached_handler).
                txn_id = random_string(10)
                url_args.append(txn_id)

            if cls.METHOD == "POST":
                request_func = client.post_json_get_json
            elif cls.METHOD == "PUT":
                request_func = client.put_json
            elif cls.METHOD == "GET":
                request_func = client.get_json
            else:
                # We have already asserted in the constructor that a
                # compatible was picked, but lets be paranoid.
                raise Exception(
                    "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
                )

            uri = "http://%s:%s/_synapse/replication/%s/%s" % (
                host,
                port,
                cls.NAME,
                "/".join(url_args),
            )

            try:
                # We keep retrying the same request for timeouts. This is so that we
                # have a good idea that the request has either succeeded or failed on
                # the master, and so whether we should clean up or not.
                while True:
                    # Fresh headers each attempt; carries the tracing context
                    # to the remote process.
                    headers = {}
                    inject_active_span_byte_dict(headers, None, check_destination=False)
                    try:
                        result = yield request_func(uri, data, headers=headers)
                        break
                    except CodeMessageException as e:
                        # Only a 504 (gateway timeout) is retried, and only if
                        # the endpoint opted in; anything else propagates.
                        if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
                            raise

                    logger.warning("%s request timed out", cls.NAME)

                    # If we timed out we probably don't need to worry about backing
                    # off too much, but lets just wait a little anyway.
                    yield clock.sleep(1)
            except HttpResponseException as e:
                # We convert to SynapseError as we know that it was a SynapseError
                # on the master process that we should send to the client. (And
                # importantly, not stack traces everywhere)
                raise e.to_synapse_error()
            except RequestSendFailed as e:
                # Connection-level failure: surface as a 502 while chaining the
                # original cause (six.raise_from for py2/py3 compatibility).
                raise_from(SynapseError(502, "Failed to talk to master"), e)

            return result

        return send_request

    def register(self, http_server):
        """Called by the server to register this as a handler to the
        appropriate path.

        Args:
            http_server: server exposing `register_paths`; receives the
                compiled path pattern and the (possibly cached) handler.
        """

        url_args = list(self.PATH_ARGS)
        handler = self._handle_request
        method = self.METHOD

        if self.CACHE:
            # Route through the response cache; the extra txn_id path segment
            # is the cache key (matches the client side in make_client).
            handler = self._cached_handler
            url_args.append("txn_id")

        # One named capture group per path argument, e.g. (?P<event_id>[^/]+).
        args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
        pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))

        # We don't let register_paths trace this servlet using the default
        # tracing options because we wish to extract the context explicitly,
        # so we wrap the handler ourselves with extract_context=True.
        handler = trace_servlet(self.__class__.__name__, extract_context=True)(handler)
        http_server.register_paths(
            method, [pattern], handler, self.__class__.__name__, trace=False
        )

    def _cached_handler(self, request, txn_id, **kwargs):
        """Called on new incoming requests when caching is enabled. Checks
        if there is a cached response for the request and returns that,
        otherwise calls `_handle_request` and caches its response.

        Args:
            request: the incoming HTTP request object.
            txn_id (str): transaction id extracted from the URL; used as the
                response-cache key.
            **kwargs: remaining PATH_ARGS, forwarded to `_handle_request`.
        """
        # We just use the txn_id here, but we probably also want to use the
        # other PATH_ARGS as well.

        assert self.CACHE

        return self.response_cache.wrap(txn_id, self._handle_request, request, **kwargs)