# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import logging
import re
import urllib.parse
from inspect import signature
from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar, Dict, List, Tuple

from prometheus_client import Counter, Gauge

from twisted.internet.error import ConnectError, DNSLookupError
from twisted.web.server import Request

from synapse.api.errors import HttpResponseException, SynapseError
from synapse.http import RequestTimedOutError
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging import opentracing
from synapse.logging.opentracing import trace_with_opname
from synapse.types import JsonDict
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.cancellation import is_function_cancellable
from synapse.util.stringutils import random_string

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

_pending_outgoing_requests = Gauge(
    "synapse_pending_outgoing_replication_requests",
    "Number of active outgoing replication requests, by replication method name",
    ["name"],
)

_outgoing_request_counter = Counter(
    "synapse_outgoing_replication_requests",
    "Number of outgoing replication requests, by replication method name and result",
    ["name", "code"],
)

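# The key in request/response bodies under which current stream positions are
# sent, so that the other side can wait for the relevant replication streams
# to catch up before acting on the payload (see `WAIT_FOR_STREAMS` below).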
_STREAM_POSITION_KEY = "_INT_STREAM_POS"


class ReplicationEndpoint(metaclass=abc.ABCMeta):
    """Helper base class for defining new replication HTTP endpoints.

    This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
    (with a `/:txn_id` suffix for cached requests), where NAME identifies the
    endpoint and PATH_ARGS is a tuple of parameters to be encoded in the URL.

    For example, if `NAME` is "send_event" and `PATH_ARGS` is `("event_id",)`,
    with `CACHE` set to true then this generates an endpoint:

        /_synapse/replication/send_event/:event_id/:txn_id

    For POST/PUT requests the payload is serialized to JSON and sent as the
    body, while for GET requests the payload is added as query parameters. See
    `_serialize_payload` for details.

    Incoming requests are handled by overriding `_handle_request`. Servers
    must call `register` to register the path with the HTTP server.

    Requests can be sent by calling the client returned by `make_client`.
    Requests are sent to the master process by default, but can be sent to
    other named processes by specifying an `instance_name` keyword argument.

    Attributes:
        NAME (str): A name for the endpoint, added to the path as well as used
            in logging and metrics.
        PATH_ARGS (tuple[str]): A list of parameters to be added to the path.
            Adding parameters to the path (rather than the payload) can make
            it easier to follow along in the log files.
        METHOD (str): The method of the HTTP request, defaults to POST. Can be
            one of POST, PUT or GET. If GET then the payload is sent as query
            parameters rather than a JSON body.
        CACHE (bool): Whether the server should cache the result of the
            request. If true then a txn_id is transparently added to all
            requests, and responses are cached per transaction.
        RETRY_ON_TIMEOUT (bool): Whether or not to retry the request when a
            504 is received.
        RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request
            when a connection error is received.
        RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry
            when receiving connection errors, with exponentially increasing
            backoff between attempts.
        WAIT_FOR_STREAMS (bool): Whether to wait for replication streams to
            catch up before processing the request and/or response. Defaults
            to True.
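
    A minimal, hypothetical subclass might look like the following sketch
    (the endpoint name and fields are illustrative only):

        class ExampleEndpoint(ReplicationEndpoint):
            NAME = "example"
            PATH_ARGS = ("room_id",)

            @staticmethod
            async def _serialize_payload(room_id: str, limit: int) -> JsonDict:
                # Only the payload goes in the body; `room_id` is a PATH_ARG
                # and is encoded into the URL instead.
                return {"limit": limit}

            async def _handle_request(
                self, request: Request, content: JsonDict, room_id: str
            ) -> Tuple[int, JsonDict]:
                return 200, {"limit": content["limit"]}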
    """

    NAME: str = abc.abstractproperty()  # type: ignore
    PATH_ARGS: Tuple[str, ...] = abc.abstractproperty()  # type: ignore
    METHOD = "POST"
    CACHE = True
    RETRY_ON_TIMEOUT = True
    RETRY_ON_CONNECT_ERROR = True
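    # Connection-error retries back off exponentially (delay = 2**attempts),
    # sleeping 1 + 2 + 4 + 8 + 16 + 32 = 63 seconds in total before giving up.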
    RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5  # =63s (2^6-1)

    WAIT_FOR_STREAMS: ClassVar[bool] = True

    def __init__(self, hs: "HomeServer"):
        if self.CACHE:
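            # Cache responses to requests that share a txn_id for up to 30
            # minutes, so that retried requests are answered from the cache.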
            self.response_cache: ResponseCache[str] = ResponseCache(
                hs.get_clock(), "repl." + self.NAME, timeout_ms=30 * 60 * 1000
            )

        # We reserve `instance_name` as a parameter to sending requests, so we
        # assert here that subclasses don't try to use the name.
        assert (
            "instance_name" not in self.PATH_ARGS
        ), "`instance_name` is a reserved parameter name"
        assert (
            "instance_name"
            not in signature(self.__class__._serialize_payload).parameters
        ), "`instance_name` is a reserved parameter name"

        assert self.METHOD in ("PUT", "POST", "GET")

        self._replication_secret = None
        if hs.config.worker.worker_replication_secret:
            self._replication_secret = hs.config.worker.worker_replication_secret

        self._streams = hs.get_replication_command_handler().get_streams_to_replicate()
        self._replication = hs.get_replication_data_handler()
        self._instance_name = hs.get_instance_name()

    def _check_auth(self, request: Request) -> None:
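        """Check the `Authorization` header against the configured replication
        secret, raising a `RuntimeError` if it is missing or does not match.
        """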
        # Get the authorization header.
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

        if not auth_headers:
            raise RuntimeError("Missing Authorization header.")
        if len(auth_headers) > 1:
            raise RuntimeError("Too many Authorization headers.")
        parts = auth_headers[0].split(b" ")
        if parts[0] == b"Bearer" and len(parts) == 2:
            received_secret = parts[1].decode("ascii")
            if self._replication_secret == received_secret:
                # Success!
                return

        raise RuntimeError("Invalid Authorization header.")

    @abc.abstractmethod
    async def _serialize_payload(**kwargs) -> JsonDict:
        """Static method that is called when creating a request.

        Concrete implementations should have explicit parameters (rather than
        kwargs) so that an appropriate exception is raised if the client is
        called with unexpected parameters. All PATH_ARGS must appear in the
        argument list.

        Returns:
            If this is a POST/PUT request then the dictionary must be JSON
            serialisable, otherwise it must be appropriate for adding as
            query args.
        """
        return {}

    @abc.abstractmethod
    async def _handle_request(
        self, request: Request, content: JsonDict, **kwargs: Any
    ) -> Tuple[int, JsonDict]:
        """Handle incoming request.

        This is called with the request object and PATH_ARGS.

        Returns:
            HTTP status code and a JSON serialisable dict to be used as the
            response body of the request.
        """

    @classmethod
    def make_client(cls, hs: "HomeServer") -> Callable:
        """Create a client that makes requests.

        Returns a callable that accepts the same parameters as
        `_serialize_payload`, and also accepts an optional `instance_name`
        parameter to specify which instance to hit (the instance must be in
        the `instance_map` config).
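
        For example, with the hypothetical `ExampleEndpoint` sketched in the
        class docstring:

            client = ExampleEndpoint.make_client(hs)
            result = await client(instance_name="master", room_id=room_id, limit=10)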
        """
        clock = hs.get_clock()
        client = hs.get_simple_http_client()
        local_instance_name = hs.get_instance_name()

        # The values of these options should match the replication listener
        # settings.
        master_host = hs.config.worker.worker_replication_host
        master_port = hs.config.worker.worker_replication_http_port
        master_tls = hs.config.worker.worker_replication_http_tls

        instance_map = hs.config.worker.instance_map

        outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME)

        replication_secret = None
        if hs.config.worker.worker_replication_secret:
            replication_secret = hs.config.worker.worker_replication_secret.encode(
                "ascii"
            )

        @trace_with_opname("outgoing_replication_request")
        async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
            # We have to pull these out here to avoid circular dependencies...
            streams = hs.get_replication_command_handler().get_streams_to_replicate()
            replication = hs.get_replication_data_handler()

            with outgoing_gauge.track_inprogress():
                if instance_name == local_instance_name:
                    raise Exception("Trying to send HTTP request to self")
                if instance_name == "master":
                    host = master_host
                    port = master_port
                    tls = master_tls
                elif instance_name in instance_map:
                    host = instance_map[instance_name].host
                    port = instance_map[instance_name].port
                    tls = instance_map[instance_name].tls
                else:
                    raise Exception(
                        "Instance %r not in 'instance_map' config" % (instance_name,)
                    )

                data = await cls._serialize_payload(**kwargs)

                if cls.METHOD != "GET" and cls.WAIT_FOR_STREAMS:
                    # Include the current stream positions that we write to. We
                    # don't do this for GETs as they don't have a body, and we
                    # generally assume that a GET won't rely on data we have
                    # written.
                    if _STREAM_POSITION_KEY in data:
                        raise Exception(
                            "data to send contains %r key", _STREAM_POSITION_KEY
                        )

                    data[_STREAM_POSITION_KEY] = {
                        "streams": {
                            stream.NAME: stream.current_token(local_instance_name)
                            for stream in streams
                        },
                        "instance_name": local_instance_name,
                    }

                url_args = [
                    urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS
                ]

                if cls.CACHE:
                    txn_id = random_string(10)
                    url_args.append(txn_id)

                if cls.METHOD == "POST":
                    request_func: Callable[
                        ..., Awaitable[Any]
                    ] = client.post_json_get_json
                elif cls.METHOD == "PUT":
                    request_func = client.put_json
                elif cls.METHOD == "GET":
                    request_func = client.get_json
                else:
                    # We have already asserted in the constructor that a
                    # compatible method was picked, but let's be paranoid.
                    raise Exception(
                        "Unknown METHOD on %s replication endpoint" % (cls.NAME,)
                    )

                # The protocol is hard-coded to http by default, or https if
                # the replication port is configured with TLS enabled.
                scheme = "https" if tls else "http"
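                # e.g. "http://<host>:<port>/_synapse/replication/send_event/<event_id>/<txn_id>"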
                uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
                    scheme,
                    host,
                    port,
                    cls.NAME,
                    "/".join(url_args),
                )

                headers: Dict[bytes, List[bytes]] = {}
                # Add an authorization header, if configured.
                if replication_secret:
                    headers[b"Authorization"] = [b"Bearer " + replication_secret]
                opentracing.inject_header_dict(headers, check_destination=False)

                try:
                    # Keep track of attempts made so we can bail if we don't manage to
                    # connect to the target after N tries.
                    attempts = 0
                    # We keep retrying the same request for timeouts. This is so that we
                    # have a good idea that the request has either succeeded or failed
                    # on the master, and so whether we should clean up or not.
                    while True:
                        try:
                            result = await request_func(uri, data, headers=headers)
                            break
                        except RequestTimedOutError:
                            if not cls.RETRY_ON_TIMEOUT:
                                raise

                            logger.warning("%s request timed out; retrying", cls.NAME)

                            # If we timed out we probably don't need to worry about backing
                            # off too much, but let's just wait a little anyway.
                            await clock.sleep(1)
                        except (ConnectError, DNSLookupError) as e:
                            if not cls.RETRY_ON_CONNECT_ERROR:
                                raise
                            if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS:
                                raise

                            delay = 2**attempts
                            logger.warning(
                                "%s request connection failed; retrying in %ds: %r",
                                cls.NAME,
                                delay,
                                e,
                            )

                            await clock.sleep(delay)
                            attempts += 1
                except HttpResponseException as e:
                    # We convert to SynapseError as we know that it was a SynapseError
                    # on the main process that we should send to the client. (And
                    # importantly, not stack traces everywhere)
                    _outgoing_request_counter.labels(cls.NAME, e.code).inc()
                    raise e.to_synapse_error()
                except Exception as e:
                    _outgoing_request_counter.labels(cls.NAME, "ERR").inc()
                    raise SynapseError(
                        502, f"Failed to talk to {instance_name} process"
                    ) from e

                _outgoing_request_counter.labels(cls.NAME, 200).inc()

                # Wait on any streams that the remote may have written to.
                for stream_name, position in result.get(
                    _STREAM_POSITION_KEY, {}
                ).items():
                    await replication.wait_for_stream_position(
                        instance_name=instance_name,
                        stream_name=stream_name,
                        position=position,
                    )

                return result

        return send_request

    def register(self, http_server: HttpServer) -> None:
        """Called by the server to register this as a handler at the
        appropriate path.
        """

        url_args = list(self.PATH_ARGS)
        method = self.METHOD

        if self.CACHE and is_function_cancellable(self._handle_request):
            raise Exception(
                f"{self.__class__.__name__} has been marked as cancellable, but CACHE "
                "is set. The cancellable flag would have no effect."
            )

        if self.CACHE:
            url_args.append("txn_id")

        args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
        pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
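        # e.g. for NAME="send_event" with PATH_ARGS=("event_id",) and CACHE=True:
        #   ^/_synapse/replication/send_event/(?P<event_id>[^/]+)/(?P<txn_id>[^/]+)$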

        http_server.register_paths(
            method,
            [pattern],
            self._check_auth_and_handle,
            self.__class__.__name__,
        )

    async def _check_auth_and_handle(
        self, request: SynapseRequest, **kwargs: Any
    ) -> Tuple[int, JsonDict]:
        """Called on new incoming requests. Checks the authorization headers,
        then, if caching is enabled, returns any cached response for the
        request, otherwise calls `_handle_request` and caches its response.
        """
        # We just use the txn_id here, but we probably want to use the other
        # PATH_ARGS as well.

        # Check the authorization headers before handling the request.
        if self._replication_secret:
            self._check_auth(request)

        if self.METHOD == "GET":
            # GET APIs always have an empty body.
            content = {}
        else:
            content = parse_json_object_from_request(request)

        # Wait on any streams that the remote may have written to.
        for stream_name, position in content.get(_STREAM_POSITION_KEY, {"streams": {}})[
            "streams"
        ].items():
            await self._replication.wait_for_stream_position(
                instance_name=content[_STREAM_POSITION_KEY]["instance_name"],
                stream_name=stream_name,
                position=position,
            )

        if self.CACHE:
            txn_id = kwargs.pop("txn_id")

            # We ignore the `@cancellable` flag, since cancellation wouldn't interrupt
            # `_handle_request` and `ResponseCache` does not handle cancellation
            # correctly yet. In particular, there may be issues to do with logging
            # context lifetimes.
            code, response = await self.response_cache.wrap(
                txn_id, self._handle_request, request, content, **kwargs
            )
        else:
            # The `@cancellable` decorator may be applied to `_handle_request`. But we
            # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
            # so we have to set up the cancellable flag ourselves.
            request.is_render_cancellable = is_function_cancellable(
                self._handle_request
            )

            code, response = await self._handle_request(request, content, **kwargs)

        # Return streams we may have written to in the course of processing this
        # request.
        if _STREAM_POSITION_KEY in response:
            raise Exception("data to send contains %r key", _STREAM_POSITION_KEY)

        if self.WAIT_FOR_STREAMS:
            response[_STREAM_POSITION_KEY] = {
                stream.NAME: stream.current_token(self._instance_name)
                for stream in self._streams
            }

        return code, response
|