# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2019 Matrix.org Federation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Union,
)

from prometheus_client import Counter, Gauge, Histogram

from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
    AuthError,
    Codes,
    FederationError,
    IncompatibleRoomVersionError,
    NotFoundError,
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
    make_deferred_yieldable,
    nested_logging_context,
    run_in_background,
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache

if TYPE_CHECKING:
    from synapse.server import HomeServer

# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"]
)

pdu_process_time = Histogram(
    "synapse_federation_server_pdu_process_time", "Time taken to process an event",
)

last_pdu_age_metric = Gauge(
    "synapse_federation_last_received_pdu_age",
    "The age (in seconds) of the last PDU successfully received from the given domain",
    labelnames=("server_name",),
)
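
# Note: this gauge is only updated for origins listed in the
# federation_metrics_domains config setting (see _handle_pdus_in_txn below).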


class FederationServer(FederationBase):
    def __init__(self, hs):
        super().__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()

        self.device_handler = hs.get_device_handler()

        # Ensure the following handlers are loaded since they register callbacks
        # with FederationHandlerRegistry.
        hs.get_directory_handler()

        self._federation_ratelimiter = hs.get_federation_ratelimiter()

        self._server_linearizer = Linearizer("fed_server")
        self._transaction_linearizer = Linearizer("fed_txn_handler")

        # We cache results for transactions with the same ID
        self._transaction_resp_cache = ResponseCache(
            hs, "fed_txn_handler", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(
            hs, "state_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]
        self._state_ids_resp_cache = ResponseCache(
            hs, "state_ids_resp", timeout_ms=30000
        )  # type: ResponseCache[Tuple[str, str]]

        self._federation_metrics_domains = (
            hs.get_config().federation.federation_metrics_domains
        )

    async def on_backfill_request(
        self, origin: str, room_id: str, versions: List[str], limit: int
    ) -> Tuple[int, Dict[str, Any]]:
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            pdus = await self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )

            res = self._transaction_from_pdus(pdus).get_dict()

        return 200, res

    async def on_incoming_transaction(
        self, origin: str, transaction_data: JsonDict
    ) -> Tuple[int, Dict[str, Any]]:
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)
        transaction_id = transaction.transaction_id  # type: ignore

        if not transaction_id:
            raise Exception("Transaction missing transaction_id")

        logger.debug("[%s] Got transaction", transaction_id)

        # We wrap in a ResponseCache so that we de-duplicate retried
        # transactions.
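        # The cache is keyed on (origin, transaction_id), so if the origin
        # retries the same transaction we replay our previous response rather
        # than processing its PDUs a second time.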
        return await self._transaction_resp_cache.wrap(
            (origin, transaction_id),
            self._on_incoming_transaction_inner,
            origin,
            transaction,
            request_time,
        )

    async def _on_incoming_transaction_inner(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        # Use a linearizer to ensure that transactions from a remote are
        # processed in order.
        with await self._transaction_linearizer.queue(origin):
            # We rate limit here *after* we've queued up the incoming requests,
            # so that we don't fill up the ratelimiter with blocked requests.
            #
            # This is important as the ratelimiter allows N concurrent requests
            # at a time, and only starts ratelimiting if there are more requests
            # than that being processed at a time. If we queued up requests in
            # the linearizer/response cache *after* the ratelimiting then those
            # queued up requests would count as part of the allowed limit of N
            # concurrent requests.
            with self._federation_ratelimiter.ratelimit(origin) as d:
                await d

                result = await self._handle_incoming_transaction(
                    origin, transaction, request_time
                )

        return result

    async def _handle_incoming_transaction(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Tuple[int, Dict[str, Any]]:
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            HTTP response code and body
        """
        response = await self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,  # type: ignore
            )
            return response

        logger.debug("[%s] Transaction is new", transaction.transaction_id)  # type: ignore

        # Reject if PDU count > 50 or EDU count > 100
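        # (the federation API spec caps a transaction at 50 PDUs and 100 EDUs,
        # so anything larger is malformed and is refused outright)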
        if len(transaction.pdus) > 50 or (  # type: ignore
            hasattr(transaction, "edus") and len(transaction.edus) > 100  # type: ignore
        ):
            logger.info("Transaction PDU or EDU count too large. Returning 400")

            response = {}
            await self.transaction_actions.set_response(
                origin, transaction, 400, response
            )
            return 400, response

        # We process PDUs and EDUs in parallel. This is important as we don't
        # want to block things like to-device messages from reaching clients
        # behind the potentially expensive handling of PDUs.
        pdu_results, _ = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self._handle_pdus_in_txn, origin, transaction, request_time
                    ),
                    run_in_background(self._handle_edus_in_txn, origin, transaction),
                ],
                consumeErrors=True,
            ).addErrback(unwrapFirstError)
        )

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        await self.transaction_actions.set_response(origin, transaction, 200, response)
        return 200, response

    async def _handle_pdus_in_txn(
        self, origin: str, transaction: Transaction, request_time: int
    ) -> Dict[str, dict]:
        """Process the PDUs in a received transaction.

        Args:
            origin: the server making the request
            transaction: incoming transaction
            request_time: timestamp that the HTTP request arrived at

        Returns:
            A map from event ID of a processed PDU to any errors we should
            report back to the sending server.
        """

        received_pdus_counter.inc(len(transaction.pdus))  # type: ignore

        origin_host, _ = parse_server_name(origin)

        pdus_by_room = {}  # type: Dict[str, List[EventBase]]

        newest_pdu_ts = 0

        for p in transaction.pdus:  # type: ignore
            # FIXME (richardv): I don't think this works:
            # https://github.com/matrix-org/synapse/issues/8429
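            # The intent is to turn the relative "age" (ms since the origin
            # sent the event, from the unsigned data) into an absolute local
            # "age_ts", anchored to the time the HTTP request reached us.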
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as it does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = await self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue
            except UnsupportedRoomVersionError as e:
                # this can happen if support for a given room version is
                # withdrawn, in which case we may still receive events for
                # that room.
                logger.info("Ignoring PDU: %s", e)
                continue

            event = event_from_pdu_json(p, room_version)
            pdus_by_room.setdefault(room_id, []).append(event)

            if event.origin_server_ts > newest_pdu_ts:
                newest_pdu_ts = event.origin_server_ts

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        async def process_pdus_for_room(room_id: str):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                await self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with pdu_process_time.time():
                    with nested_logging_context(event_id):
                        try:
                            await self._handle_received_pdu(origin, pdu)
                            pdu_results[event_id] = {}
                        except FederationError as e:
                            logger.warning("Error handling PDU %s: %s", event_id, e)
                            pdu_results[event_id] = {"error": str(e)}
                        except Exception as e:
                            f = failure.Failure()
                            pdu_results[event_id] = {"error": str(e)}
                            logger.error(
                                "Failed to handle PDU %s",
                                event_id,
                                exc_info=(f.type, f.value, f.getTracebackObject()),
                            )

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
        )

        if newest_pdu_ts and origin in self._federation_metrics_domains:
            newest_pdu_age = self._clock.time_msec() - newest_pdu_ts
            last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000)

        return pdu_results

    async def _handle_edus_in_txn(self, origin: str, transaction: Transaction):
        """Process the EDUs in a received transaction.
        """

        async def _process_edu(edu_dict):
            received_edus_counter.inc()

            edu = Edu(
                origin=origin,
                destination=self.server_name,
                edu_type=edu_dict["edu_type"],
                content=edu_dict["content"],
            )
            await self.registry.on_edu(edu.edu_type, origin, edu.content)

        await concurrently_execute(
            _process_edu,
            getattr(transaction, "edus", []),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

    async def on_room_state_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        # we grab the linearizer to protect ourselves from servers which hammer
        # us. In theory we might already have the response to this query
        # in the cache so we could return it without waiting for the linearizer
        # - but that's non-trivial to get right, and anyway somewhat defeats
        # the point of the linearizer.
        with (await self._server_linearizer.queue((origin, room_id))):
            resp = dict(
                await self._state_resp_cache.wrap(
                    (room_id, event_id),
                    self._on_context_state_request_compute,
                    room_id,
                    event_id,
                )
            )

        room_version = await self.store.get_room_version_id(room_id)
        resp["room_version"] = room_version

        return 200, resp

    async def on_state_ids_request(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        if not event_id:
            raise NotImplementedError("Specify an event")

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        resp = await self._state_ids_resp_cache.wrap(
            (room_id, event_id), self._on_state_ids_request_compute, room_id, event_id,
        )

        return 200, resp

    async def _on_state_ids_request_compute(self, room_id, event_id):
        state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
        auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
        return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}

    async def _on_context_state_request_compute(
        self, room_id: str, event_id: str
    ) -> Dict[str, list]:
        if event_id:
            pdus = await self.handler.get_state_for_pdu(room_id, event_id)
        else:
            pdus = (await self.state.get_current_state(room_id)).values()

        auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])

        return {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }

    async def on_pdu_request(
        self, origin: str, event_id: str
    ) -> Tuple[int, Union[JsonDict, str]]:
        pdu = await self.handler.get_persisted_pdu(origin, event_id)

        if pdu:
            return 200, self._transaction_from_pdus([pdu]).get_dict()
        else:
            return 404, ""

    async def on_query_request(
        self, query_type: str, args: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        received_queries_counter.labels(query_type).inc()
        resp = await self.registry.on_query(query_type, args)
        return 200, resp

    async def on_make_join_request(
        self, origin: str, room_id: str, user_id: str, supported_versions: List[str]
    ) -> Dict[str, Any]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        room_version = await self.store.get_room_version_id(room_id)
        if room_version not in supported_versions:
            logger.warning(
                "Room version %s not in %s", room_version, supported_versions
            )
            raise IncompatibleRoomVersionError(room_version=room_version)

        pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_invite_request(
        self, origin: str, content: JsonDict, room_version_id: str
    ) -> Dict[str, Any]:
        room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
        if not room_version:
            raise SynapseError(
                400,
                "Homeserver does not support this room version",
                Codes.UNSUPPORTED_ROOM_VERSION,
            )

        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}

    async def on_send_join_request(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        logger.debug("on_send_join_request: content: %s", content)

        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)

        res_pdus = await self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        return {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }

    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> Dict[str, Any]:
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)
        pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)

        room_version = await self.store.get_room_version_id(room_id)

        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
        logger.debug("on_send_leave_request: content: %s", content)

        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)

        await self.handler.on_send_leave_request(origin, pdu)
        return {}

    async def on_event_auth(
        self, origin: str, room_id: str, event_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            time_now = self._clock.time_msec()
            auth_pdus = await self.handler.on_event_auth(event_id)
            res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
        return 200, res

    @log_function
    async def on_query_client_keys(
        self, origin: str, content: Dict[str, str]
    ) -> Tuple[int, Dict[str, Any]]:
        return await self.on_query_request("client_keys", content)

    async def on_query_user_devices(
        self, origin: str, user_id: str
    ) -> Tuple[int, Dict[str, Any]]:
        keys = await self.device_handler.on_federation_query_user_devices(user_id)
        return 200, keys

    @trace
    async def on_claim_client_keys(
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
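        # The request body maps user ID -> device ID -> the one-time-key
        # algorithm being claimed, e.g. (illustrative values only):
        #
        #     {"one_time_keys": {"@alice:example.org": {"DEVICEID": "signed_curve25519"}}}
        #
        # which we flatten into (user_id, device_id, algorithm) tuples for the
        # store.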
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
        results = await self.store.claim_e2e_one_time_keys(query)

        json_result = {}  # type: Dict[str, Dict[str, dict]]
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_str in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json_decoder.decode(json_str)
                    }

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(
                (
                    "%s for %s:%s" % (key_id, user_id, device_id)
                    for user_id, user_keys in json_result.items()
                    for device_id, device_keys in user_keys.items()
                    for key_id, _ in device_keys.items()
                )
            ),
        )

        return {"one_time_keys": json_result}

    async def on_get_missing_events(
        self,
        origin: str,
        room_id: str,
        earliest_events: List[str],
        latest_events: List[str],
        limit: int,
    ) -> Dict[str, list]:
        with (await self._server_linearizer.queue((origin, room_id))):
            origin_host, _ = parse_server_name(origin)
            await self.check_server_matches_acl(origin_host, room_id)

            logger.debug(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d",
                earliest_events,
                latest_events,
                limit,
            )

            missing_events = await self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit
            )

            if len(missing_events) < 5:
                logger.debug(
                    "Returning %d events: %r", len(missing_events), missing_events
                )
            else:
                logger.debug("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]}

    @log_function
    async def on_openid_userinfo(self, token: str) -> Optional[str]:
        ts_now_ms = self._clock.time_msec()
        return await self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    def _transaction_from_pdus(self, pdu_list: List[EventBase]) -> Transaction:
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None:
        """ Process a PDU received in a federation /send/ transaction.

        If the event is invalid, then this method raises a FederationError.
        (The error will then be logged and sent back to the sender (which
        probably won't do anything with it), and other events in the
        transaction will be processed as normal).

        It is likely that we'll then receive other events which refer to
        this rejected_event in their prev_events, etc. When that happens,
        we'll attempt to fetch the rejected event again, which will presumably
        fail, so those second-generation events will also get rejected.

        Eventually, we get to the point where there are more than 10 events
        between any new events and the original rejected event. Since we
        only try to backfill 10 events deep on received pdu, we then accept the
        new event, possibly introducing a discontinuity in the DAG, with new
        forward extremities, so normal service is approximately returned,
        until we try to backfill across the discontinuity.

        Args:
            origin: server which sent the pdu
            pdu: received pdu

        Raises: FederationError if the signatures / hash do not match, or
            if the event was unacceptable for any other reason (eg, too large,
            too many prev_events, couldn't find the prev_events)
        """
        # check that it's actually being sent from a valid destination to
        # work around bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.sender):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893. This is also true for some third party
            # invites).
            if not (
                pdu.type == "m.room.member"
                and pdu.content
                and pdu.content.get("membership", None)
                in (Membership.JOIN, Membership.INVITE)
            ):
                logger.info(
                    "Discarding PDU %s from invalid origin %s", pdu.event_id, origin
                )
                return
            else:
                logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)

        # We've already checked that we know the room version by this point
        room_version = await self.store.get_room_version(pdu.room_id)

        # Check signature.
        try:
            pdu = await self._check_sigs_and_hash(room_version, pdu)
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)

        await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    async def exchange_third_party_invite(
        self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
    ):
        ret = await self.handler.exchange_third_party_invite(
            sender_user_id, target_user_id, room_id, signed
        )
        return ret

    async def on_exchange_third_party_invite_request(self, event_dict: Dict):
        ret = await self.handler.on_exchange_third_party_invite_request(event_dict)
        return ret

    async def check_server_matches_acl(self, server_name: str, room_id: str):
        """Check if the given server is allowed by the server ACLs in the room

        Args:
            server_name: name of server, *without any port part*
            room_id: ID of the room to check

        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = await self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

        if not acl_event_id:
            return

        acl_event = await self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
            return

        raise AuthError(code=403, msg="Server is banned from room")


def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
    """Check if the given server is allowed by the ACL event

    Args:
        server_name: name of server, without any port part
        acl_event: m.room.server_acl event

    Returns:
        True if this server is allowed by the ACLs
    """
    logger.debug("Checking %s against acl %s", server_name, acl_event.content)
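
    # For reference, the content of an m.room.server_acl event looks something
    # like this (illustrative values only):
    #
    #     {
    #         "allow_ip_literals": false,
    #         "allow": ["*"],
    #         "deny": ["*.evil.example.org"]
    #     }
    #
    # Deny rules take precedence over allow rules, and a server matching
    # neither list is rejected.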

    # first of all, check if literal IPs are blocked, and if so, whether the
    # server name is a literal IP
    allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        logger.warning("Ignoring non-bool allow_ip_literals flag")
        allow_ip_literals = True
    if not allow_ip_literals:
        # check for ipv6 literals. These start with '['.
        if server_name[0] == "[":
            return False

        # check for ipv4 literals. We can just lift the routine from twisted.
        if isIPAddress(server_name):
            return False

    # next, check the deny list
    deny = acl_event.content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        logger.warning("Ignoring non-list deny ACL %s", deny)
        deny = []
    for e in deny:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched deny rule %s", server_name, e)
            return False

    # then the allow list.
    allow = acl_event.content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        logger.warning("Ignoring non-list allow ACL %s", allow)
        allow = []
    for e in allow:
        if _acl_entry_matches(server_name, e):
            # logger.info("%s matched allow rule %s", server_name, e)
            return True

    # everything else should be rejected.
    # logger.info("%s fell through", server_name)
    return False


def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool:
    if not isinstance(acl_entry, str):
        logger.warning(
            "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
        )
        return False
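    # ACL entries are globs ("*" matches any run of characters, "?" exactly
    # one), so e.g. the entry "*.example.com" matches "host1.example.com" but
    # not "example.com" itself.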
    regex = glob_to_regex(acl_entry)
    return bool(regex.match(server_name))


class FederationHandlerRegistry:
    """Allows classes to register themselves as handlers for a given EDU or
    query type for incoming federation traffic.
    """

    def __init__(self, hs: "HomeServer"):
        self.config = hs.config
        self.clock = hs.get_clock()
        self._instance_name = hs.get_instance_name()

        # These are safe to load in monolith mode, but will explode if we try
        # and use them. However we have guards before we use them to ensure that
        # we don't route to ourselves, and in monolith mode that will always be
        # the case.
        self._get_query_client = ReplicationGetQueryRestServlet.make_client(hs)
        self._send_edu = ReplicationFederationSendEduRestServlet.make_client(hs)

        self.edu_handlers = (
            {}
        )  # type: Dict[str, Callable[[str, dict], Awaitable[None]]]
        self.query_handlers = {}  # type: Dict[str, Callable[[dict], Awaitable[None]]]

        # Map from type to instance name that we should route EDU handling to.
        self._edu_type_to_instance = {}  # type: Dict[str, str]

    def register_edu_handler(
        self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation EDU of the given type.

        Args:
            edu_type: The type of the incoming EDU to register a handler for
            handler: A callable invoked on incoming EDUs of the given type.
                The arguments are the origin server name and the EDU contents.
        """
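        # Typically called from a handler's constructor; the typing handler,
        # for instance, does something along the lines of
        #
        #     hs.get_federation_registry().register_edu_handler(
        #         "m.typing", self._recv_edu
        #     )
        #
        # (handler name illustrative).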
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        logger.info("Registering federation EDU handler for %r", edu_type)

        self.edu_handlers[edu_type] = handler

    def register_query_handler(
        self, query_type: str, handler: Callable[[dict], defer.Deferred]
    ):
        """Sets the handler callable that will be used to handle an incoming
        federation query of the given type.

        Args:
            query_type: Category name of the query, which should match
                the string used by make_query.
            handler: Invoked to handle incoming queries of this type. The
                result is awaited and used as the response to the query
                request.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" % (query_type,))

        logger.info("Registering federation query handler for %r", query_type)

        self.query_handlers[query_type] = handler

    def register_instance_for_edu(self, edu_type: str, instance_name: str):
        """Register that the EDU handler is on a different instance than master.
        """
        self._edu_type_to_instance[edu_type] = instance_name

    async def on_edu(self, edu_type: str, origin: str, content: dict):
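        # Dispatch order: a handler registered on this instance wins; failing
        # that, route the EDU to whichever instance has registered for this
        # EDU type; otherwise log and drop it.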
        if not self.config.use_presence and edu_type == "m.presence":
            return

        # Check if we have a handler on this instance
        handler = self.edu_handlers.get(edu_type)
        if handler:
            with start_active_span_from_edu(content, "handle_edu"):
                try:
                    await handler(origin, content)
                except SynapseError as e:
                    logger.info("Failed to handle edu %r: %r", edu_type, e)
                except Exception:
                    logger.exception("Failed to handle edu %r", edu_type)
            return

        # Check if we can route it somewhere else that isn't us
        route_to = self._edu_type_to_instance.get(edu_type, "master")
        if route_to != self._instance_name:
            try:
                await self._send_edu(
                    instance_name=route_to,
                    edu_type=edu_type,
                    origin=origin,
                    content=content,
                )
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
            return

        # Oh well, let's just log and move on.
        logger.warning("No handler registered for EDU type %s", edu_type)

    async def on_query(self, query_type: str, args: dict):
        handler = self.query_handlers.get(query_type)
        if handler:
            return await handler(args)

        # Check if we can route it somewhere else that isn't us: queries with
        # no local handler are forwarded to the master process.
        if self._instance_name != "master":
            return await self._get_query_client(query_type=query_type, args=args)

        # Uh oh, no handler! Let's raise an exception so the request returns an
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))