# Copyright 2017 New Vector Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import gc
import logging
import os
import signal
import socket
import sys
import traceback
import warnings
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Collection,
    Dict,
    Iterable,
    List,
    NoReturn,
    Optional,
    Tuple,
    cast,
)

from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import ParamSpec

import twisted
from twisted.internet import defer, error, reactor as _reactor
from twisted.internet.interfaces import (
    IOpenSSLContextFactory,
    IReactorSSL,
    IReactorTCP,
    IReactorUNIX,
)
from twisted.internet.protocol import ServerFactory
from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.threadpool import ThreadPool
from twisted.web.resource import Resource

import synapse.util.caches
from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.types import ISynapseReactor
from synapse.util import SYNAPSE_VERSION
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.gai_resolver import GAIResolver
from synapse.util.rlimit import change_resource_limit

if TYPE_CHECKING:
    from synapse.server import HomeServer

# Twisted injects the global reactor to make it easier to import; this confuses
# mypy, which thinks it is a module. Tell it that it is a more proper type.
reactor = cast(ISynapseReactor, _reactor)

logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
_sighup_callbacks: List[
    Tuple[Callable[..., None], Tuple[object, ...], Dict[str, object]]
] = []
P = ParamSpec("P")


def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
    """
    Register a function to be called when a SIGHUP occurs.

    Args:
        func: Function to be called when sent a SIGHUP signal.
        *args, **kwargs: args and kwargs to be passed to the target function.
    """
    _sighup_callbacks.append((func, args, kwargs))
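
# Illustrative only: the registrations that `start` below actually makes are
#
#     register_sighup(refresh_certificate, hs)
#     register_sighup(reload_cache_config, hs.config)
#
# Each registered callback is then invoked by the SIGHUP handler installed in
# `start`, e.g. when an admin runs `kill -HUP <pid>` to reload certificates and
# cache factors without restarting.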


def start_worker_reactor(
    appname: str,
    config: HomeServerConfig,
    # Use a lambda to avoid binding to a given reactor at import time.
    # (needed when synapse.app.complement_fork_starter is being used)
    run_command: Callable[[], None] = lambda: reactor.run(),
) -> None:
    """Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor. Pulls configuration from the 'worker' settings in 'config'.

    Args:
        appname: application name which will be sent to syslog
        config: config object
        run_command: callable that actually runs the reactor
    """

    logger = logging.getLogger(config.worker.worker_app)

    start_reactor(
        appname,
        soft_file_limit=config.server.soft_file_limit,
        gc_thresholds=config.server.gc_thresholds,
        pid_file=config.worker.worker_pid_file,
        daemonize=config.worker.worker_daemonize,
        print_pidfile=config.server.print_pidfile,
        logger=logger,
        run_command=run_command,
    )


def start_reactor(
    appname: str,
    soft_file_limit: int,
    gc_thresholds: Optional[Tuple[int, int, int]],
    pid_file: Optional[str],
    daemonize: bool,
    print_pidfile: bool,
    logger: logging.Logger,
    # Use a lambda to avoid binding to a given reactor at import time.
    # (needed when synapse.app.complement_fork_starter is being used)
    run_command: Callable[[], None] = lambda: reactor.run(),
) -> None:
    """Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor

    Args:
        appname: application name which will be sent to syslog
        soft_file_limit:
        gc_thresholds:
        pid_file: name of pid file to write to if daemonize is True
        daemonize: true to run the reactor in a background process
        print_pidfile: whether to print the pid file, if daemonize is True
        logger: logger instance to pass to Daemonize
        run_command: callable that actually runs the reactor
    """

    def run() -> None:
        logger.info("Running")
        setup_jemalloc_stats()
        change_resource_limit(soft_file_limit)
        if gc_thresholds:
            gc.set_threshold(*gc_thresholds)
        install_gc_manager()
        run_command()

    # make sure that we run the reactor with the sentinel log context,
    # otherwise other PreserveLoggingContext instances will get confused
    # and complain when they see the logcontext arbitrarily swapping
    # between the sentinel and `run` logcontexts.
    #
    # We also need to drop the logcontext before forking if we're daemonizing,
    # otherwise the cputime metrics get confused about the per-thread resource usage
    # appearing to go backwards.
    with PreserveLoggingContext():
        if daemonize:
            assert pid_file is not None

            if print_pidfile:
                print(pid_file)

            daemonize_process(pid_file, logger)
        run()


def quit_with_error(error_string: str) -> NoReturn:
    message_lines = error_string.split("\n")
    line_length = min(max(len(line) for line in message_lines), 80) + 2
    sys.stderr.write("*" * line_length + "\n")
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
    sys.stderr.write("*" * line_length + "\n")
    sys.exit(1)
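
# For example, quit_with_error("first line\nsecond line") writes roughly:
#
#     *************
#      first line
#      second line
#     *************
#
# to stderr (the starred border is two characters wider than the longest message
# line, capped at 82) and then exits with status 1.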


def handle_startup_exception(e: Exception) -> NoReturn:
    # Exceptions that occur between setting up the logging and forking or starting
    # the reactor are written to the logs, followed by a summary to stderr.
    logger.exception("Exception during startup")
    quit_with_error(
        f"Error during initialisation:\n {e}\nThere may be more information in the logs."
    )


def redirect_stdio_to_logs() -> None:
    streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]

    for stream, level in streams:
        oldStream = getattr(sys, stream)
        loggingFile = LoggingFile(
            logger=twisted.logger.Logger(namespace=stream),
            level=level,
            encoding=getattr(oldStream, "encoding", None),
        )
        setattr(sys, stream, loggingFile)

    print("Redirected stdout/stderr to logs")


def register_start(
    cb: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs
) -> None:
    """Register a callback with the reactor, to be called once it is running

    This can be used to initialise parts of the system which require an asynchronous
    setup.

    Any exception raised by the callback will be printed and logged, and the process
    will exit.
    """

    async def wrapper() -> None:
        try:
            await cb(*args, **kwargs)
        except Exception:
            # previously, we used Failure().printTraceback() here, in the hope that
            # it would give better tracebacks than traceback.print_exc(). However, that
            # doesn't handle chained exceptions (with a __cause__ or __context__) well,
            # and I *think* the need for Failure() is reduced now that we mostly use
            # async/await.

            # Write the exception to both the logs *and* the unredirected stderr,
            # because people tend to get confused if it only goes to one or the other.
            #
            # One problem with this is that if people are using a logging config that
            # logs to the console (as is common eg under docker), they will get two
            # copies of the exception. We could maybe try to detect that, but it's
            # probably a cost we can bear.
            logger.fatal("Error during startup", exc_info=True)
            print("Error during startup:", file=sys.__stderr__)
            traceback.print_exc(file=sys.__stderr__)

            # it's no use calling sys.exit here, since that just raises a SystemExit
            # exception which is then caught by the reactor, and everything carries
            # on as normal.
            os._exit(1)

    reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
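
# Illustrative usage, with a hypothetical startup coroutine:
#
#     async def _do_startup(hs: "HomeServer") -> None:
#         ...
#
#     register_start(_do_startup, hs)
#
# `_do_startup(hs)` then runs once the reactor is started; if it raises, the
# exception is logged, printed to the unredirected stderr, and the process exits.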


def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
    """
    Start Prometheus metrics server.
    """
    from prometheus_client import start_http_server as start_http_server_prometheus

    from synapse.metrics import RegistryProxy

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        _set_prometheus_client_use_created_metrics(False)
        start_http_server_prometheus(port, addr=host, registry=RegistryProxy)


def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
    """
    Sets whether prometheus_client should expose `_created`-suffixed metrics for
    all gauges, histograms and summaries.

    There is no programmatic way to disable this without poking at internals;
    the proper way is to use an environment variable which prometheus_client
    loads at import time.

    The motivation for disabling these `_created` metrics is that they're not
    useful, yet they take up space in Prometheus.
    """

    import prometheus_client.metrics

    if hasattr(prometheus_client.metrics, "_use_created"):
        prometheus_client.metrics._use_created = new_value
    else:
        logger.error(
            "Can't disable `_created` metrics in prometheus_client (brittle hack broken?)"
        )
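
# Note: newer prometheus_client releases also expose an opt-out via an environment
# variable read at import time (believed to be PROMETHEUS_DISABLE_CREATED_SERIES,
# though that name is not guaranteed here); presumably why the attribute poke
# above is used instead for the versions in scope.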


def listen_manhole(
    bind_addresses: Collection[str],
    port: int,
    manhole_settings: ManholeConfig,
    manhole_globals: dict,
) -> None:
    # twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
    # warning. It's fixed by https://github.com/twisted/twisted/pull/1522, so
    # suppress the warning for now.
    warnings.filterwarnings(
        action="ignore",
        category=CryptographyDeprecationWarning,
        message="int_from_bytes is deprecated",
    )

    from synapse.util.manhole import manhole

    listen_tcp(
        bind_addresses,
        port,
        manhole(settings=manhole_settings, globals=manhole_globals),
    )


def listen_tcp(
    bind_addresses: Collection[str],
    port: int,
    factory: ServerFactory,
    reactor: IReactorTCP = reactor,
    backlog: int = 50,
) -> List[Port]:
    """
    Create a TCP socket for a port and several addresses

    Returns:
        list of twisted.internet.tcp.Port listening for TCP connections
    """
    r = []
    for address in bind_addresses:
        try:
            r.append(reactor.listenTCP(port, factory, backlog, address))
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    # IReactorTCP returns an object implementing IListeningPort from listenTCP,
    # but we know it will be a Port instance.
    return r  # type: ignore[return-value]
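
# Illustrative only (hypothetical addresses/port): listen_tcp(["127.0.0.1", "::1"],
# 8008, factory) binds the same factory on both addresses and returns one Port per
# successful bind; addresses that cannot be bound are reported via check_bind_error.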


def listen_unix(
    path: str,
    mode: int,
    factory: ServerFactory,
    reactor: IReactorUNIX = reactor,
    backlog: int = 50,
) -> List[Port]:
    """
    Create a UNIX socket for a given path and 'mode' permission

    Returns:
        list of twisted.internet.tcp.Port listening on the UNIX socket
    """
    wantPID = True

    return [
        # IReactorUNIX returns an object implementing IListeningPort from listenUNIX,
        # but we know it will be a Port instance.
        cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID))
    ]


def listen_http(
    listener_config: ListenerConfig,
    root_resource: Resource,
    version_string: str,
    max_request_body_size: int,
    context_factory: Optional[IOpenSSLContextFactory],
    reactor: ISynapseReactor = reactor,
) -> List[Port]:
    assert listener_config.http_options is not None

    site_tag = listener_config.get_site_tag()

    site = SynapseSite(
        "synapse.access.%s.%s"
        % ("https" if listener_config.is_tls() else "http", site_tag),
        site_tag,
        listener_config,
        root_resource,
        version_string,
        max_request_body_size=max_request_body_size,
        reactor=reactor,
    )

    if isinstance(listener_config, TCPListenerConfig):
        if listener_config.is_tls():
            # refresh_certificate should have been called before this.
            assert context_factory is not None
            ports = listen_ssl(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                context_factory,
                reactor=reactor,
            )
            logger.info(
                "Synapse now listening on TCP port %d (TLS)", listener_config.port
            )
        else:
            ports = listen_tcp(
                listener_config.bind_addresses,
                listener_config.port,
                site,
                reactor=reactor,
            )
            logger.info("Synapse now listening on TCP port %d", listener_config.port)

    else:
        ports = listen_unix(
            listener_config.path, listener_config.mode, site, reactor=reactor
        )
        # getHost() returns a UNIXAddress which contains an instance variable of 'name'
        # encoded as a byte string. Decode as utf-8 so it's pretty.
        logger.info(
            "Synapse now listening on Unix Socket at: "
            f"{ports[0].getHost().name.decode('utf-8')}"
        )

    return ports


def listen_ssl(
    bind_addresses: Collection[str],
    port: int,
    factory: ServerFactory,
    context_factory: IOpenSSLContextFactory,
    reactor: IReactorSSL = reactor,
    backlog: int = 50,
) -> List[Port]:
    """
    Create a TLS-over-TCP socket for a port and several addresses

    Returns:
        list of twisted.internet.tcp.Port listening for TLS connections
    """
    r = []
    for address in bind_addresses:
        try:
            r.append(
                reactor.listenSSL(port, factory, context_factory, backlog, address)
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    # IReactorSSL incorrectly declares that an int is returned from listenSSL,
    # it actually returns an object implementing IListeningPort, but we know it
    # will be a Port instance.
    return r  # type: ignore[return-value]


def refresh_certificate(hs: "HomeServer") -> None:
    """
    Refresh the TLS certificates that Synapse is using by re-reading them from
    disk and updating the TLS context factories to use them.
    """
    if not hs.config.server.has_tls_listener():
        return

    hs.config.tls.read_certificate_from_disk()
    hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

    if hs._listening_services:
        logger.info("Updating context factories...")
        for i in hs._listening_services:
            # When you listenSSL, it doesn't make an SSL port but a TCP one with
            # a TLS wrapping factory around the factory you actually want to get
            # requests. This factory attribute is public but missing from
            # Twisted's documentation.
            if isinstance(i.factory, TLSMemoryBIOFactory):
                addr = i.getHost()
                logger.info(
                    "Replacing TLS context factory on [%s]:%i", addr.host, addr.port
                )
                # We want to replace TLS factories with a new one, with the new
                # TLS configuration. We do this by reaching in and pulling out
                # the wrappedFactory, and then re-wrapping it.
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory, False, i.factory.wrappedFactory
                )
        logger.info("Context factories updated.")


async def start(hs: "HomeServer") -> None:
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
    """
    reactor = hs.get_reactor()

    # We want to use a separate thread pool for the resolver so that large
    # numbers of DNS requests don't starve out other users of the threadpool.
    resolver_threadpool = ThreadPool(name="gai_resolver")
    resolver_threadpool.start()
    reactor.addSystemEventTrigger("during", "shutdown", resolver_threadpool.stop)
    reactor.installNameResolver(
        GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool)
    )

    # Register the threadpools with our metrics.
    register_threadpool("default", reactor.getThreadPool())
    register_threadpool("gai_resolver", resolver_threadpool)

    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):

        @wrap_as_background_process("sighup")
        async def handle_sighup(*args: Any, **kwargs: Any) -> None:
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args: Any, **kwargs: Any) -> None:
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)
        register_sighup(reload_cache_config, hs.config)

    # Apply the cache config.
    hs.config.caches.resize_all_caches()

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    init_tracer(hs)  # noqa

    # Instantiate the modules so they can register their web resources with the module
    # API before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        m = module(config, module_api)
        logger.info("Loaded module %s", m)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastores().main.db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", logger.info, "Shutting down..."
    )

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process or this is the worker in
    # charge of them, start collecting the phone home stats and shared usage metrics.
    if hs.config.worker.run_background_tasks:
        await hs.get_common_usage_metrics_manager().setup()
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hope that (almost) everything
    # currently allocated will still be needed for the rest of the process's
    # lifetime. Doing so means less work on each GC (hopefully).
    #
    # PyPy does not (yet?) implement gc.freeze()
    if hasattr(gc, "freeze"):
        gc.collect()
        gc.freeze()

        # Speed up shutdowns by freezing all allocated objects. This moves everything
        # into the permanent generation and excludes them from the final GC.
        atexit.register(gc.freeze)


def reload_cache_config(config: HomeServerConfig) -> None:
    """Reload the cache config from disk, apply it immediately, and resize caches accordingly.

    If the config is invalid, a `ConfigError` is logged and no changes are made.

    Otherwise, this:
        - replaces the `caches` section on the given `config` object, and
        - resizes all caches according to the new cache factors.

    Note that the following cache config keys are read, but not applied:
        - event_cache_size: used to set a max_size and _original_max_size on
          EventsWorkerStore._get_event_cache when it is created. We'd have to update
          the _original_max_size (and maybe the max_size) after creation.
        - sync_response_cache_duration: would have to update the timeout_sec attribute on
          HomeServer -> SyncHandler -> ResponseCache.
        - track_memory_usage. This affects synapse.util.caches.TRACK_MEMORY_USAGE which
          influences Synapse's self-reported metrics.

    Also, the HTTPConnectionPool in SimpleHTTPClient sets its maxPersistentPerHost
    parameter based on the global_factor. This won't be applied on a config reload.
    """
    try:
        previous_cache_config = config.reload_config_section("caches")
    except ConfigError as e:
        logger.warning("Failed to reload cache config")
        for f in format_config_error(e):
            logger.warning(f)
    else:
        logger.debug(
            "New cache config. Was:\n %s\nNow:\n %s",
            previous_cache_config.__dict__,
            config.caches.__dict__,
        )
        synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
        config.caches.resize_all_caches()
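
# reload_cache_config is hooked up to SIGHUP in `start` (via register_sighup), so
# editing the `caches` section of the config file and sending SIGHUP to the
# process applies the new cache factors without a restart.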


def setup_sentry(hs: "HomeServer") -> None:
    """Enable sentry integration, if enabled in configuration"""

    if not hs.config.metrics.sentry_enabled:
        return

    import sentry_sdk

    sentry_sdk.init(
        dsn=hs.config.metrics.sentry_dsn,
        release=SYNAPSE_VERSION,
    )

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server.server_name)

        app = (
            hs.config.worker.worker_app
            if hs.config.worker.worker_app
            else "synapse.app.homeserver"
        )
        name = hs.get_instance_name()
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)


def setup_sdnotify(hs: "HomeServer") -> None:
    """Adds process state hooks to tell systemd what we are up to."""

    # Tell systemd our state, if we're using it. This will silently fail if
    # we're not using systemd.
    sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))

    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", sdnotify, b"STOPPING=1"
    )


sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")


def sdnotify(state: bytes) -> None:
    """
    Send a notification to systemd, if the NOTIFY_SOCKET env var is set.

    This function is based on the sdnotify python package, but since it's only a few
    lines of code, it's easier to duplicate it here than to add a dependency on a
    package which many OSes don't include as a matter of principle.

    Args:
        state: notification to send
    """
    if not isinstance(state, bytes):
        raise TypeError("sdnotify should be called with a bytes")
    if not sdnotify_sockaddr:
        return
    addr = sdnotify_sockaddr
    if addr[0] == "@":
        addr = "\0" + addr[1:]

    try:
        with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
            sock.connect(addr)
            sock.sendall(state)
    except Exception as e:
        # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
        # unless systemd is expecting us to notify it.
        logger.warning("Unable to send notification to systemd: %s", e)


def max_request_body_size(config: HomeServerConfig) -> int:
    """Get a suitable maximum size for incoming HTTP requests"""

    # Other than media uploads, the biggest request we expect to see is a fully-loaded
    # /federation/v1/send request.
    #
    # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
    # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
    # json encoding); there is no specced limit to EDUs (see
    # https://github.com/matrix-org/matrix-doc/issues/3121).
    #
    # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
    #
    max_request_size = 200 * MAX_PDU_SIZE
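    # (For reference: 200 * MAX_PDU_SIZE = 200 * 65536 = 13,107,200 bytes, which is
    # the "about 12.5M" -- 12.5 MiB -- noted above.)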

    # if we have a media repo enabled, we may need to allow larger uploads than that
    if config.media.can_load_media_repo:
        max_request_size = max(max_request_size, config.media.max_upload_size)

    return max_request_size