# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import logging
import os
import signal
import socket
import sys
import traceback
from typing import Iterable

from typing_extensions import NoReturn

from twisted.internet import defer, error, reactor
from twisted.protocols.tls import TLSMemoryBIOFactory

import synapse
from synapse.app import check_bind_error
from synapse.config.server import ListenerConfig
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []


def register_sighup(func, *args, **kwargs):
    """
    Register a function to be called when a SIGHUP occurs.

    Args:
        func (function): Function to be called when sent a SIGHUP signal.
            Will be called with a single default argument, the homeserver.
        *args, **kwargs: args and kwargs to be passed to the target function.
    """
    _sighup_callbacks.append((func, args, kwargs))
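
# A minimal usage sketch (the handler and its extra argument here are
# hypothetical, not part of this module). Handlers receive the homeserver as
# their first argument, followed by the args/kwargs given at registration:
#
#     def _log_reload(hs, reason):
#         logger.info("SIGHUP received (%s)", reason)
#
#     register_sighup(_log_reload, "config reload")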


def start_worker_reactor(appname, config, run_command=reactor.run):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor. Pulls configuration from the 'worker' settings in 'config'.

    Args:
        appname (str): application name which will be sent to syslog
        config (synapse.config.Config): config object
        run_command (Callable[[], None]): callable that actually runs the reactor
    """

    logger = logging.getLogger(config.worker_app)

    start_reactor(
        appname,
        soft_file_limit=config.soft_file_limit,
        gc_thresholds=config.gc_thresholds,
        pid_file=config.worker_pid_file,
        daemonize=config.worker_daemonize,
        print_pidfile=config.print_pidfile,
        logger=logger,
        run_command=run_command,
    )


def start_reactor(
    appname,
    soft_file_limit,
    gc_thresholds,
    pid_file,
    daemonize,
    print_pidfile,
    logger,
    run_command=reactor.run,
):
    """ Run the reactor in the main process

    Daemonizes if necessary, and then configures some resources, before starting
    the reactor

    Args:
        appname (str): application name which will be sent to syslog
        soft_file_limit (int): soft file descriptor limit to apply to this process
        gc_thresholds (tuple|None): thresholds to pass to gc.set_threshold, if set
        pid_file (str): name of pid file to write to if daemonize is True
        daemonize (bool): true to run the reactor in a background process
        print_pidfile (bool): whether to print the pid file, if daemonize is True
        logger (logging.Logger): logger instance to pass to daemonize_process
        run_command (Callable[[], None]): callable that actually runs the reactor
    """

    install_dns_limiter(reactor)

    def run():
        logger.info("Running")
        change_resource_limit(soft_file_limit)
        if gc_thresholds:
            gc.set_threshold(*gc_thresholds)
        run_command()

    # make sure that we run the reactor with the sentinel log context,
    # otherwise other PreserveLoggingContext instances will get confused
    # and complain when they see the logcontext arbitrarily swapping
    # between the sentinel and `run` logcontexts.
    #
    # We also need to drop the logcontext before forking if we're daemonizing,
    # otherwise the cputime metrics get confused about the per-thread resource usage
    # appearing to go backwards.
    with PreserveLoggingContext():
        if daemonize:
            if print_pidfile:
                print(pid_file)

            daemonize_process(pid_file, logger)
        run()
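
# A minimal sketch of a non-daemonized invocation (values illustrative; a
# soft_file_limit of 0 means "raise the soft limit to the hard limit"):
#
#     start_reactor(
#         "synapse.app.example",
#         soft_file_limit=0,
#         gc_thresholds=None,
#         pid_file=None,
#         daemonize=False,
#         print_pidfile=False,
#         logger=logging.getLogger(__name__),
#     )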


def quit_with_error(error_string: str) -> NoReturn:
    message_lines = error_string.split("\n")
    # cap the banner width at 80 columns, so that a message consisting solely of
    # over-long lines cannot make max() raise over an empty sequence
    line_length = min(max(len(line) for line in message_lines), 80) + 2
    sys.stderr.write("*" * line_length + "\n")
    for line in message_lines:
        sys.stderr.write(" %s\n" % (line.rstrip(),))
    sys.stderr.write("*" * line_length + "\n")
    sys.exit(1)
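
# For example, quit_with_error("Config error:\nbad value") writes the
# following banner to stderr and exits with status 1:
#
#     ***************
#      Config error:
#      bad value
#     ***************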


def listen_metrics(bind_addresses, port):
    """
    Start Prometheus metrics server.
    """
    from synapse.metrics import RegistryProxy, start_http_server

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        start_http_server(port, addr=host, registry=RegistryProxy)


def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
    """
    Create a TCP socket for a port and several addresses

    Returns:
        list[twisted.internet.tcp.Port]: ports listening for TCP connections
    """
    r = []
    for address in bind_addresses:
        try:
            r.append(reactor.listenTCP(port, factory, backlog, address))
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    return r
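
# A minimal usage sketch with a stock Twisted factory (the factory and port
# here are illustrative; Synapse passes its own site/replication factories):
#
#     from twisted.internet.protocol import Factory
#     from twisted.protocols.basic import LineReceiver
#
#     ports = listen_tcp(["127.0.0.1", "::1"], 9999, Factory.forProtocol(LineReceiver))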


def listen_ssl(
    bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
    """
    Create a TLS-over-TCP socket for a port and several addresses

    Returns:
        list of twisted.internet.tcp.Port listening for TLS connections
    """
    r = []
    for address in bind_addresses:
        try:
            r.append(
                reactor.listenSSL(port, factory, context_factory, backlog, address)
            )
        except error.CannotListenError as e:
            check_bind_error(e, address, bind_addresses)

    return r


def refresh_certificate(hs):
    """
    Refresh the TLS certificates that Synapse is using by re-reading them from
    disk and updating the TLS context factories to use them.
    """

    if not hs.config.has_tls_listener():
        # attempt to reload the certs for the good of the tls_fingerprints
        hs.config.read_certificate_from_disk(require_cert_and_key=False)
        return

    hs.config.read_certificate_from_disk(require_cert_and_key=True)
    hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

    if hs._listening_services:
        logger.info("Updating context factories...")
        for i in hs._listening_services:
            # When you listenSSL, it doesn't make an SSL port but a TCP one with
            # a TLS wrapping factory around the factory you actually want to get
            # requests. This factory attribute is public but missing from
            # Twisted's documentation.
            if isinstance(i.factory, TLSMemoryBIOFactory):
                addr = i.getHost()
                logger.info(
                    "Replacing TLS context factory on [%s]:%i", addr.host, addr.port
                )
                # We want to replace TLS factories with a new one, with the new
                # TLS configuration. We do this by reaching in and pulling out
                # the wrappedFactory, and then re-wrapping it.
                i.factory = TLSMemoryBIOFactory(
                    hs.tls_server_context_factory, False, i.factory.wrappedFactory
                )
        logger.info("Context factories updated.")


def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running and (if we're using ACME) the
    TLS certificates are in place.

    Will start the main HTTP listeners and do some other startup tasks, and then
    notify systemd.

    Args:
        hs: homeserver instance
        listeners: Listener configuration ('listeners' in homeserver.yaml)
    """
    try:
        # Set up the SIGHUP machinery.
        if hasattr(signal, "SIGHUP"):

            def handle_sighup(*args, **kwargs):
                # Tell systemd our state, if we're using it. This will silently fail if
                # we're not using systemd.
                sdnotify(b"RELOADING=1")

                for i, args, kwargs in _sighup_callbacks:
                    i(hs, *args, **kwargs)

                sdnotify(b"READY=1")

            signal.signal(signal.SIGHUP, handle_sighup)

        register_sighup(refresh_certificate)

        # Load the certificate from disk.
        refresh_certificate(hs)

        # Start the tracer
        synapse.logging.opentracing.init_tracer(  # type: ignore[attr-defined] # noqa
            hs
        )

        # It is now safe to start your Synapse.
        hs.start_listening(listeners)
        hs.get_datastore().db_pool.start_profiling()
        hs.get_pusherpool().start()

        setup_sentry(hs)
        setup_sdnotify(hs)

        # We now freeze all allocated objects in the hopes that (almost)
        # everything currently allocated are things that will be used for the
        # rest of time. Doing so means less work each GC (hopefully).
        #
        # This only works on Python 3.7 and above.
        if sys.version_info >= (3, 7):
            gc.collect()
            gc.freeze()
    except Exception:
        traceback.print_exc(file=sys.stderr)
        reactor = hs.get_reactor()
        if reactor.running:
            reactor.stop()
        sys.exit(1)


def setup_sentry(hs):
    """Enable sentry integration, if enabled in configuration

    Args:
        hs (synapse.server.HomeServer)
    """

    if not hs.config.sentry_enabled:
        return

    import sentry_sdk

    sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))

    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("matrix_server_name", hs.config.server_name)

        app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
        name = hs.get_instance_name()
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)


def setup_sdnotify(hs):
    """Adds process state hooks to tell systemd what we are up to.
    """

    # Tell systemd our state, if we're using it. This will silently fail if
    # we're not using systemd.
    sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))

    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", sdnotify, b"STOPPING=1"
    )
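
# For these notifications to have any effect, the process must be started by
# systemd under a notify-type unit, e.g. (illustrative snippet; the path and
# invocation are hypothetical):
#
#     [Service]
#     Type=notify
#     NotifyAccess=main
#     ExecStart=/usr/bin/python -m synapse.app.homeserver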


def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
    """Replaces the resolver with one that limits the number of in flight DNS
    requests.

    This is to workaround https://twistedmatrix.com/trac/ticket/9620, where we
    can run out of file descriptors and infinite loop if we attempt to do too
    many DNS queries at once

    XXX: I'm confused by this. reactor.nameResolver does not use twisted.names unless
    you explicitly install twisted.names as the resolver; rather it uses a GAIResolver
    backed by the reactor's default threadpool (which is limited to 10 threads). So
    (a) I don't understand why twisted ticket 9620 is relevant, and (b) I don't
    understand why we would run out of FDs if we did too many lookups at once.

    -- richvdh 2020/08/29
    """
    new_resolver = _LimitedHostnameResolver(
        reactor.nameResolver, max_dns_requests_in_flight
    )

    reactor.installNameResolver(new_resolver)
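
# The limiting itself is done with a Linearizer (see _LimitedHostnameResolver
# below): at most `max_count` callers may hold the lock at once, and further
# callers queue behind them. An illustrative sketch of that primitive:
#
#     limiter = Linearizer(name="example", max_count=2)
#
#     @defer.inlineCallbacks
#     def limited_work():
#         with (yield limiter.queue(())):
#             pass  # at most two of these blocks run concurrently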


class _LimitedHostnameResolver(object):
    """Wraps an IHostnameResolver, limiting the number of in-flight DNS lookups.
    """

    def __init__(self, resolver, max_dns_requests_in_flight):
        self._resolver = resolver
        self._limiter = Linearizer(
            name="dns_client_limiter", max_count=max_dns_requests_in_flight
        )

    def resolveHostName(
        self,
        resolutionReceiver,
        hostName,
        portNumber=0,
        addressTypes=None,
        transportSemantics="TCP",
    ):
        # We need this function to return `resolutionReceiver` so we do all the
        # actual logic involving deferreds in a separate function.

        # even though this is happening within the depths of twisted, we need to drop
        # our logcontext before starting _resolve, otherwise: (a) _resolve will drop
        # the logcontext if it returns an incomplete deferred; (b) _resolve will
        # call the resolutionReceiver *with* a logcontext, which it won't be expecting.
        with PreserveLoggingContext():
            self._resolve(
                resolutionReceiver,
                hostName,
                portNumber,
                addressTypes,
                transportSemantics,
            )

        return resolutionReceiver

    @defer.inlineCallbacks
    def _resolve(
        self,
        resolutionReceiver,
        hostName,
        portNumber=0,
        addressTypes=None,
        transportSemantics="TCP",
    ):

        with (yield self._limiter.queue(())):
            # resolveHostName doesn't return a Deferred, so we need to hook into
            # the receiver interface to get told when resolution has finished.

            deferred = defer.Deferred()
            receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred)

            self._resolver.resolveHostName(
                receiver, hostName, portNumber, addressTypes, transportSemantics
            )

            yield deferred


class _DeferredResolutionReceiver(object):
    """Wraps an IResolutionReceiver and simply resolves the given deferred when
    resolution is complete
    """

    def __init__(self, receiver, deferred):
        self._receiver = receiver
        self._deferred = deferred

    def resolutionBegan(self, resolutionInProgress):
        self._receiver.resolutionBegan(resolutionInProgress)

    def addressResolved(self, address):
        self._receiver.addressResolved(address)

    def resolutionComplete(self):
        self._deferred.callback(())
        self._receiver.resolutionComplete()


sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")


def sdnotify(state):
    """
    Send a notification to systemd, if the NOTIFY_SOCKET env var is set.

    This function is based on the sdnotify python package, but since it's only a few
    lines of code, it's easier to duplicate it here than to add a dependency on a
    package which many OSes don't include as a matter of principle.

    Args:
        state (bytes): notification to send
    """
    if not isinstance(state, bytes):
        raise TypeError("sdnotify should be called with a bytes")
    if not sdnotify_sockaddr:
        return
    addr = sdnotify_sockaddr
    if addr[0] == "@":
        # an abstract-namespace socket: replace the leading "@" with a NUL byte
        addr = "\0" + addr[1:]

    try:
        with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
            sock.connect(addr)
            sock.sendall(state)
    except Exception as e:
        # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
        # unless systemd is expecting us to notify it.
        logger.warning("Unable to send notification to systemd: %s", e)
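
# A minimal sketch of the protocol: systemd sets NOTIFY_SOCKET to a unix
# datagram socket (the path below is illustrative), and the service writes
# newline-separated KEY=VALUE assignments to it:
#
#     NOTIFY_SOCKET=/run/systemd/notify
#
#     sdnotify(b"READY=1")
#     sdnotify(b"STATUS=Synapse is up")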