# Copyright 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
from typing import Any, Dict, List, Union

import attr
from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr

from synapse.config._base import (
    Config,
    ConfigError,
    RoutableShardedWorkerHandlingConfig,
    ShardedWorkerHandlingConfig,
)
from synapse.config._util import parse_and_validate_mapping
from synapse.config.server import (
    DIRECT_TCP_ERROR,
    TCPListenerConfig,
    parse_listener_def,
)
from synapse.types import JsonDict

_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""

_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to the main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally (as a deprecated
solution) in every worker's yaml as the various `worker_replication_*` settings, as defined
in the workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""

# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
# Use this to adjust what the main process is known as in the yaml instance_map
MAIN_PROCESS_INSTANCE_MAP_NAME = "main"
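
# For illustration only: a shared-config `instance_map` is expected to look
# roughly like the following YAML, with the main process declared under the
# 'main' key (hostnames and ports here are hypothetical):
#
#   instance_map:
#     main:
#       host: localhost
#       port: 8081
#     event_persister1:
#       host: localhost
#       port: 8034
#       tls: false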

logger = logging.getLogger(__name__)


def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
    """Helper that allows a config option expecting a list of strings to also
    accept a single string, which is wrapped into a one-element list.
    """

    if isinstance(obj, str):
        return [obj]
    return obj
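
# e.g. _instance_to_list_converter("worker1") == ["worker1"]
#      _instance_to_list_converter(["worker1", "worker2"]) == ["worker1", "worker2"]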


class ConfigModel(BaseModel):
    """A custom version of Pydantic's BaseModel which

     - ignores unknown fields and
     - does not allow fields to be overwritten after construction,

    but otherwise uses Pydantic's default behaviour.

    For now, ignore unknown fields. In the future, we could change this so that unknown
    config values cause a ValidationError, provided the error messages are meaningful to
    server operators.

    Subclassing in this way is recommended by
    https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
    """

    class Config:
        # By default, ignore fields that we don't recognise.
        extra = Extra.ignore
        # By default, don't allow fields to be reassigned after parsing.
        allow_mutation = False


class InstanceLocationConfig(ConfigModel):
    """The host and port to talk to an instance via HTTP replication."""

    host: StrictStr
    port: StrictInt
    tls: StrictBool = False

    def scheme(self) -> str:
        """Return the scheme to use for connections, based on self.tls"""
        return "https" if self.tls else "http"

    def netloc(self) -> str:
        """Format the network location (host:port) of this instance"""
        return f"{self.host}:{self.port}"


@attr.s
class WriterLocations:
    """Specifies the instances that write various streams.

    Attributes:
        events: The instances that write to the event and backfill streams.
        typing: The instances that write to the typing stream. Currently
            can only be a single instance.
        to_device: The instances that write to the to_device stream. Currently
            can only be a single instance.
        account_data: The instances that write to the account data streams. Currently
            can only be a single instance.
        receipts: The instances that write to the receipts stream. Currently
            can only be a single instance.
        presence: The instances that write to the presence stream. Currently
            can only be a single instance.
    """

    events: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
    typing: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
    to_device: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
    account_data: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
    receipts: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
    presence: List[str] = attr.ib(
        default=["master"],
        converter=_instance_to_list_converter,
    )
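
# For illustration, a `stream_writers` block in the shared configuration maps
# stream names to instance names, e.g. (worker names here are hypothetical):
#
#   stream_writers:
#     events:
#       - event_persister1
#       - event_persister2
#     typing: worker1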


class WorkerConfig(Config):
    """The workers are processes run separately to the main synapse process.
    They have their own pid_file and listener configuration. They use the
    main process's HTTP replication endpoint to talk to the main synapse process."""

    section = "worker"

    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        self.worker_app = config.get("worker_app")

        # Canonicalise worker_app so that master always has None
        if self.worker_app == "synapse.app.homeserver":
            self.worker_app = None

        self.worker_listeners = [
            parse_listener_def(i, x)
            for i, x in enumerate(config.get("worker_listeners", []))
        ]
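
        # For illustration, each entry of `worker_listeners` is a listener
        # definition such as the following (port and resources are hypothetical):
        #
        #   worker_listeners:
        #     - type: http
        #       port: 8083
        #       resources:
        #         - names: [client, federation]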

        self.worker_daemonize = bool(config.get("worker_daemonize"))
        self.worker_pid_file = config.get("worker_pid_file")

        worker_log_config = config.get("worker_log_config")
        if worker_log_config is not None and not isinstance(worker_log_config, str):
            raise ConfigError("worker_log_config must be a string")
        self.worker_log_config = worker_log_config

        # The port on the main synapse for TCP replication
        if "worker_replication_port" in config:
            raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))

        # The shared secret used for authentication when connecting to the main synapse.
        self.worker_replication_secret = config.get("worker_replication_secret", None)

        self.worker_name = config.get("worker_name", self.worker_app)
        self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME

        # FIXME: Remove this check after a suitable amount of time.
        self.worker_main_http_uri = config.get("worker_main_http_uri", None)
        if self.worker_main_http_uri is not None:
            logger.warning(
                "The config option worker_main_http_uri is unused since Synapse 1.73. "
                "It can be safely removed from your configuration."
            )

        # This option is really only here to support the `--manhole` command
        # line argument.
        manhole = config.get("worker_manhole")
        if manhole:
            self.worker_listeners.append(
                TCPListenerConfig(
                    port=manhole,
                    bind_addresses=["127.0.0.1"],
                    type="manhole",
                )
            )

        federation_sender_instances = self._worker_names_performing_this_duty(
            config,
            "send_federation",
            "synapse.app.federation_sender",
            "federation_sender_instances",
        )
        self.send_federation = self.instance_name in federation_sender_instances
        self.federation_shard_config = ShardedWorkerHandlingConfig(
            federation_sender_instances
        )
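
        # For illustration, federation sending can be sharded across workers by
        # listing them in the shared config (worker names are hypothetical):
        #
        #   federation_sender_instances:
        #     - federation_sender1
        #     - federation_sender2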

        # A map from instance name to host/port of their HTTP replication endpoint.
        # Check if the main process is declared. Inject it into the map if it's not,
        # based first on whether a 'main' block is declared and then on the
        # 'worker_replication_*' data. If both are available, the instance_map wins.
        # The main process itself doesn't need this data as it would never have to
        # talk to itself.
        instance_map: Dict[str, Any] = config.get("instance_map", {})

        if self.instance_name != MAIN_PROCESS_INSTANCE_NAME:
            # The host used to connect to the main synapse
            main_host = config.get("worker_replication_host", None)

            # The port on the main synapse for HTTP replication endpoint
            main_port = config.get("worker_replication_http_port")

            # The tls mode on the main synapse for HTTP replication endpoint.
            # For backward compatibility this defaults to False.
            main_tls = config.get("worker_replication_http_tls", False)

            # For now, accept 'main' in the instance_map, but the replication system
            # expects 'master', so rewrite the key until that changes.
            if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map:
                instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[
                    MAIN_PROCESS_INSTANCE_MAP_NAME
                ]
                del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]

            # This is the backwards compatibility bit that handles the
            # worker_replication_* bits using setdefault() to not overwrite anything.
            elif main_host is not None and main_port is not None:
                instance_map.setdefault(
                    MAIN_PROCESS_INSTANCE_NAME,
                    {
                        "host": main_host,
                        "port": main_port,
                        "tls": main_tls,
                    },
                )

            else:
                # If we've gotten here, it means that the main process is not in the
                # instance_map and that not enough worker_replication_* variables
                # were declared in the worker's yaml.
                raise ConfigError(
                    _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
                    % MAIN_PROCESS_INSTANCE_MAP_NAME
                )

        self.instance_map: Dict[
            str, InstanceLocationConfig
        ] = parse_and_validate_mapping(instance_map, InstanceLocationConfig)

        # Map from stream type to the instances that write to it, c.f. WriterLocations.
        writers = config.get("stream_writers") or {}
        self.writers = WriterLocations(**writers)

        # Check that the configured writers for each stream also appear in
        # `instance_map`.
        for stream in (
            "events",
            "typing",
            "to_device",
            "account_data",
            "receipts",
            "presence",
        ):
            instances = _instance_to_list_converter(getattr(self.writers, stream))
            for instance in instances:
                if instance != "master" and instance not in self.instance_map:
                    raise ConfigError(
                        "Instance %r is configured to write %s but does not appear in `instance_map` config."
                        % (instance, stream)
                    )

        if len(self.writers.typing) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `typing` messages."
            )

        if len(self.writers.to_device) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `to_device` messages."
            )

        if len(self.writers.account_data) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `account_data` messages."
            )

        if len(self.writers.receipts) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `receipts` messages."
            )

        if len(self.writers.events) == 0:
            raise ConfigError("Must specify at least one instance to handle `events`.")

        if len(self.writers.presence) != 1:
            raise ConfigError(
                "Must only specify one instance to handle `presence` messages."
            )

        self.events_shard_config = RoutableShardedWorkerHandlingConfig(
            self.writers.events
        )

        # Handle sharded push
        pusher_instances = self._worker_names_performing_this_duty(
            config,
            "start_pushers",
            "synapse.app.pusher",
            "pusher_instances",
        )
        self.start_pushers = self.instance_name in pusher_instances
        self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
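
        # For illustration, pushers can likewise be sharded by listing the
        # workers in the shared config (worker names are hypothetical):
        #
        #   pusher_instances:
        #     - pusher_worker1
        #     - pusher_worker2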

        # Whether this worker should run background tasks or not.
        #
        # As a note for developers, the background tasks guarded by this should
        # be able to run on only a single instance (meaning that they don't
        # depend on any in-memory state of a particular worker).
        #
        # No effort is made to ensure only a single instance of these tasks is
        # running.
        background_tasks_instance = config.get("run_background_tasks_on") or "master"
        self.run_background_tasks = (
            self.worker_name is None and background_tasks_instance == "master"
        ) or self.worker_name == background_tasks_instance
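
        # For illustration: `run_background_tasks_on: background_worker1` in the
        # shared config delegates these tasks to that (hypothetical) worker;
        # leaving the option unset keeps them on the main process.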

        self.should_notify_appservices = self._should_this_worker_perform_duty(
            config,
            legacy_master_option_name="notify_appservices",
            legacy_worker_app_name="synapse.app.appservice",
            new_option_name="notify_appservices_from_worker",
        )

        self.should_update_user_directory = self._should_this_worker_perform_duty(
            config,
            legacy_master_option_name="update_user_directory",
            legacy_worker_app_name="synapse.app.user_dir",
            new_option_name="update_user_directory_from_worker",
        )
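
        # For illustration, the modern style names the designated worker in the
        # shared config, e.g. (worker names are hypothetical):
        #
        #   notify_appservices_from_worker: appservice_worker1
        #   update_user_directory_from_worker: userdir_worker1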

    def _should_this_worker_perform_duty(
        self,
        config: Dict[str, Any],
        legacy_master_option_name: str,
        legacy_worker_app_name: str,
        new_option_name: str,
    ) -> bool:
        """
        Figures out whether this worker should perform a certain duty.

        This function is temporary and is only to deal with the complexity
        of allowing old, transitional and new configurations all at once.

        Contradictions between the legacy and new part of a transitional configuration
        will lead to a ConfigError.

        Parameters:
            config: The config dictionary
            legacy_master_option_name: The name of a legacy option, whose value is boolean,
                specifying whether it's the master that should handle a certain duty.
                e.g. "notify_appservices"
            legacy_worker_app_name: The name of a legacy Synapse worker application
                that would traditionally perform this duty.
                e.g. "synapse.app.appservice"
            new_option_name: The name of the new option, whose value is the name of a
                designated worker to perform the duty.
                e.g. "notify_appservices_from_worker"
        """

        # None means 'unspecified'; True means 'run here' and False means
        # 'don't run here'.
        new_option_should_run_here = None
        if new_option_name in config:
            designated_worker = config[new_option_name] or "master"
            new_option_should_run_here = (
                designated_worker == "master" and self.worker_name is None
            ) or designated_worker == self.worker_name

        legacy_option_should_run_here = None
        if legacy_master_option_name in config:
            run_on_master = bool(config[legacy_master_option_name])

            legacy_option_should_run_here = (
                self.worker_name is None and run_on_master
            ) or (self.worker_app == legacy_worker_app_name and not run_on_master)

            # Suggest using the new option instead.
            logger.warning(
                _DEPRECATED_WORKER_DUTY_OPTION_USED,
                legacy_master_option_name,
                new_option_name,
            )

        if self.worker_app == legacy_worker_app_name and config.get(
            legacy_master_option_name, True
        ):
            # As an extra bit of complication, we need to check that the
            # specialised worker is only used if the legacy config says the
            # master isn't performing the duties.
            raise ConfigError(
                f"Cannot use deprecated worker app type '{legacy_worker_app_name}' whilst deprecated option '{legacy_master_option_name}' is not set to false.\n"
                f"Consider setting `worker_app: synapse.app.generic_worker` and using the '{new_option_name}' option instead.\n"
                f"The '{new_option_name}' option replaces '{legacy_master_option_name}'."
            )

        if new_option_should_run_here is None and legacy_option_should_run_here is None:
            # Neither option specified; the fallback behaviour is to run on the main process
            return self.worker_name is None

        if (
            new_option_should_run_here is not None
            and legacy_option_should_run_here is not None
        ):
            # Both options specified; ensure they match!
            if new_option_should_run_here != legacy_option_should_run_here:
                update_worker_type = (
                    " and set worker_app: synapse.app.generic_worker"
                    if self.worker_app == legacy_worker_app_name
                    else ""
                )
                # If the values conflict, we suggest the admin removes the legacy option
                # for simplicity.
                raise ConfigError(
                    f"Conflicting configuration options: {legacy_master_option_name} (legacy), {new_option_name} (new).\n"
                    f"Suggestion: remove {legacy_master_option_name}{update_worker_type}.\n"
                )

        # We've already validated that these aren't conflicting; now just see if
        # either is True.
        # (By this point, these are either the same value or only one is not None.)
        return bool(new_option_should_run_here or legacy_option_should_run_here)
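
    # For illustration of the duty logic above (option values are hypothetical):
    #   - `notify_appservices: false` on the main process plus
    #     `worker_app: synapse.app.appservice` on a worker is the legacy style;
    #   - `notify_appservices_from_worker: appservice_worker1` is the modern style;
    #   - mixing the two with contradictory values raises a ConfigError.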

    def _worker_names_performing_this_duty(
        self,
        config: Dict[str, Any],
        legacy_option_name: str,
        legacy_app_name: str,
        modern_instance_list_name: str,
    ) -> List[str]:
        """
        Retrieves the names of the workers handling a given duty, by either legacy
        option or instance list.

        There are two ways of configuring which instances handle a given duty, e.g.
        for configuring pushers:

        1. The old way, where "start_pushers" is set to false and a
           `synapse.app.pusher` worker app is run.
        2. Specifying the workers handling the duty in `pusher_instances`.

        Args:
            config: settings read from yaml.
            legacy_option_name: the old way of enabling options. e.g. 'start_pushers'
            legacy_app_name: The historical app name. e.g. 'synapse.app.pusher'
            modern_instance_list_name: the string name of the new instance_list. e.g.
                'pusher_instances'

        Returns:
            A list of worker instance names handling the given duty.
        """

        legacy_option = config.get(legacy_option_name, True)

        worker_instances = config.get(modern_instance_list_name)
        if worker_instances is None:
            # Default to an empty list, which means "another, unknown, worker is
            # responsible for it".
            worker_instances = []

            # If no worker instances are set we check if the legacy option
            # is set, which means use the main process.
            if legacy_option:
                worker_instances = ["master"]

        if self.worker_app == legacy_app_name:
            if legacy_option:
                # If we're using `legacy_app_name`, and not using
                # `modern_instance_list_name`, then we should have
                # explicitly set `legacy_option_name` to false.
                raise ConfigError(
                    f"The '{legacy_option_name}' config option must be disabled in "
                    "the main synapse process before it can be handled by a separate "
                    "worker.\n"
                    f"Please add `{legacy_option_name}: false` to the main config.\n",
                )

            worker_instances = [self.worker_name]

        return worker_instances
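
    # For illustration, the legacy style for the pusher duty looks roughly like
    # this (the worker name is hypothetical):
    #
    #   # main process config
    #   start_pushers: false
    #
    #   # worker config
    #   worker_app: synapse.app.pusher
    #   worker_name: pusher_worker1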

    def read_arguments(self, args: argparse.Namespace) -> None:
        # We support a bunch of command line arguments that override options in
        # the config. A lot of these options have a worker_* prefix when running
        # on workers so we also have to override them when command line options
        # are specified.

        if args.daemonize is not None:
            self.worker_daemonize = args.daemonize
        if args.manhole is not None:
            self.worker_manhole = args.worker_manhole