2021-09-29 12:44:15 +02:00
|
|
|
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
|
2014-08-31 17:06:39 +02:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2021-12-01 13:28:23 +01:00
|
|
|
import argparse
|
2021-02-03 13:13:46 +01:00
|
|
|
import itertools
|
2018-05-31 11:04:50 +02:00
|
|
|
import logging
|
2018-12-21 16:04:57 +01:00
|
|
|
import os.path
|
2021-11-08 15:13:10 +01:00
|
|
|
import urllib.parse
|
2019-08-28 14:12:22 +02:00
|
|
|
from textwrap import indent
|
2021-09-28 15:24:40 +02:00
|
|
|
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
|
2018-05-31 11:04:50 +02:00
|
|
|
|
2019-07-29 18:47:27 +02:00
|
|
|
import attr
|
2019-08-28 14:12:22 +02:00
|
|
|
import yaml
|
2021-02-03 13:13:46 +01:00
|
|
|
from netaddr import AddrFormatError, IPNetwork, IPSet
|
2019-05-13 20:05:06 +02:00
|
|
|
|
2021-09-06 17:08:03 +02:00
|
|
|
from twisted.conch.ssh.keys import Key
|
|
|
|
|
2019-05-23 16:00:20 +02:00
|
|
|
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
|
2023-05-18 12:11:30 +02:00
|
|
|
from synapse.types import JsonDict, StrSequence
|
2021-04-06 15:38:30 +02:00
|
|
|
from synapse.util.module_loader import load_module
|
2021-01-20 14:15:14 +01:00
|
|
|
from synapse.util.stringutils import parse_and_validate_server_name
|
2018-07-09 08:09:20 +02:00
|
|
|
|
2016-06-07 16:45:56 +02:00
|
|
|
from ._base import Config, ConfigError
|
2021-09-06 17:08:03 +02:00
|
|
|
from ._util import validate_config
|
2014-08-31 17:06:39 +02:00
|
|
|
|
2018-05-31 11:04:50 +02:00
|
|
|
# Module-level logger. Use logging.getLogger() so this logger is registered
# with the logging manager and participates in the logger hierarchy.
# (Instantiating logging.Logger(__name__) directly creates a detached logger
# with no parent, which ignores any logging configuration applied to the
# root/parent loggers.)
logger = logging.getLogger(__name__)
|
|
|
|
|
2022-09-06 09:50:02 +02:00
|
|
|
DIRECT_TCP_ERROR = """
|
|
|
|
Using direct TCP replication for workers is no longer supported.
|
|
|
|
|
|
|
|
Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis
|
|
|
|
"""
|
|
|
|
|
2019-02-11 13:50:30 +01:00
|
|
|
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
#
# We later check for errors when binding to 0.0.0.0 and ignore them if :: is also
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
|
2019-02-11 13:50:30 +01:00
|
|
|
|
2021-02-03 13:13:46 +01:00
|
|
|
|
|
|
|
def _6to4(network: IPNetwork) -> IPNetwork:
    """Convert an IPv4 network into a 6to4 IPv6 network per RFC 3056."""

    # A 6to4 network is built from:
    # * the fixed 2002::/16 prefix as the first 16 bits,
    # * the first IPv4 address of the network, hex-encoded, as the next 32 bits,
    # * a prefix length extended by the 16 bits of the 2002 prefix.
    v4_hex = "%08x" % network.first
    return IPNetwork(
        "2002:%s:%s::/%d" % (v4_hex[:4], v4_hex[4:], 16 + network.prefixlen)
    )
|
|
|
|
|
|
|
|
|
|
|
|
def generate_ip_set(
    ip_addresses: Optional[Iterable[str]],
    extra_addresses: Optional[Iterable[str]] = None,
    config_path: Optional[StrSequence] = None,
) -> IPSet:
    """
    Generate an IPSet from a list of IP addresses or CIDRs.

    Additionally, for each IPv4 network in the list of IP addresses, also
    includes the corresponding IPv6 networks.

    This includes:

    * IPv4-Compatible IPv6 Address (see RFC 4291, section 2.5.5.1)
    * IPv4-Mapped IPv6 Address (see RFC 4291, section 2.5.5.2)
    * 6to4 Address (see RFC 3056, section 2)

    Args:
        ip_addresses: An iterable of IP addresses or CIDRs.
        extra_addresses: An iterable of IP addresses or CIDRs.
        config_path: The path in the configuration for error messages.

    Returns:
        A new IP set.
    """
    ip_set = IPSet()
    combined = itertools.chain(ip_addresses or (), extra_addresses or ())
    for cidr in combined:
        try:
            net = IPNetwork(cidr)
        except AddrFormatError as e:
            raise ConfigError(
                "Invalid IP range provided: %s." % (cidr,), config_path
            ) from e
        ip_set.add(net)

        # For IPv4 networks (no ":" in the textual form), also add the IPv6
        # equivalents. Duplicates are harmless since this is a set.
        if ":" not in str(net):
            for ipv6_equivalent in (
                IPNetwork(net).ipv6(ipv4_compatible=True),
                IPNetwork(net).ipv6(ipv4_compatible=False),
                _6to4(net),
            ):
                ip_set.add(ipv6_equivalent)

    return ip_set
|
|
|
|
|
|
|
|
|
|
|
|
# IP ranges that are considered private / unroutable / don't make sense.
# Used below as the default for the `ip_range_blacklist` config option.
DEFAULT_IP_RANGE_BLOCKLIST = [
    # Localhost
    "127.0.0.0/8",
    # Private networks.
    "10.0.0.0/8",
    "172.16.0.0/12",
    "192.168.0.0/16",
    # Carrier grade NAT.
    "100.64.0.0/10",
    # Address registry.
    "192.0.0.0/24",
    # Link-local networks.
    "169.254.0.0/16",
    # Formerly used for 6to4 relay.
    "192.88.99.0/24",
    # Testing networks.
    "198.18.0.0/15",
    "192.0.2.0/24",
    "198.51.100.0/24",
    "203.0.113.0/24",
    # Multicast.
    "224.0.0.0/4",
    # Localhost
    "::1/128",
    # Link-local addresses.
    "fe80::/10",
    # Unique local addresses.
    "fc00::/7",
    # Testing networks.
    "2001:db8::/32",
    # Multicast.
    "ff00::/8",
    # Site-local addresses
    "fec0::/10",
]
|
|
|
|
|
2023-01-18 19:59:48 +01:00
|
|
|
# Fallback used by read_config when the `default_room_version` config option
# is not set.
DEFAULT_ROOM_VERSION = "10"
|
2019-05-23 16:00:20 +02:00
|
|
|
|
2019-07-29 18:47:27 +02:00
|
|
|
# Default value for LimitRemoteRoomsConfig.complexity_error: the message shown
# when joining a room is refused for exceeding the complexity limit.
ROOM_COMPLEXITY_TOO_GREAT = (
    "Your homeserver is unable to join rooms this large or complex. "
    "Please speak to your server administrator, or upgrade your instance "
    "to join this room."
)
|
|
|
|
|
2019-09-26 13:57:01 +02:00
|
|
|
METRICS_PORT_WARNING = """\
|
|
|
|
The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
|
|
|
|
a listener. Please see
|
2021-07-07 13:35:45 +02:00
|
|
|
https://matrix-org.github.io/synapse/latest/metrics-howto.html
|
2019-09-26 13:57:01 +02:00
|
|
|
on how to configure the new listener.
|
|
|
|
--------------------------------------------------------------------------------"""
|
|
|
|
|
2014-08-31 17:06:39 +02:00
|
|
|
|
2020-06-16 13:44:07 +02:00
|
|
|
# Valid values for a listener's `type`; enforced by the validators on
# TCPListenerConfig.type and UnixListenerConfig.type below.
KNOWN_LISTENER_TYPES = {
    "http",
    "metrics",
    "manhole",
}

# Valid resource names for an HTTP listener's `resources` section; enforced by
# the validator on HttpResourceConfig.names below.
KNOWN_RESOURCES = {
    "client",
    "consent",
    "federation",
    "health",
    "keys",
    "media",
    "metrics",
    "openid",
    "replication",
    "static",
}
|
|
|
|
|
|
|
|
|
|
|
|
@attr.s(frozen=True)
class HttpResourceConfig:
    """One entry of an HTTP listener's `resources` list."""

    # The resource names to serve; each must be one of KNOWN_RESOURCES.
    names: List[str] = attr.ib(
        factory=list,
        validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)),
    )
    # Whether responses for this resource should be compressed.
    compress: bool = attr.ib(
        default=False,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),  # type: ignore[arg-type]
    )
|
|
|
|
|
|
|
|
|
2021-09-28 15:24:40 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpListenerConfig:
    """Object describing the http-specific parts of the config of a listener"""

    # Whether to trust X-Forwarded-* headers — presumably for deployments
    # behind a reverse proxy; confirm against where this flag is consumed.
    x_forwarded: bool = False
    # The resources served on this listener.
    resources: List[HttpResourceConfig] = attr.Factory(list)
    # Extra resources keyed by path, as raw config dicts.
    additional_resources: Dict[str, dict] = attr.Factory(dict)
    # Optional identifier for the listener; used by get_site_tag() on the
    # listener config objects below.
    tag: Optional[str] = None
    # Optional name of a request header from which to take the request ID.
    request_id_header: Optional[str] = None
    # If true, the listener will return CORS response headers compatible with MSC3886:
    # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
    experimental_cors_msc3886: bool = False
|
2020-06-16 13:44:07 +02:00
|
|
|
|
|
|
|
|
2021-09-28 15:24:40 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class TCPListenerConfig:
    """Object describing the configuration of a single TCP listener."""

    # Port number to listen on.
    port: int = attr.ib(validator=attr.validators.instance_of(int))
    # Addresses to bind to (e.g. DEFAULT_BIND_ADDRESSES).
    # NOTE(review): `instance_of(List)` validates against typing.List rather
    # than the builtin `list` — works at runtime but is a deprecated isinstance
    # target; consider `list`.
    bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List))
    # One of KNOWN_LISTENER_TYPES.
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
    # Whether this listener terminates TLS itself.
    tls: bool = False

    # http_options is only populated if type=http
    http_options: Optional[HttpListenerConfig] = None

    def get_site_tag(self) -> str:
        """Retrieves http_options.tag if it exists, otherwise the port number."""
        if self.http_options and self.http_options.tag is not None:
            return self.http_options.tag
        else:
            return str(self.port)

    def is_tls(self) -> bool:
        """Whether this listener is configured for TLS."""
        return self.tls
|
|
|
|
|
|
|
|
|
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class UnixListenerConfig:
    """Object describing the configuration of a single Unix socket listener."""

    # Note: unix sockets can not be tls encrypted, so HAVE to be behind a tls-handling
    # reverse proxy
    path: str = attr.ib()
    # File permission bits for the socket.
    # A default(0o666) for this is set in parse_listener_def() below
    mode: int
    # One of KNOWN_LISTENER_TYPES.
    type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))

    # http_options is only populated if type=http
    http_options: Optional[HttpListenerConfig] = None

    def get_site_tag(self) -> str:
        """Unix socket listeners are all identified by the fixed tag "unix"."""
        return "unix"

    def is_tls(self) -> bool:
        """Unix sockets can't have TLS"""
        return False
|
|
|
|
|
|
|
|
|
|
|
|
# A listener is configured either over TCP or over a Unix domain socket.
ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig]
|
|
|
|
|
2020-06-16 13:44:07 +02:00
|
|
|
|
2021-09-28 15:24:40 +02:00
|
|
|
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
    """Object describing the configuration of the manhole"""

    # Credentials required to log in to the manhole
    # (from `manhole_settings.username` / `password` in read_config).
    username: str = attr.ib(validator=attr.validators.instance_of(str))
    password: str = attr.ib(validator=attr.validators.instance_of(str))
    # Optional SSH keys loaded from `manhole_settings.ssh_priv_key_path` /
    # `ssh_pub_key_path`; None when no path was configured.
    priv_key: Optional[Key]
    pub_key: Optional[Key]
|
|
|
|
|
|
|
|
|
|
|
|
@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
    """Settings parsed from the `limit_remote_rooms` config section."""

    # Whether the room-complexity limit is applied at all.
    enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    # Maximum permitted room complexity.
    complexity: Union[float, int] = attr.ib(
        validator=attr.validators.instance_of((float, int)),  # noqa
        default=1.0,
    )
    # Message used when the limit is exceeded; defaults to
    # ROOM_COMPLEXITY_TOO_GREAT above.
    complexity_error: str = attr.ib(
        validator=attr.validators.instance_of(str),
        default=ROOM_COMPLEXITY_TOO_GREAT,
    )
    # Whether server admins are exempt from the limit when joining.
    admins_can_join: bool = attr.ib(
        validator=attr.validators.instance_of(bool), default=False
    )
|
2021-09-06 17:08:03 +02:00
|
|
|
|
|
|
|
|
2014-08-31 17:06:39 +02:00
|
|
|
class ServerConfig(Config):
|
2019-10-10 10:39:35 +02:00
|
|
|
section = "server"
|
|
|
|
|
2022-04-11 18:07:23 +02:00
|
|
|
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
|
2015-04-30 05:24:44 +02:00
|
|
|
self.server_name = config["server_name"]
|
2019-04-08 16:47:39 +02:00
|
|
|
self.server_context = config.get("server_context", None)
|
2018-07-04 19:15:03 +02:00
|
|
|
|
|
|
|
try:
|
|
|
|
parse_and_validate_server_name(self.server_name)
|
|
|
|
except ValueError as e:
|
|
|
|
raise ConfigError(str(e))
|
|
|
|
|
2015-04-30 05:24:44 +02:00
|
|
|
self.pid_file = self.abspath(config.get("pid_file"))
|
2019-03-19 11:06:40 +01:00
|
|
|
self.soft_file_limit = config.get("soft_file_limit", 0)
|
2022-04-11 18:07:23 +02:00
|
|
|
self.daemonize = bool(config.get("daemonize"))
|
|
|
|
self.print_pidfile = bool(config.get("print_pidfile"))
|
2015-10-06 03:49:39 +02:00
|
|
|
self.user_agent_suffix = config.get("user_agent_suffix")
|
2016-06-17 16:11:22 +02:00
|
|
|
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
|
2021-11-01 16:10:16 +01:00
|
|
|
self.serve_server_wellknown = config.get("serve_server_wellknown", False)
|
2021-04-23 19:22:47 +02:00
|
|
|
|
2021-11-08 15:13:10 +01:00
|
|
|
# Whether we should serve a "client well-known":
|
|
|
|
# (a) at .well-known/matrix/client on our client HTTP listener
|
|
|
|
# (b) in the response to /login
|
|
|
|
#
|
|
|
|
# ... which together help ensure that clients use our public_baseurl instead of
|
|
|
|
# whatever they were told by the user.
|
|
|
|
#
|
|
|
|
# For the sake of backwards compatibility with existing installations, this is
|
|
|
|
# True if public_baseurl is specified explicitly, and otherwise False. (The
|
|
|
|
# reasoning here is that we have no way of knowing that the default
|
|
|
|
# public_baseurl is actually correct for existing installations - many things
|
|
|
|
# will not work correctly, but that's (probably?) better than sending clients
|
|
|
|
# to a completely broken URL.
|
|
|
|
self.serve_client_wellknown = False
|
|
|
|
|
|
|
|
public_baseurl = config.get("public_baseurl")
|
|
|
|
if public_baseurl is None:
|
|
|
|
public_baseurl = f"https://{self.server_name}/"
|
|
|
|
logger.info("Using default public_baseurl %s", public_baseurl)
|
|
|
|
else:
|
|
|
|
self.serve_client_wellknown = True
|
|
|
|
if public_baseurl[-1] != "/":
|
|
|
|
public_baseurl += "/"
|
|
|
|
self.public_baseurl = public_baseurl
|
|
|
|
|
|
|
|
# check that public_baseurl is valid
|
|
|
|
try:
|
|
|
|
splits = urllib.parse.urlsplit(self.public_baseurl)
|
|
|
|
except Exception as e:
|
|
|
|
raise ConfigError(f"Unable to parse URL: {e}", ("public_baseurl",))
|
|
|
|
if splits.scheme not in ("https", "http"):
|
|
|
|
raise ConfigError(
|
|
|
|
f"Invalid scheme '{splits.scheme}': only https and http are supported"
|
|
|
|
)
|
|
|
|
if splits.query or splits.fragment:
|
|
|
|
raise ConfigError(
|
|
|
|
"public_baseurl cannot contain query parameters or a #-fragment"
|
|
|
|
)
|
2016-04-27 16:09:55 +02:00
|
|
|
|
2022-06-16 12:48:18 +02:00
|
|
|
self.extra_well_known_client_content = config.get(
|
|
|
|
"extra_well_known_client_content", {}
|
|
|
|
)
|
|
|
|
|
|
|
|
if not isinstance(self.extra_well_known_client_content, dict):
|
|
|
|
raise ConfigError(
|
|
|
|
"extra_well_known_content must be a dictionary of key-value pairs"
|
|
|
|
)
|
|
|
|
|
|
|
|
if "m.homeserver" in self.extra_well_known_client_content:
|
|
|
|
raise ConfigError(
|
|
|
|
"m.homeserver is not supported in extra_well_known_content, "
|
|
|
|
"use public_baseurl in base config instead."
|
|
|
|
)
|
|
|
|
if "m.identity_server" in self.extra_well_known_client_content:
|
|
|
|
raise ConfigError(
|
|
|
|
"m.identity_server is not supported in extra_well_known_content, "
|
|
|
|
"use default_identity_server in base config instead."
|
|
|
|
)
|
|
|
|
|
2018-08-17 17:08:45 +02:00
|
|
|
# Whether to enable user presence.
|
2021-04-06 15:38:30 +02:00
|
|
|
presence_config = config.get("presence") or {}
|
|
|
|
self.use_presence = presence_config.get("enabled")
|
|
|
|
if self.use_presence is None:
|
|
|
|
self.use_presence = config.get("use_presence", True)
|
|
|
|
|
|
|
|
# Custom presence router module
|
2021-08-17 15:22:45 +02:00
|
|
|
# This is the legacy way of configuring it (the config should now be put in the modules section)
|
2021-04-06 15:38:30 +02:00
|
|
|
self.presence_router_module_class = None
|
|
|
|
self.presence_router_config = None
|
|
|
|
presence_router_config = presence_config.get("presence_router")
|
|
|
|
if presence_router_config:
|
|
|
|
(
|
|
|
|
self.presence_router_module_class,
|
|
|
|
self.presence_router_config,
|
|
|
|
) = load_module(presence_router_config, ("presence", "presence_router"))
|
2018-08-17 17:08:45 +02:00
|
|
|
|
2017-11-21 14:29:39 +01:00
|
|
|
# whether to enable the media repository endpoints. This should be set
|
|
|
|
# to false if the media repository is running as a separate endpoint;
|
|
|
|
# doing so ensures that we will not run cache cleanup jobs on the
|
|
|
|
# master, potentially causing inconsistency.
|
|
|
|
self.enable_media_repo = config.get("enable_media_repo", True)
|
|
|
|
|
2019-05-08 19:26:56 +02:00
|
|
|
# Whether to require authentication to retrieve profile data (avatars,
|
|
|
|
# display names) of other users through the client API.
|
|
|
|
self.require_auth_for_profile_requests = config.get(
|
2019-06-20 11:32:02 +02:00
|
|
|
"require_auth_for_profile_requests", False
|
2019-05-08 19:26:56 +02:00
|
|
|
)
|
|
|
|
|
2019-12-16 17:11:55 +01:00
|
|
|
# Whether to require sharing a room with a user to retrieve their
|
|
|
|
# profile data
|
|
|
|
self.limit_profile_requests_to_users_who_share_rooms = config.get(
|
2021-02-16 23:32:34 +01:00
|
|
|
"limit_profile_requests_to_users_who_share_rooms",
|
|
|
|
False,
|
2019-12-16 17:11:55 +01:00
|
|
|
)
|
|
|
|
|
2021-02-19 10:50:41 +01:00
|
|
|
# Whether to retrieve and display profile data for a user when they
|
|
|
|
# are invited to a room
|
|
|
|
self.include_profile_data_on_invite = config.get(
|
|
|
|
"include_profile_data_on_invite", True
|
|
|
|
)
|
|
|
|
|
2019-06-24 12:45:11 +02:00
|
|
|
if "restrict_public_rooms_to_local_users" in config and (
|
|
|
|
"allow_public_rooms_without_auth" in config
|
|
|
|
or "allow_public_rooms_over_federation" in config
|
|
|
|
):
|
|
|
|
raise ConfigError(
|
|
|
|
"Can't use 'restrict_public_rooms_to_local_users' if"
|
|
|
|
" 'allow_public_rooms_without_auth' and/or"
|
|
|
|
" 'allow_public_rooms_over_federation' is set."
|
|
|
|
)
|
|
|
|
|
|
|
|
# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
|
|
|
|
# flag is now obsolete but we need to check it for backward-compatibility.
|
|
|
|
if config.get("restrict_public_rooms_to_local_users", False):
|
|
|
|
self.allow_public_rooms_without_auth = False
|
|
|
|
self.allow_public_rooms_over_federation = False
|
|
|
|
else:
|
2019-12-04 10:46:16 +01:00
|
|
|
# If set to 'true', removes the need for authentication to access the server's
|
|
|
|
# public rooms directory through the client API, meaning that anyone can
|
|
|
|
# query the room directory. Defaults to 'false'.
|
2019-06-24 12:45:11 +02:00
|
|
|
self.allow_public_rooms_without_auth = config.get(
|
2019-12-04 10:46:16 +01:00
|
|
|
"allow_public_rooms_without_auth", False
|
2019-06-24 12:45:11 +02:00
|
|
|
)
|
2019-12-04 10:46:16 +01:00
|
|
|
# If set to 'true', allows any other homeserver to fetch the server's public
|
|
|
|
# rooms directory via federation. Defaults to 'false'.
|
2019-06-24 12:45:11 +02:00
|
|
|
self.allow_public_rooms_over_federation = config.get(
|
2019-12-04 10:46:16 +01:00
|
|
|
"allow_public_rooms_over_federation", False
|
2019-06-24 12:45:11 +02:00
|
|
|
)
|
2019-05-08 19:26:56 +02:00
|
|
|
|
2019-06-20 11:32:02 +02:00
|
|
|
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
|
2019-05-23 16:00:20 +02:00
|
|
|
|
|
|
|
# Ensure room version is a str
|
|
|
|
default_room_version = str(default_room_version)
|
|
|
|
|
|
|
|
if default_room_version not in KNOWN_ROOM_VERSIONS:
|
|
|
|
raise ConfigError(
|
2019-06-20 11:32:02 +02:00
|
|
|
"Unknown default_room_version: %s, known room versions: %s"
|
|
|
|
% (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
|
2019-05-23 16:00:20 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
# Get the actual room version object rather than just the identifier
|
|
|
|
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
|
|
|
|
|
2018-12-04 12:01:02 +01:00
|
|
|
# whether to enable search. If disabled, new entries will not be inserted
|
|
|
|
# into the search tables and they will not be indexed. Users will receive
|
|
|
|
# errors when attempting to search for messages.
|
|
|
|
self.enable_search = config.get("enable_search", True)
|
|
|
|
|
2020-07-17 13:59:23 +02:00
|
|
|
self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
|
2017-05-13 18:17:54 +02:00
|
|
|
|
2017-09-19 17:08:14 +02:00
|
|
|
# Whether we should block invites sent to users on this server
|
|
|
|
# (other than those sent by local server admins)
|
2019-06-20 11:32:02 +02:00
|
|
|
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
|
2017-09-19 17:08:14 +02:00
|
|
|
|
2018-07-30 16:55:57 +02:00
|
|
|
# Options to control access by tracking MAU
|
|
|
|
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
|
2018-08-03 15:59:17 +02:00
|
|
|
self.max_mau_value = 0
|
2018-08-01 12:46:59 +02:00
|
|
|
if self.limit_usage_by_mau:
|
2019-06-20 11:32:02 +02:00
|
|
|
self.max_mau_value = config.get("max_mau_value", 0)
|
2018-11-15 19:08:27 +01:00
|
|
|
self.mau_stats_only = config.get("mau_stats_only", False)
|
2018-08-23 20:17:08 +02:00
|
|
|
|
2018-08-06 23:55:05 +02:00
|
|
|
self.mau_limits_reserved_threepids = config.get(
|
2018-08-07 18:49:43 +02:00
|
|
|
"mau_limit_reserved_threepids", []
|
2018-08-06 23:55:05 +02:00
|
|
|
)
|
2018-08-03 15:59:17 +02:00
|
|
|
|
2019-06-20 11:32:02 +02:00
|
|
|
self.mau_trial_days = config.get("mau_trial_days", 0)
|
2022-05-04 20:33:26 +02:00
|
|
|
self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
|
2019-10-24 12:48:46 +02:00
|
|
|
self.mau_limit_alerting = config.get("mau_limit_alerting", True)
|
2018-08-23 20:17:08 +02:00
|
|
|
|
2019-09-05 18:16:03 +02:00
|
|
|
# How long to keep redacted events in the database in unredacted form
|
|
|
|
# before redacting them.
|
2019-09-09 14:40:05 +02:00
|
|
|
redaction_retention_period = config.get("redaction_retention_period", "7d")
|
2019-09-09 14:23:41 +02:00
|
|
|
if redaction_retention_period is not None:
|
2021-11-23 16:21:19 +01:00
|
|
|
self.redaction_retention_period: Optional[int] = self.parse_duration(
|
2019-09-05 18:16:03 +02:00
|
|
|
redaction_retention_period
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
self.redaction_retention_period = None
|
|
|
|
|
2023-09-15 15:37:44 +02:00
|
|
|
# How long to keep locally forgotten rooms before purging them from the DB.
|
|
|
|
forgotten_room_retention_period = config.get(
|
|
|
|
"forgotten_room_retention_period", None
|
|
|
|
)
|
|
|
|
if forgotten_room_retention_period is not None:
|
|
|
|
self.forgotten_room_retention_period: Optional[int] = self.parse_duration(
|
|
|
|
forgotten_room_retention_period
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
self.forgotten_room_retention_period = None
|
|
|
|
|
2019-09-24 16:20:40 +02:00
|
|
|
# How long to keep entries in the `users_ips` table.
|
|
|
|
user_ips_max_age = config.get("user_ips_max_age", "28d")
|
|
|
|
if user_ips_max_age is not None:
|
2021-11-23 16:21:19 +01:00
|
|
|
self.user_ips_max_age: Optional[int] = self.parse_duration(user_ips_max_age)
|
2019-09-24 16:20:40 +02:00
|
|
|
else:
|
|
|
|
self.user_ips_max_age = None
|
|
|
|
|
2018-08-04 23:07:04 +02:00
|
|
|
# Options to disable HS
|
|
|
|
self.hs_disabled = config.get("hs_disabled", False)
|
|
|
|
self.hs_disabled_message = config.get("hs_disabled_message", "")
|
|
|
|
|
2018-08-15 12:41:18 +02:00
|
|
|
# Admin uri to direct users at should their instance become blocked
|
2018-08-13 19:00:23 +02:00
|
|
|
# due to resource constraints
|
2018-08-24 17:51:27 +02:00
|
|
|
self.admin_contact = config.get("admin_contact", None)
|
2018-08-13 19:00:23 +02:00
|
|
|
|
2023-05-19 14:25:25 +02:00
|
|
|
ip_range_blocklist = config.get(
|
|
|
|
"ip_range_blacklist", DEFAULT_IP_RANGE_BLOCKLIST
|
2020-12-09 19:56:06 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
# Attempt to create an IPSet from the given ranges
|
2021-02-03 13:13:46 +01:00
|
|
|
|
2023-05-19 14:25:25 +02:00
|
|
|
# Always block 0.0.0.0, ::
|
|
|
|
self.ip_range_blocklist = generate_ip_set(
|
|
|
|
ip_range_blocklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
|
2021-02-03 13:13:46 +01:00
|
|
|
)
|
2020-12-09 19:56:06 +01:00
|
|
|
|
2023-05-19 14:25:25 +02:00
|
|
|
self.ip_range_allowlist = generate_ip_set(
|
2021-02-03 13:13:46 +01:00
|
|
|
config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",)
|
|
|
|
)
|
2020-12-09 19:56:06 +01:00
|
|
|
# The federation_ip_range_blacklist is used for backwards-compatibility
|
2021-06-15 09:53:55 +02:00
|
|
|
# and only applies to federation and identity servers.
|
|
|
|
if "federation_ip_range_blacklist" in config:
|
2023-05-19 14:25:25 +02:00
|
|
|
# Always block 0.0.0.0, ::
|
|
|
|
self.federation_ip_range_blocklist = generate_ip_set(
|
2021-06-15 09:53:55 +02:00
|
|
|
config["federation_ip_range_blacklist"],
|
|
|
|
["0.0.0.0", "::"],
|
|
|
|
config_path=("federation_ip_range_blacklist",),
|
|
|
|
)
|
|
|
|
# 'federation_ip_range_whitelist' was never a supported configuration option.
|
2023-05-19 14:25:25 +02:00
|
|
|
self.federation_ip_range_allowlist = None
|
2021-06-15 09:53:55 +02:00
|
|
|
else:
|
|
|
|
# No backwards-compatiblity requrired, as federation_ip_range_blacklist
|
|
|
|
# is not given. Default to ip_range_blacklist and ip_range_whitelist.
|
2023-05-19 14:25:25 +02:00
|
|
|
self.federation_ip_range_blocklist = self.ip_range_blocklist
|
|
|
|
self.federation_ip_range_allowlist = self.ip_range_allowlist
|
2020-12-09 19:56:06 +01:00
|
|
|
|
2019-03-20 17:04:35 +01:00
|
|
|
# (undocumented) option for torturing the worker-mode replication a bit,
|
|
|
|
# for testing. The value defines the number of milliseconds to pause before
|
|
|
|
# sending out any replication updates.
|
|
|
|
self.replication_torture_level = config.get("replication_torture_level")
|
|
|
|
|
2019-05-02 10:21:29 +02:00
|
|
|
# Whether to require a user to be in the room to add an alias to it.
|
|
|
|
# Defaults to True.
|
|
|
|
self.require_membership_for_aliases = config.get(
|
2019-06-20 11:32:02 +02:00
|
|
|
"require_membership_for_aliases", True
|
2019-05-02 10:21:29 +02:00
|
|
|
)
|
|
|
|
|
2019-05-16 15:26:41 +02:00
|
|
|
# Whether to allow per-room membership profiles through the send of membership
|
|
|
|
# events with profile information that differ from the target's global profile.
|
|
|
|
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
|
|
|
|
|
2022-01-28 15:41:33 +01:00
|
|
|
# The maximum size an avatar can have, in bytes.
|
|
|
|
self.max_avatar_size = config.get("max_avatar_size")
|
|
|
|
if self.max_avatar_size is not None:
|
|
|
|
self.max_avatar_size = self.parse_size(self.max_avatar_size)
|
|
|
|
|
|
|
|
# The MIME types allowed for an avatar.
|
|
|
|
self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes")
|
|
|
|
if self.allowed_avatar_mimetypes and not isinstance(
|
|
|
|
self.allowed_avatar_mimetypes,
|
|
|
|
list,
|
|
|
|
):
|
|
|
|
raise ConfigError("allowed_avatar_mimetypes must be a list")
|
|
|
|
|
2022-09-15 22:32:25 +02:00
|
|
|
listeners = config.get("listeners", [])
|
|
|
|
if not isinstance(listeners, list):
|
|
|
|
raise ConfigError("Expected a list", ("listeners",))
|
|
|
|
|
|
|
|
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
|
2019-02-11 22:13:53 +01:00
|
|
|
|
2023-04-03 11:27:51 +02:00
|
|
|
# no_tls is not really supported anymore, but let's grandfather it in here.
|
2020-06-16 13:44:07 +02:00
|
|
|
if config.get("no_tls", False):
|
|
|
|
l2 = []
|
|
|
|
for listener in self.listeners:
|
2023-04-03 11:27:51 +02:00
|
|
|
if isinstance(listener, TCPListenerConfig) and listener.tls:
|
|
|
|
# Use isinstance() as the assertion this *has* a listener.port
|
2019-02-11 18:57:58 +01:00
|
|
|
logger.info(
|
2020-06-16 13:44:07 +02:00
|
|
|
"Ignoring TLS-enabled listener on port %i due to no_tls",
|
|
|
|
listener.port,
|
2019-02-11 18:57:58 +01:00
|
|
|
)
|
2019-02-13 12:48:56 +01:00
|
|
|
else:
|
2020-06-16 13:44:07 +02:00
|
|
|
l2.append(listener)
|
|
|
|
self.listeners = l2
|
2019-02-11 18:57:58 +01:00
|
|
|
|
2022-01-20 15:21:06 +01:00
|
|
|
self.web_client_location = config.get("web_client_location", None)
|
2022-02-03 19:36:49 +01:00
|
|
|
# Non-HTTP(S) web client location is not supported.
|
|
|
|
if self.web_client_location and not (
|
2022-01-20 15:21:06 +01:00
|
|
|
self.web_client_location.startswith("http://")
|
|
|
|
or self.web_client_location.startswith("https://")
|
2022-02-03 19:36:49 +01:00
|
|
|
):
|
|
|
|
raise ConfigError("web_client_location must point to a HTTP(S) URL.")
|
2018-12-11 14:18:48 +01:00
|
|
|
|
2016-06-16 12:06:12 +02:00
|
|
|
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
2021-05-05 17:53:45 +02:00
|
|
|
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
|
2016-06-07 16:45:56 +02:00
|
|
|
|
2019-07-29 18:47:27 +02:00
|
|
|
self.limit_remote_rooms = LimitRemoteRoomsConfig(
|
2020-05-22 11:11:50 +02:00
|
|
|
**(config.get("limit_remote_rooms") or {})
|
2019-07-29 18:47:27 +02:00
|
|
|
)
|
|
|
|
|
2015-06-12 16:33:07 +02:00
|
|
|
bind_port = config.get("bind_port")
|
|
|
|
if bind_port:
|
2019-02-11 18:57:58 +01:00
|
|
|
if config.get("no_tls", False):
|
|
|
|
raise ConfigError("no_tls is incompatible with bind_port")
|
|
|
|
|
2015-06-12 16:33:07 +02:00
|
|
|
self.listeners = []
|
|
|
|
bind_host = config.get("bind_host", "")
|
|
|
|
gzip_responses = config.get("gzip_responses", True)
|
|
|
|
|
2020-06-16 13:44:07 +02:00
|
|
|
http_options = HttpListenerConfig(
|
|
|
|
resources=[
|
|
|
|
HttpResourceConfig(names=["client"], compress=gzip_responses),
|
|
|
|
HttpResourceConfig(names=["federation"]),
|
|
|
|
],
|
|
|
|
)
|
|
|
|
|
2019-06-20 11:32:02 +02:00
|
|
|
self.listeners.append(
|
2023-04-03 11:27:51 +02:00
|
|
|
TCPListenerConfig(
|
2020-06-16 13:44:07 +02:00
|
|
|
port=bind_port,
|
|
|
|
bind_addresses=[bind_host],
|
|
|
|
tls=True,
|
|
|
|
type="http",
|
|
|
|
http_options=http_options,
|
|
|
|
)
|
2019-06-20 11:32:02 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
unsecure_port = config.get("unsecure_port", bind_port - 400)
|
|
|
|
if unsecure_port:
|
|
|
|
self.listeners.append(
|
2023-04-03 11:27:51 +02:00
|
|
|
TCPListenerConfig(
|
2020-06-16 13:44:07 +02:00
|
|
|
port=unsecure_port,
|
|
|
|
bind_addresses=[bind_host],
|
|
|
|
tls=False,
|
|
|
|
type="http",
|
|
|
|
http_options=http_options,
|
|
|
|
)
|
2019-06-20 11:32:02 +02:00
|
|
|
)
|
2015-04-30 05:24:44 +02:00
|
|
|
|
2015-06-12 18:41:36 +02:00
|
|
|
manhole = config.get("manhole")
|
|
|
|
if manhole:
|
2019-06-20 11:32:02 +02:00
|
|
|
self.listeners.append(
|
2023-04-03 11:27:51 +02:00
|
|
|
TCPListenerConfig(
|
2021-02-16 23:32:34 +01:00
|
|
|
port=manhole,
|
|
|
|
bind_addresses=["127.0.0.1"],
|
|
|
|
type="manhole",
|
2020-06-16 13:44:07 +02:00
|
|
|
)
|
2019-06-20 11:32:02 +02:00
|
|
|
)
|
2015-06-12 18:41:36 +02:00
|
|
|
|
2021-09-06 17:08:03 +02:00
|
|
|
manhole_settings = config.get("manhole_settings") or {}
|
|
|
|
validate_config(
|
|
|
|
_MANHOLE_SETTINGS_SCHEMA, manhole_settings, ("manhole_settings",)
|
|
|
|
)
|
|
|
|
|
|
|
|
manhole_username = manhole_settings.get("username", "matrix")
|
|
|
|
manhole_password = manhole_settings.get("password", "rabbithole")
|
|
|
|
manhole_priv_key_path = manhole_settings.get("ssh_priv_key_path")
|
|
|
|
manhole_pub_key_path = manhole_settings.get("ssh_pub_key_path")
|
|
|
|
|
|
|
|
manhole_priv_key = None
|
|
|
|
if manhole_priv_key_path is not None:
|
|
|
|
try:
|
|
|
|
manhole_priv_key = Key.fromFile(manhole_priv_key_path)
|
|
|
|
except Exception as e:
|
|
|
|
raise ConfigError(
|
|
|
|
f"Failed to read manhole private key file {manhole_priv_key_path}"
|
|
|
|
) from e
|
|
|
|
|
|
|
|
manhole_pub_key = None
|
|
|
|
if manhole_pub_key_path is not None:
|
|
|
|
try:
|
|
|
|
manhole_pub_key = Key.fromFile(manhole_pub_key_path)
|
|
|
|
except Exception as e:
|
|
|
|
raise ConfigError(
|
|
|
|
f"Failed to read manhole public key file {manhole_pub_key_path}"
|
|
|
|
) from e
|
|
|
|
|
|
|
|
self.manhole_settings = ManholeConfig(
|
|
|
|
username=manhole_username,
|
|
|
|
password=manhole_password,
|
|
|
|
priv_key=manhole_priv_key,
|
|
|
|
pub_key=manhole_pub_key,
|
|
|
|
)
|
|
|
|
|
2015-06-12 18:41:36 +02:00
|
|
|
metrics_port = config.get("metrics_port")
|
|
|
|
if metrics_port:
|
2019-09-26 13:57:01 +02:00
|
|
|
logger.warning(METRICS_PORT_WARNING)
|
2019-06-20 11:32:02 +02:00
|
|
|
|
|
|
|
self.listeners.append(
|
2023-04-03 11:27:51 +02:00
|
|
|
TCPListenerConfig(
|
2020-06-16 13:44:07 +02:00
|
|
|
port=metrics_port,
|
|
|
|
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
|
|
|
|
type="http",
|
|
|
|
http_options=HttpListenerConfig(
|
|
|
|
resources=[HttpResourceConfig(names=["metrics"])]
|
|
|
|
),
|
|
|
|
)
|
2019-06-20 11:32:02 +02:00
|
|
|
)
|
2015-06-12 18:41:36 +02:00
|
|
|
|
2019-06-17 19:04:42 +02:00
|
|
|
self.cleanup_extremities_with_dummy_events = config.get(
|
2019-09-25 18:27:35 +02:00
|
|
|
"cleanup_extremities_with_dummy_events", True
|
2019-06-17 19:04:42 +02:00
|
|
|
)
|
|
|
|
|
2020-05-07 11:35:23 +02:00
|
|
|
# The number of forward extremities in a room needed to send a dummy event.
|
|
|
|
self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
|
|
|
|
|
2019-12-03 20:19:45 +01:00
|
|
|
self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
|
|
|
|
|
2020-04-23 11:23:53 +02:00
|
|
|
# Inhibits the /requestToken endpoints from returning an error that might leak
|
|
|
|
# information about whether an e-mail address is in use or not on this
|
|
|
|
# homeserver, and instead return a 200 with a fake sid if this kind of error is
|
|
|
|
# met, without sending anything.
|
|
|
|
# This is a compromise between sending an email, which could be a spam vector,
|
|
|
|
# and letting the client know which email address is bound to an account and
|
|
|
|
# which one isn't.
|
|
|
|
self.request_token_inhibit_3pid_errors = config.get(
|
2021-02-16 23:32:34 +01:00
|
|
|
"request_token_inhibit_3pid_errors",
|
|
|
|
False,
|
2020-04-23 11:23:53 +02:00
|
|
|
)
|
|
|
|
|
2020-09-08 17:03:09 +02:00
|
|
|
# Whitelist of domain names that given next_link parameters must have
|
2021-07-15 12:02:43 +02:00
|
|
|
next_link_domain_whitelist: Optional[List[str]] = config.get(
|
2020-09-08 17:03:09 +02:00
|
|
|
"next_link_domain_whitelist"
|
2021-07-15 12:02:43 +02:00
|
|
|
)
|
2020-09-08 17:03:09 +02:00
|
|
|
|
2021-07-15 12:02:43 +02:00
|
|
|
self.next_link_domain_whitelist: Optional[Set[str]] = None
|
2020-09-08 17:03:09 +02:00
|
|
|
if next_link_domain_whitelist is not None:
|
|
|
|
if not isinstance(next_link_domain_whitelist, list):
|
|
|
|
raise ConfigError("'next_link_domain_whitelist' must be a list")
|
|
|
|
|
|
|
|
# Turn the list into a set to improve lookup speed.
|
|
|
|
self.next_link_domain_whitelist = set(next_link_domain_whitelist)
|
|
|
|
|
2021-08-17 14:45:24 +02:00
|
|
|
templates_config = config.get("templates") or {}
|
|
|
|
if not isinstance(templates_config, dict):
|
|
|
|
raise ConfigError("The 'templates' section must be a dictionary")
|
|
|
|
|
2021-09-06 17:08:03 +02:00
|
|
|
self.custom_template_directory: Optional[str] = templates_config.get(
|
2021-08-17 14:45:24 +02:00
|
|
|
"custom_template_directory"
|
|
|
|
)
|
|
|
|
if self.custom_template_directory is not None and not isinstance(
|
|
|
|
self.custom_template_directory, str
|
|
|
|
):
|
|
|
|
raise ConfigError("'custom_template_directory' must be a string")
|
|
|
|
|
2022-03-24 11:19:41 +01:00
|
|
|
self.use_account_validity_in_account_status: bool = (
|
|
|
|
config.get("use_account_validity_in_account_status") or False
|
|
|
|
)
|
|
|
|
|
2022-03-30 11:43:04 +02:00
|
|
|
self.rooms_to_exclude_from_sync: List[str] = (
|
|
|
|
config.get("exclude_rooms_from_sync") or []
|
|
|
|
)
|
|
|
|
|
2022-05-27 17:47:32 +02:00
|
|
|
delete_stale_devices_after: Optional[str] = (
|
|
|
|
config.get("delete_stale_devices_after") or None
|
|
|
|
)
|
|
|
|
|
|
|
|
if delete_stale_devices_after is not None:
|
|
|
|
self.delete_stale_devices_after: Optional[int] = self.parse_duration(
|
|
|
|
delete_stale_devices_after
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
self.delete_stale_devices_after = None
|
|
|
|
|
2019-10-02 14:29:01 +02:00
|
|
|
def has_tls_listener(self) -> bool:
    """Return True if at least one configured listener has TLS enabled."""
    for listener_config in self.listeners:
        if listener_config.is_tls():
            return True
    return False
|
2019-02-11 18:57:58 +01:00
|
|
|
|
2019-06-21 14:46:39 +02:00
|
|
|
def generate_config_section(
    self,
    config_dir_path: str,
    data_dir_path: str,
    server_name: str,
    open_private_ports: bool,
    listeners: Optional[List[dict]],
    **kwargs: Any,
) -> str:
    """Generate the server section of a fresh default config file.

    Args:
        config_dir_path: unused here; part of the shared generator signature.
        data_dir_path: directory in which the pid file will live.
        server_name: the server_name to write out. If it carries an explicit
            port, that port is used for the TLS listener.
        open_private_ports: if False, default plain-HTTP listeners are bound
            to localhost only.
        listeners: explicit listener dicts to emit instead of the defaults,
            or None/empty to emit the default listeners.

    Returns:
        YAML text for this config section.
    """
    # If the server name pins a port, use it for the TLS listener and derive
    # the plain-HTTP port from it; otherwise fall back to 8448/8008.
    _, bind_port = parse_and_validate_server_name(server_name)
    if bind_port is not None:
        unsecure_port = bind_port - 400
    else:
        bind_port = 8448
        unsecure_port = 8008

    pid_file = os.path.join(data_dir_path, "homeserver.pid")

    # Split any explicitly-requested listeners into TLS and non-TLS groups.
    secure_listeners = []
    unsecure_listeners = []
    private_addresses = ["::1", "127.0.0.1"]
    if listeners:
        for listener in listeners:
            if listener["tls"]:
                secure_listeners.append(listener)
            else:
                # If we don't want open ports we need to bind the listeners
                # to some address other than 0.0.0.0. Here we chose to use
                # localhost.
                # If the addresses are already bound we won't overwrite them
                # however.
                if not open_private_ports:
                    listener.setdefault("bind_addresses", private_addresses)

                unsecure_listeners.append(listener)

        # Render each group as YAML, indented to sit under "listeners:".
        secure_http_bindings = indent(
            yaml.dump(secure_listeners), " " * 10
        ).lstrip()

        unsecure_http_bindings = indent(
            yaml.dump(unsecure_listeners), " " * 10
        ).lstrip()

    if not unsecure_listeners:
        # No explicit non-TLS listeners: emit the default client/federation
        # HTTP listener on the derived unsecure port.
        # NOTE: the %(...)s placeholders are filled from locals() below, so
        # the local variable names above are load-bearing.
        unsecure_http_bindings = (
            """- port: %(unsecure_port)s
            tls: false
            type: http
            x_forwarded: true"""
            % locals()
        )

        if not open_private_ports:
            unsecure_http_bindings += (
                "\n            bind_addresses: ['::1', '127.0.0.1']"
            )

        unsecure_http_bindings += """

            resources:
              - names: [client, federation]
                compress: false"""

    # NOTE(review): when explicit listeners were supplied, the non-TLS
    # bindings rendered above are discarded here, so only the TLS group is
    # emitted — confirm this is the intended behaviour.
    if listeners:
        unsecure_http_bindings = ""

    # No TLS listeners requested: emit nothing for the secure group. This
    # also guarantees secure_http_bindings is defined when `listeners` was
    # None/empty (the yaml.dump assignment above is skipped in that case).
    if not secure_listeners:
        secure_http_bindings = ""

    return (
        """\
        server_name: "%(server_name)s"
        pid_file: %(pid_file)s
        listeners:
          %(secure_http_bindings)s
          %(unsecure_http_bindings)s
        """
        % locals()
    )
|
2015-04-30 05:24:44 +02:00
|
|
|
|
2021-12-01 13:28:23 +01:00
|
|
|
def read_arguments(self, args: argparse.Namespace) -> None:
    """Copy server-related command line options onto this config object.

    Only options the user actually supplied (i.e. that parsed to something
    other than None) override the values loaded from the config file.
    """
    for option in ("manhole", "daemonize", "print_pidfile"):
        supplied = getattr(args, option)
        if supplied is not None:
            setattr(self, option, supplied)
|
2014-09-03 12:57:23 +02:00
|
|
|
|
2019-07-15 14:15:34 +02:00
|
|
|
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> None:
    """Register the server-related command line options on *parser*."""
    group = parser.add_argument_group("server")

    # (flags, keyword arguments) for each supported option. store_true
    # options default to None (not False) so read_arguments can tell
    # "not supplied" apart from "supplied".
    options = [
        (
            ("-D", "--daemonize"),
            {
                "action": "store_true",
                "default": None,
                "help": "Daemonize the homeserver",
            },
        ),
        (
            ("--print-pidfile",),
            {
                "action": "store_true",
                "default": None,
                "help": "Print the path to the pidfile just before daemonizing",
            },
        ),
        (
            ("--manhole",),
            {
                "metavar": "PORT",
                "dest": "manhole",
                "type": int,
                "help": "Turn on the twisted telnet manhole service on the given port.",
            },
        ),
    ]
    for flags, kwargs in options:
        group.add_argument(*flags, **kwargs)
|
2016-06-16 12:06:12 +02:00
|
|
|
|
2021-12-09 17:15:46 +01:00
|
|
|
def read_gc_intervals(self, durations: Any) -> Optional[Tuple[float, float, float]]:
    """Parse the three durations of the GC min-interval option into seconds.

    Returns None when the option is unset; raises ConfigError when the value
    is not a list of exactly three parseable durations.
    """
    if durations is None:
        return None

    try:
        if len(durations) != 3:
            raise ValueError()
        # parse_duration yields milliseconds; the callers want seconds.
        gen0, gen1, gen2 = (self.parse_duration(d) / 1000 for d in durations)
        return (gen0, gen1, gen2)
    except Exception:
        raise ConfigError(
            "Value of `gc_min_interval` must be a list of three durations if set"
        )
|
|
|
|
|
2016-06-16 12:06:12 +02:00
|
|
|
|
2021-12-01 13:28:23 +01:00
|
|
|
def is_threepid_reserved(
    reserved_threepids: List[JsonDict], threepid: JsonDict
) -> bool:
    """Check the threepid against the reserved threepid config.

    Args:
        reserved_threepids: List of reserved threepids, each a dict with
            "medium" and "address" keys.
        threepid: The threepid to test for.

    Returns:
        True if the threepid under test is reserved, False otherwise.
    """
    # Key lookups stay inside the generator so an empty reserved list never
    # touches the threepid dict at all (matching historical behaviour).
    return any(
        threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]
        for tp in reserved_threepids
    )
|
|
|
|
|
|
|
|
|
2021-12-01 13:28:23 +01:00
|
|
|
def read_gc_thresholds(
    thresholds: Optional[List[Any]],
) -> Optional[Tuple[int, int, int]]:
    """Reads the three integer thresholds for garbage collection. Ensures that
    the thresholds are integers if thresholds are supplied.

    Args:
        thresholds: The raw `gc_threshold` config value, or None if unset.

    Returns:
        A (gen0, gen1, gen2) tuple of ints, or None when thresholds is None.

    Raises:
        ConfigError: if the value is not a list of three integers.
    """
    if thresholds is None:
        return None
    try:
        # Explicit length check rather than `assert`: asserts are stripped
        # when Python runs with -O, which would let e.g. a four-element list
        # silently pass validation. This also matches read_gc_intervals.
        if len(thresholds) != 3:
            raise ValueError()
        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )
|
2018-12-11 14:18:48 +01:00
|
|
|
|
|
|
|
|
2022-09-06 09:50:02 +02:00
|
|
|
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
    """Parse a single listener entry from the config file.

    Args:
        num: index of this listener within the `listeners` config list,
            used to point error messages at the right entry.
        listener: the raw config value for this listener.

    Returns:
        A TCPListenerConfig or UnixListenerConfig describing the listener.

    Raises:
        ConfigError: if the listener entry is malformed.
    """
    if not isinstance(listener, dict):
        raise ConfigError("Expected a dictionary", ("listeners", str(num)))

    listener_type = listener["type"]
    # Direct TCP replication was removed; raise a helpful error if it is
    # still configured.
    if listener_type == "replication":
        raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))

    port = listener.get("port")
    socket_path = listener.get("path")

    # Exactly one of `port` (TCP) or `path` (unix socket) must be declared,
    # and each must have the right type.
    if port is not None and not isinstance(port, int):
        raise ConfigError("Listener configuration is lacking a valid 'port' option")
    if socket_path is not None and not isinstance(socket_path, str):
        raise ConfigError("Listener configuration is lacking a valid 'path' option")
    if port and socket_path:
        raise ConfigError(
            "Can not have both a UNIX socket and an IP/port declared for the same "
            "resource!"
        )
    if port is None and socket_path is None:
        raise ConfigError(
            "Must have either a UNIX socket or an IP/port declared for a given "
            "resource!"
        )

    tls = listener.get("tls", False)

    http_config = None
    if listener_type == "http":
        try:
            resources = [
                HttpResourceConfig(**res) for res in listener.get("resources", [])
            ]
        except ValueError as e:
            raise ConfigError("Unknown listener resource") from e

        # For a unix socket, default x_forwarded to True, as this is the only
        # way of getting a client IP.
        # Note: a reverse proxy is required anyway, as there is no way of
        # exposing a unix socket to the internet.
        http_config = HttpListenerConfig(
            x_forwarded=listener.get("x_forwarded", bool(socket_path)),
            resources=resources,
            additional_resources=listener.get("additional_resources", {}),
            tag=listener.get("tag"),
            request_id_header=listener.get("request_id_header"),
            experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
        )

    if socket_path:
        # TODO: validate the path (e.g. does the directory exist, is it
        # writable)?
        # Default the socket permissions in case they were left out.
        socket_mode = listener.get("mode", 0o666)
        return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)

    assert port is not None

    # Merge the plural `bind_addresses` with the singular `bind_address`.
    bind_addresses = listener.get("bind_addresses", [])
    bind_address = listener.get("bind_address")
    if bind_address:
        bind_addresses.append(bind_address)

    # Still nothing to bind to? Use the defaults.
    if not bind_addresses:
        if listener_type == "metrics":
            # the metrics listener doesn't support IPv6
            bind_addresses.append("0.0.0.0")
        else:
            bind_addresses.extend(DEFAULT_BIND_ADDRESSES)

    return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)
|
2020-06-16 13:44:07 +02:00
|
|
|
|
|
|
|
|
2021-09-06 17:08:03 +02:00
|
|
|
# JSON schema for the optional `manhole_settings` config option; read_config
# runs the raw value through validate_config against this schema before
# reading the individual fields out of it.
_MANHOLE_SETTINGS_SCHEMA = {
    "type": "object",
    "properties": {
        "username": {"type": "string"},
        "password": {"type": "string"},
        "ssh_priv_key_path": {"type": "string"},
        "ssh_pub_key_path": {"type": "string"},
    },
}
|