Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.16.1

commit 00c281f6a4

@@ -80,6 +80,7 @@ echo >&2 "Running sytest with PostgreSQL";
     --synapse-directory $WORKSPACE \
     --dendron $WORKSPACE/dendron/bin/dendron \
     --pusher \
+    --synchrotron \
     --port-base $PORT_BASE

 cd ..

@@ -18,10 +18,8 @@ import synapse

 from synapse.server import HomeServer
 from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.emailconfig import EmailConfig
-from synapse.config.key import KeyConfig
+from synapse.config.logger import setup_logging
+from synapse.config.homeserver import HomeServerConfig
 from synapse.http.site import SynapseSite
 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
 from synapse.storage.roommember import RoomMemberStore

@@ -43,98 +41,13 @@ from twisted.web.resource import Resource

 from daemonize import Daemonize

-import gc
 import sys
 import logging
+import gc

 logger = logging.getLogger("synapse.app.pusher")


-class SlaveConfig(DatabaseConfig):
-    def read_config(self, config):
-        self.replication_url = config["replication_url"]
-        self.server_name = config["server_name"]
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use", False
-        )
-        self.user_agent_suffix = None
-        self.start_pushers = True
-        self.listeners = config["listeners"]
-        self.soft_file_limit = config.get("soft_file_limit")
-        self.daemonize = config.get("daemonize")
-        self.pid_file = self.abspath(config.get("pid_file"))
-        self.public_baseurl = config["public_baseurl"]
-
-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
-
-        # some things used by the auth handler but not actually used in the
-        # pusher codebase
-        self.bcrypt_rounds = None
-        self.ldap_enabled = None
-        self.ldap_server = None
-        self.ldap_port = None
-        self.ldap_tls = None
-        self.ldap_search_base = None
-        self.ldap_search_property = None
-        self.ldap_email_property = None
-        self.ldap_full_name_property = None
-
-        # We would otherwise try to use the registration shared secret as the
-        # macaroon shared secret if there was no macaroon_shared_secret, but
-        # that means pulling in RegistrationConfig too. We don't need to be
-        # backwards compaitible in the pusher codebase so just make people set
-        # macaroon_shared_secret. We set this to None to prevent it referencing
-        # an undefined key.
-        self.registration_shared_secret = None
-
-    def default_config(self, server_name, **kwargs):
-        pid_file = self.abspath("pusher.pid")
-        return """\
-        # Slave configuration
-
-        # The replication listener on the synapse to talk to.
-        #replication_url: https://localhost:{replication_port}/_synapse/replication
-
-        server_name: "%(server_name)s"
-
-        listeners: []
-        # Enable a ssh manhole listener on the pusher.
-        # - type: manhole
-        #   port: {manhole_port}
-        #   bind_address: 127.0.0.1
-        # Enable a metric listener on the pusher.
-        # - type: http
-        #   port: {metrics_port}
-        #   bind_address: 127.0.0.1
-        #   resources:
-        #     - names: ["metrics"]
-        #       compress: False
-
-        report_stats: False
-
-        daemonize: False
-
-        pid_file: %(pid_file)s
-
-        """ % locals()
-
-
-class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig):
-    pass
-
-
 class PusherSlaveStore(
     SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
     SlavedAccountDataStore

@@ -199,7 +112,7 @@ class PusherServer(HomeServer):

     def remove_pusher(self, app_id, push_key, user_id):
         http_client = self.get_simple_http_client()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         url = replication_url + "/remove_pushers"
         return http_client.post_json_get_json(url, {
             "remove": [{

@@ -232,8 +145,8 @@ class PusherServer(HomeServer):
         )
         logger.info("Synapse pusher now listening on port %d", port)

-    def start_listening(self):
-        for listener in self.config.listeners:
+    def start_listening(self, listeners):
+        for listener in listeners:
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":

@@ -253,7 +166,7 @@ class PusherServer(HomeServer):
     def replicate(self):
         http_client = self.get_simple_http_client()
         store = self.get_datastore()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         pusher_pool = self.get_pusherpool()
         clock = self.get_clock()

@@ -329,19 +242,30 @@ class PusherServer(HomeServer):
             yield sleep(30)


-def setup(config_options):
+def start(config_options):
     try:
-        config = PusherSlaveConfig.load_config(
+        config = HomeServerConfig.load_config(
             "Synapse pusher", config_options
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
         sys.exit(1)

-    if not config:
-        sys.exit(0)
+    assert config.worker_app == "synapse.app.pusher"

-    config.setup_logging()
+    setup_logging(config.worker_log_config, config.worker_log_file)

+    if config.start_pushers:
+        sys.stderr.write(
+            "\nThe pushers must be disabled in the main synapse process"
+            "\nbefore they can be run in a separate worker."
+            "\nPlease add ``start_pushers: false`` to the main config"
+            "\n"
+        )
+        sys.exit(1)
+
+    # Force the pushers to start since they will be disabled in the main config
+    config.start_pushers = True
+
     database_engine = create_engine(config.database_config)

@@ -354,11 +278,15 @@ def setup(config_options):
     )

     ps.setup()
-    ps.start_listening()
+    ps.start_listening(config.worker_listeners)

-    change_resource_limit(ps.config.soft_file_limit)
-    if ps.config.gc_thresholds:
-        gc.set_threshold(*ps.config.gc_thresholds)
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()

     def start():
         ps.replicate()

@@ -367,30 +295,20 @@ def setup(config_options):

     reactor.callWhenRunning(start)

-    return ps
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-pusher",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()


 if __name__ == '__main__':
     with LoggingContext("main"):
-        ps = setup(sys.argv[1:])
-
-        if ps.config.daemonize:
-            def run():
-                with LoggingContext("run"):
-                    change_resource_limit(ps.config.soft_file_limit)
-                    if ps.config.gc_thresholds:
-                        gc.set_threshold(*ps.config.gc_thresholds)
-                    reactor.run()
-
-            daemon = Daemonize(
-                app="synapse-pusher",
-                pid=ps.config.pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-
-            daemon.start()
-        else:
-            reactor.run()
+        ps = start(sys.argv[1:])

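The daemonize decision now lives in `start()` beside the `run()` closure, instead of in the `__main__` block. A minimal self-contained sketch of the same pattern, assuming the `daemonize` package; the app name and pid path here are illustrative, not from the diff:

```python
# Sketch of the run()/Daemonize split used above, assuming the `daemonize`
# package. In synapse, run() wraps the twisted reactor in a LoggingContext;
# here it just stands in for the worker's main loop.
import logging
from daemonize import Daemonize

logger = logging.getLogger("example-worker")

def run():
    logger.info("Running")  # placeholder for reactor.run() and friends

worker_daemonize = False  # mirrors config.worker_daemonize
if worker_daemonize:
    daemon = Daemonize(
        app="example-worker",           # illustrative name
        pid="/tmp/example-worker.pid",  # illustrative pid file
        action=run,
        auto_close_fds=False,
        verbose=True,
        logger=logger,
    )
    daemon.start()
else:
    run()
```
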
@@ -18,9 +18,8 @@ import synapse

 from synapse.api.constants import EventTypes, PresenceState
 from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.appservice import AppServiceConfig
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
 from synapse.events import FrozenEvent
 from synapse.handlers.presence import PresenceHandler
 from synapse.http.site import SynapseSite

@@ -63,70 +62,6 @@ import ujson as json
 logger = logging.getLogger("synapse.app.synchrotron")


-class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig):
-    def read_config(self, config):
-        self.replication_url = config["replication_url"]
-        self.server_name = config["server_name"]
-        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
-            "use_insecure_ssl_client_just_for_testing_do_not_use", False
-        )
-        self.user_agent_suffix = None
-        self.listeners = config["listeners"]
-        self.soft_file_limit = config.get("soft_file_limit")
-        self.daemonize = config.get("daemonize")
-        self.pid_file = self.abspath(config.get("pid_file"))
-        self.macaroon_secret_key = config["macaroon_secret_key"]
-        self.expire_access_token = config.get("expire_access_token", False)
-
-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
-
-    def default_config(self, server_name, **kwargs):
-        pid_file = self.abspath("synchroton.pid")
-        return """\
-        # Slave configuration
-
-        # The replication listener on the synapse to talk to.
-        #replication_url: https://localhost:{replication_port}/_synapse/replication
-
-        server_name: "%(server_name)s"
-
-        listeners:
-        # Enable a /sync listener on the synchrontron
-        #- type: http
-        #    port: {http_port}
-        #    bind_address: ""
-        # Enable a ssh manhole listener on the synchrotron
-        # - type: manhole
-        #   port: {manhole_port}
-        #   bind_address: 127.0.0.1
-        # Enable a metric listener on the synchrotron
-        # - type: http
-        #   port: {metrics_port}
-        #   bind_address: 127.0.0.1
-        #   resources:
-        #     - names: ["metrics"]
-        #       compress: False
-
-        report_stats: False
-
-        daemonize: False
-
-        pid_file: %(pid_file)s
-        """ % locals()
-
-
 class SynchrotronSlavedStore(
     SlavedPushRuleStore,
     SlavedEventStore,

@@ -163,7 +98,7 @@ class SynchrotronPresence(object):
         self.http_client = hs.get_simple_http_client()
         self.store = hs.get_datastore()
         self.user_to_num_current_syncs = {}
-        self.syncing_users_url = hs.config.replication_url + "/syncing_users"
+        self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
         self.clock = hs.get_clock()

         active_presence = self.store.take_presence_startup_info()

@@ -350,8 +285,8 @@ class SynchrotronServer(HomeServer):
         )
         logger.info("Synapse synchrotron now listening on port %d", port)

-    def start_listening(self):
-        for listener in self.config.listeners:
+    def start_listening(self, listeners):
+        for listener in listeners:
             if listener["type"] == "http":
                 self._listen_http(listener)
             elif listener["type"] == "manhole":

@@ -371,7 +306,7 @@ class SynchrotronServer(HomeServer):
     def replicate(self):
         http_client = self.get_simple_http_client()
         store = self.get_datastore()
-        replication_url = self.config.replication_url
+        replication_url = self.config.worker_replication_url
         clock = self.get_clock()
         notifier = self.get_notifier()
         presence_handler = self.get_presence_handler()

@@ -470,19 +405,18 @@ class SynchrotronServer(HomeServer):
         return SynchrotronTyping(self)


-def setup(config_options):
+def start(config_options):
     try:
-        config = SynchrotronConfig.load_config(
+        config = HomeServerConfig.load_config(
             "Synapse synchrotron", config_options
         )
     except ConfigError as e:
         sys.stderr.write("\n" + e.message + "\n")
         sys.exit(1)

-    if not config:
-        sys.exit(0)
+    assert config.worker_app == "synapse.app.synchrotron"

-    config.setup_logging()
+    setup_logging(config.worker_log_config, config.worker_log_file)

     database_engine = create_engine(config.database_config)

@@ -496,11 +430,15 @@ def setup(config_options):
     )

     ss.setup()
-    ss.start_listening()
+    ss.start_listening(config.worker_listeners)

-    change_resource_limit(ss.config.soft_file_limit)
-    if ss.config.gc_thresholds:
-        ss.set_threshold(*ss.config.gc_thresholds)
+    def run():
+        with LoggingContext("run"):
+            logger.info("Running")
+            change_resource_limit(config.soft_file_limit)
+            if config.gc_thresholds:
+                gc.set_threshold(*config.gc_thresholds)
+            reactor.run()

     def start():
         ss.get_datastore().start_profiling()

@@ -508,30 +446,20 @@ def setup(config_options):

     reactor.callWhenRunning(start)

-    return ss
+    if config.worker_daemonize:
+        daemon = Daemonize(
+            app="synapse-synchrotron",
+            pid=config.worker_pid_file,
+            action=run,
+            auto_close_fds=False,
+            verbose=True,
+            logger=logger,
+        )
+        daemon.start()
+    else:
+        run()


 if __name__ == '__main__':
     with LoggingContext("main"):
-        ss = setup(sys.argv[1:])
-
-        if ss.config.daemonize:
-            def run():
-                with LoggingContext("run"):
-                    change_resource_limit(ss.config.soft_file_limit)
-                    if ss.config.gc_thresholds:
-                        gc.set_threshold(*ss.config.gc_thresholds)
-                    reactor.run()
-
-            daemon = Daemonize(
-                app="synapse-synchrotron",
-                pid=ss.config.pid_file,
-                action=run,
-                auto_close_fds=False,
-                verbose=True,
-                logger=logger,
-            )
-
-            daemon.start()
-        else:
-            reactor.run()
+        start(sys.argv[1:])

@@ -32,13 +32,15 @@ from .password import PasswordConfig
 from .jwt import JWTConfig
 from .ldap import LDAPConfig
 from .emailconfig import EmailConfig
+from .workers import WorkerConfig


 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,):
+                       JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
+                       WorkerConfig,):
     pass

@@ -126,54 +126,58 @@ class LoggingConfig(Config):
         )

     def setup_logging(self):
-        log_format = (
-            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
-            " - %(message)s"
-        )
-        if self.log_config is None:
-
-            level = logging.INFO
-            level_for_storage = logging.INFO
-            if self.verbosity:
-                level = logging.DEBUG
-                if self.verbosity > 1:
-                    level_for_storage = logging.DEBUG
-
-            # FIXME: we need a logging.WARN for a -q quiet option
-            logger = logging.getLogger('')
-            logger.setLevel(level)
-
-            logging.getLogger('synapse.storage').setLevel(level_for_storage)
-
-            formatter = logging.Formatter(log_format)
-            if self.log_file:
-                # TODO: Customisable file size / backup count
-                handler = logging.handlers.RotatingFileHandler(
-                    self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
-                )
-
-                def sighup(signum, stack):
-                    logger.info("Closing log file due to SIGHUP")
-                    handler.doRollover()
-                    logger.info("Opened new log file due to SIGHUP")
-
-                # TODO(paul): obviously this is a terrible mechanism for
-                # stealing SIGHUP, because it means no other part of synapse
-                # can use it instead. If we want to catch SIGHUP anywhere
-                # else as well, I'd suggest we find a nicer way to broadcast
-                # it around.
-                if getattr(signal, "SIGHUP"):
-                    signal.signal(signal.SIGHUP, sighup)
-            else:
-                handler = logging.StreamHandler()
-            handler.setFormatter(formatter)
-
-            handler.addFilter(LoggingContextFilter(request=""))
-
-            logger.addHandler(handler)
-        else:
-            with open(self.log_config, 'r') as f:
-                logging.config.dictConfig(yaml.load(f))
-
-        observer = PythonLoggingObserver()
-        observer.start()
+        setup_logging(self.log_config, self.log_file, self.verbosity)
+
+
+def setup_logging(log_config=None, log_file=None, verbosity=None):
+    log_format = (
+        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
+        " - %(message)s"
+    )
+    if log_config is None:
+
+        level = logging.INFO
+        level_for_storage = logging.INFO
+        if verbosity:
+            level = logging.DEBUG
+            if verbosity > 1:
+                level_for_storage = logging.DEBUG
+
+        # FIXME: we need a logging.WARN for a -q quiet option
+        logger = logging.getLogger('')
+        logger.setLevel(level)
+
+        logging.getLogger('synapse.storage').setLevel(level_for_storage)
+
+        formatter = logging.Formatter(log_format)
+        if log_file:
+            # TODO: Customisable file size / backup count
+            handler = logging.handlers.RotatingFileHandler(
+                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+            )
+
+            def sighup(signum, stack):
+                logger.info("Closing log file due to SIGHUP")
+                handler.doRollover()
+                logger.info("Opened new log file due to SIGHUP")
+
+            # TODO(paul): obviously this is a terrible mechanism for
+            # stealing SIGHUP, because it means no other part of synapse
+            # can use it instead. If we want to catch SIGHUP anywhere
+            # else as well, I'd suggest we find a nicer way to broadcast
+            # it around.
+            if getattr(signal, "SIGHUP"):
+                signal.signal(signal.SIGHUP, sighup)
+        else:
+            handler = logging.StreamHandler()
+        handler.setFormatter(formatter)
+
+        handler.addFilter(LoggingContextFilter(request=""))
+
+        logger.addHandler(handler)
+    else:
+        with open(log_config, 'r') as f:
+            logging.config.dictConfig(yaml.load(f))
+
+    observer = PythonLoggingObserver()
+    observer.start()

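With `setup_logging` hoisted to module level, the worker entry points above can configure logging from the worker-specific keys without building a `LoggingConfig`. A minimal usage sketch of the new function; with neither a log config nor a log file it installs an INFO-level `StreamHandler` on the root logger:

```python
# Minimal sketch: this is the call the pusher and synchrotron make above.
# With log_config=None and log_file=None it logs to stderr at INFO.
from synapse.config.logger import setup_logging

setup_logging(log_config=None, log_file=None, verbosity=None)
```
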
@@ -27,7 +27,7 @@ class ServerConfig(Config):
         self.daemonize = config.get("daemonize")
         self.print_pidfile = config.get("print_pidfile")
         self.user_agent_suffix = config.get("user_agent_suffix")
-        self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+        self.use_frozen_dicts = config.get("use_frozen_dicts", False)
         self.public_baseurl = config.get("public_baseurl")
         self.secondary_directory_servers = config.get("secondary_directory_servers", [])

@@ -38,19 +38,7 @@ class ServerConfig(Config):

         self.listeners = config.get("listeners", [])

-        thresholds = config.get("gc_thresholds", None)
-        if thresholds is not None:
-            try:
-                assert len(thresholds) == 3
-                self.gc_thresholds = (
-                    int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
-                )
-            except:
-                raise ConfigError(
-                    "Value of `gc_threshold` must be a list of three integers if set"
-                )
-        else:
-            self.gc_thresholds = None
+        self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))

         bind_port = config.get("bind_port")
         if bind_port:

@@ -264,3 +252,20 @@ class ServerConfig(Config):
                           type=int,
                           help="Turn on the twisted telnet manhole"
                           " service on the given port.")
+
+
+def read_gc_thresholds(thresholds):
+    """Reads the three integer thresholds for garbage collection. Ensures that
+    the thresholds are integers if thresholds are supplied.
+    """
+    if thresholds is None:
+        return None
+    try:
+        assert len(thresholds) == 3
+        return (
+            int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+        )
+    except:
+        raise ConfigError(
+            "Value of `gc_threshold` must be a list of three integers if set"
+        )

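`read_gc_thresholds` centralises validation that was previously copy-pasted into each worker config class. A self-contained restatement of its contract; the `ConfigError` here stands in for `synapse.config._base.ConfigError`:

```python
# Contract of read_gc_thresholds, restated from the hunk above.
class ConfigError(Exception):  # stand-in for synapse.config._base.ConfigError
    pass

def read_gc_thresholds(thresholds):
    if thresholds is None:
        return None
    try:
        assert len(thresholds) == 3
        return (int(thresholds[0]), int(thresholds[1]), int(thresholds[2]))
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )

assert read_gc_thresholds(None) is None  # unset stays unset
assert read_gc_thresholds(["5000", "20", "20"]) == (5000, 20, 20)
# The resulting tuple is what gc.set_threshold(*thresholds) receives
# in the worker run() functions above.
```
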
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 matrix.org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class WorkerConfig(Config):
+    """The workers are processes run separately to the main synapse process.
+    They have their own pid_file and listener configuration. They use the
+    replication_url to talk to the main synapse process."""
+
+    def read_config(self, config):
+        self.worker_app = config.get("worker_app")
+        self.worker_listeners = config.get("worker_listeners")
+        self.worker_daemonize = config.get("worker_daemonize")
+        self.worker_pid_file = config.get("worker_pid_file")
+        self.worker_log_file = config.get("worker_log_file")
+        self.worker_log_config = config.get("worker_log_config")
+        self.worker_replication_url = config.get("worker_replication_url")

@@ -193,13 +193,16 @@ class FederationServer(FederationBase):
             )

             for event in auth_chain:
-                event.signatures.update(
-                    compute_event_signature(
-                        event,
-                        self.hs.hostname,
-                        self.hs.config.signing_key[0]
-                    )
-                )
+                # We sign these again because there was a bug where we
+                # incorrectly signed things the first time round
+                if self.hs.is_mine_id(event.event_id):
+                    event.signatures.update(
+                        compute_event_signature(
+                            event,
+                            self.hs.hostname,
+                            self.hs.config.signing_key[0]
+                        )
+                    )
         else:
             raise NotImplementedError("Specify an event")

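The new guard re-signs only events this homeserver originally created. `is_mine_id` is not part of this diff; a sketch of the check it is assumed to perform, namely comparing the domain part of the ID against the local server name:

```python
# Assumed shape of the is_mine_id() check used above: matrix IDs look like
# "$localpart:domain", so the domain part identifies the minting server.
def is_mine_id(string, hostname):
    return string.split(":", 1)[1] == hostname

assert is_mine_id("$1466086600447:example.com", "example.com")
assert not is_mine_id("$1466086600447:elsewhere.org", "example.com")
```
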
@@ -37,7 +37,7 @@ class TransportLayerServer(JsonResource):
         self.hs = hs
         self.clock = hs.get_clock()

-        super(TransportLayerServer, self).__init__(hs)
+        super(TransportLayerServer, self).__init__(hs, canonical_json=False)

         self.authenticator = Authenticator(hs)
         self.ratelimiter = FederationRateLimiter(

@@ -626,6 +626,6 @@ class AuthHandler(BaseHandler):
             Whether self.hash(password) == stored_hash (bool).
         """
         if stored_hash:
-            return bcrypt.hashpw(password, stored_hash) == stored_hash
+            return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash
         else:
             return False

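The `.encode('utf-8')` matters because database drivers can hand the stored hash back as a unicode string, while bcrypt operates on bytes. A sketch assuming the `bcrypt` package; on Python 3 it raises `TypeError` outright for unicode input:

```python
# Sketch with the bcrypt package: the stored hash may come back from the
# database as unicode, but hashpw() wants bytes on both sides.
import bcrypt

password = b"s3cret"
stored_hash = bcrypt.hashpw(password, bcrypt.gensalt()).decode("utf-8")

try:
    bcrypt.hashpw(password, stored_hash)  # unicode hash: TypeError
except TypeError:
    pass

valid = bcrypt.hashpw(password, stored_hash.encode("utf-8")) == \
    stored_hash.encode("utf-8")
print(valid)  # True
```
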
@@ -1018,13 +1018,16 @@ class FederationHandler(BaseHandler):

             res = results.values()
             for event in res:
-                event.signatures.update(
-                    compute_event_signature(
-                        event,
-                        self.hs.hostname,
-                        self.hs.config.signing_key[0]
-                    )
-                )
+                # We sign these again because there was a bug where we
+                # incorrectly signed things the first time round
+                if self.hs.is_mine_id(event.event_id):
+                    event.signatures.update(
+                        compute_event_signature(
+                            event,
+                            self.hs.hostname,
+                            self.hs.config.signing_key[0]
+                        )
+                    )

             defer.returnValue(res)
         else:

@@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler):
             "profile", self.on_profile_query
         )

-        distributor = hs.get_distributor()
-
-        distributor.observe("registered_user", self.registered_user)
-
-    def registered_user(self, user):
-        return self.store.create_profile(user.localpart)
-
     @defer.inlineCallbacks
     def get_displayname(self, target_user):
         if self.hs.is_mine(target_user):

@@ -23,7 +23,6 @@ from synapse.api.errors import (
 from ._base import BaseHandler
 from synapse.util.async import run_on_reactor
 from synapse.http.client import CaptchaServerHttpClient
-from synapse.util.distributor import registered_user

 import logging
 import urllib

@@ -37,8 +36,6 @@ class RegistrationHandler(BaseHandler):
         super(RegistrationHandler, self).__init__(hs)

         self.auth = hs.get_auth()
-        self.distributor = hs.get_distributor()
-        self.distributor.declare("registered_user")
         self.captcha_client = CaptchaServerHttpClient(hs)

         self._next_generated_user_id = None

@@ -140,9 +137,11 @@ class RegistrationHandler(BaseHandler):
                 password_hash=password_hash,
                 was_guest=was_guest,
                 make_guest=make_guest,
+                create_profile_with_localpart=(
+                    # If the user was a guest then they already have a profile
+                    None if was_guest else user.localpart
+                ),
             )
-
-            yield registered_user(self.distributor, user)
         else:
             # autogen a sequential user ID
             attempts = 0

|
@ -160,7 +159,8 @@ class RegistrationHandler(BaseHandler):
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
token=token,
|
token=token,
|
||||||
password_hash=password_hash,
|
password_hash=password_hash,
|
||||||
make_guest=make_guest
|
make_guest=make_guest,
|
||||||
|
create_profile_with_localpart=user.localpart,
|
||||||
)
|
)
|
||||||
except SynapseError:
|
except SynapseError:
|
||||||
# if user id is taken, just generate another
|
# if user id is taken, just generate another
|
||||||
|
@ -168,7 +168,6 @@ class RegistrationHandler(BaseHandler):
|
||||||
user_id = None
|
user_id = None
|
||||||
token = None
|
token = None
|
||||||
attempts += 1
|
attempts += 1
|
||||||
yield registered_user(self.distributor, user)
|
|
||||||
|
|
||||||
# We used to generate default identicons here, but nowadays
|
# We used to generate default identicons here, but nowadays
|
||||||
# we want clients to generate their own as part of their branding
|
# we want clients to generate their own as part of their branding
|
||||||
|
@ -201,8 +200,8 @@ class RegistrationHandler(BaseHandler):
|
||||||
token=token,
|
token=token,
|
||||||
password_hash="",
|
password_hash="",
|
||||||
appservice_id=service_id,
|
appservice_id=service_id,
|
||||||
|
create_profile_with_localpart=user.localpart,
|
||||||
)
|
)
|
||||||
yield registered_user(self.distributor, user)
|
|
||||||
defer.returnValue((user_id, token))
|
defer.returnValue((user_id, token))
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
|
@@ -248,9 +247,9 @@ class RegistrationHandler(BaseHandler):
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=None,
+                create_profile_with_localpart=user.localpart,
             )
-            yield registered_user(self.distributor, user)
         except Exception as e:
             yield self.store.add_access_token_to_user(user_id, token)
             # Ignore Registration errors

@@ -395,10 +394,9 @@ class RegistrationHandler(BaseHandler):
             yield self.store.register(
                 user_id=user_id,
                 token=token,
-                password_hash=None
+                password_hash=None,
+                create_profile_with_localpart=user.localpart,
             )
-
-            yield registered_user(self.distributor, user)
         else:
             yield self.store.user_delete_access_tokens(user_id=user_id)
             yield self.store.add_access_token_to_user(user_id=user_id, token=token)

@@ -273,16 +273,16 @@ class Mailer(object):

         sender_state_event = room_state[("m.room.member", event.sender)]
         sender_name = name_from_member_event(sender_state_event)
-        sender_avatar_url = None
-        if "avatar_url" in sender_state_event.content:
-            sender_avatar_url = sender_state_event.content["avatar_url"]
+        sender_avatar_url = sender_state_event.content.get("avatar_url")

         # 'hash' for deterministically picking default images: use
         # sender_hash % the number of default images to choose from
         sender_hash = string_ordinal_total(event.sender)

+        msgtype = event.content.get("msgtype")
+
         ret = {
-            "msgtype": event.content["msgtype"],
+            "msgtype": msgtype,
             "is_historical": event.event_id != notif['event_id'],
             "id": event.event_id,
             "ts": event.origin_server_ts,

@@ -291,9 +291,9 @@ class Mailer(object):
             "sender_hash": sender_hash,
         }

-        if event.content["msgtype"] == "m.text":
+        if msgtype == "m.text":
             self.add_text_message_vars(ret, event)
-        elif event.content["msgtype"] == "m.image":
+        elif msgtype == "m.image":
             self.add_image_message_vars(ret, event)

         if "body" in event.content:

@@ -302,16 +302,17 @@ class Mailer(object):
         return ret

     def add_text_message_vars(self, messagevars, event):
-        if "format" in event.content:
-            msgformat = event.content["format"]
-        else:
-            msgformat = None
+        msgformat = event.content.get("format")
+
         messagevars["format"] = msgformat

-        if msgformat == "org.matrix.custom.html":
-            messagevars["body_text_html"] = safe_markup(event.content["formatted_body"])
-        else:
-            messagevars["body_text_html"] = safe_text(event.content["body"])
+        formatted_body = event.content.get("formatted_body")
+        body = event.content.get("body")
+
+        if msgformat == "org.matrix.custom.html" and formatted_body:
+            messagevars["body_text_html"] = safe_markup(formatted_body)
+        elif body:
+            messagevars["body_text_html"] = safe_text(body)

         return messagevars

@@ -252,7 +252,8 @@ class PreviewUrlResource(Resource):

         og = {}
         for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
-            og[tag.attrib['property']] = tag.attrib['content']
+            if 'content' in tag.attrib:
+                og[tag.attrib['property']] = tag.attrib['content']

         # TODO: grab article: meta tags too, e.g.:

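The guard avoids a `KeyError` on pages whose `og:` meta tags omit `content`. A sketch of the failing input, assuming lxml:

```python
# Sketch with lxml: a meta tag with property= but no content= is legal
# markup, and the unguarded tag.attrib['content'] raised KeyError on it.
from lxml import etree

page = ('<html><head>'
        '<meta property="og:title" content="Hello"/>'
        '<meta property="og:description"/>'  # no content attribute
        '</head><body/></html>')
tree = etree.fromstring(page)

og = {}
for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
    if 'content' in tag.attrib:  # the new guard
        og[tag.attrib['property']] = tag.attrib['content']

print(og)  # {'og:title': 'Hello'}: the bare og:description is skipped
```
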
@@ -279,7 +280,7 @@ class PreviewUrlResource(Resource):
             # TODO: consider inlined CSS styles as well as width & height attribs
             images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
             images = sorted(images, key=lambda i: (
-                -1 * int(i.attrib['width']) * int(i.attrib['height'])
+                -1 * float(i.attrib['width']) * float(i.attrib['height'])
             ))
             if not images:
                 images = tree.xpath("//img[@src]")

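`float()` rather than `int()` because the XPath predicate `number(@width)>10` happily matches decimal strings, which `int()` then rejects. A small sketch of the sort with such an attribute value:

```python
# number(@width) > 10 in XPath matches "100.5", but int("100.5") raises
# ValueError; float() accepts everything the predicate lets through.
attrs = [{'width': '100.5', 'height': '60'}, {'width': '32', 'height': '32'}]
images = sorted(attrs, key=lambda i: (
    -1 * float(i['width']) * float(i['height'])
))
print([i['width'] for i in images])  # largest area first: ['100.5', '32']
```
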
@@ -287,9 +288,9 @@ class PreviewUrlResource(Resource):
                 og['og:image'] = images[0].attrib['src']

             # pre-cache the image for posterity
-            # FIXME: it might be cleaner to use the same flow as the main /preview_url request
-            # itself and benefit from the same caching etc. But for now we just rely on the
-            # caching on the master request to speed things up.
+            # FIXME: it might be cleaner to use the same flow as the main /preview_url
+            # request itself and benefit from the same caching etc. But for now we
+            # just rely on the caching on the master request to speed things up.
             if 'og:image' in og and og['og:image']:
                 image_info = yield self._download_url(
                     self._rebase_url(og['og:image'], media_info['uri']), requester.user

@@ -76,7 +76,8 @@ class RegistrationStore(SQLBaseStore):

     @defer.inlineCallbacks
     def register(self, user_id, token, password_hash,
-                 was_guest=False, make_guest=False, appservice_id=None):
+                 was_guest=False, make_guest=False, appservice_id=None,
+                 create_profile_with_localpart=None):
         """Attempts to register an account.

         Args:

|
@ -88,6 +89,8 @@ class RegistrationStore(SQLBaseStore):
|
||||||
make_guest (boolean): True if the the new user should be guest,
|
make_guest (boolean): True if the the new user should be guest,
|
||||||
false to add a regular user account.
|
false to add a regular user account.
|
||||||
appservice_id (str): The ID of the appservice registering the user.
|
appservice_id (str): The ID of the appservice registering the user.
|
||||||
|
create_profile_with_localpart (str): Optionally create a profile for
|
||||||
|
the given localpart.
|
||||||
Raises:
|
Raises:
|
||||||
StoreError if the user_id could not be registered.
|
StoreError if the user_id could not be registered.
|
||||||
"""
|
"""
|
||||||
|
@@ -99,7 +102,8 @@ class RegistrationStore(SQLBaseStore):
             password_hash,
             was_guest,
             make_guest,
-            appservice_id
+            appservice_id,
+            create_profile_with_localpart,
         )
         self.get_user_by_id.invalidate((user_id,))
         self.is_guest.invalidate((user_id,))

@@ -112,7 +116,8 @@ class RegistrationStore(SQLBaseStore):
         password_hash,
         was_guest,
         make_guest,
-        appservice_id
+        appservice_id,
+        create_profile_with_localpart,
     ):
         now = int(self.clock.time())

@@ -157,6 +162,12 @@ class RegistrationStore(SQLBaseStore):
             (next_id, user_id, token,)
         )

+        if create_profile_with_localpart:
+            txn.execute(
+                "INSERT INTO profiles(user_id) VALUES (?)",
+                (create_profile_with_localpart,)
+            )
+
     @cached()
     def get_user_by_id(self, user_id):
         return self._simple_select_one(

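Profile creation now happens inside the registration transaction, replacing the `registered_user` distributor hook removed from the handlers above. A sketch of the row the new statement writes, against a `profiles` table cut down to the one column the INSERT touches:

```python
# Sketch of the guarded INSERT above; the real profiles table also carries
# displayname/avatar_url columns, elided here.
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE profiles (user_id TEXT NOT NULL)")

create_profile_with_localpart = "alice"  # None when the user was a guest
if create_profile_with_localpart:
    db.execute(
        "INSERT INTO profiles(user_id) VALUES (?)",
        (create_profile_with_localpart,)
    )

print(db.execute("SELECT user_id FROM profiles").fetchall())  # [('alice',)]
```
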
@@ -27,10 +27,6 @@ import logging
 logger = logging.getLogger(__name__)


-def registered_user(distributor, user):
-    return distributor.fire("registered_user", user)
-
-
 def user_left_room(distributor, user, room_id):
     return preserve_context_over_fn(
         distributor.fire,

@@ -54,6 +54,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
     config.trusted_third_party_id_servers = []
     config.room_invite_state_types = []

+    config.use_frozen_dicts = True
     config.database_config = {"name": "sqlite3"}

     if "clock" not in kargs: