#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
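
"""A standalone pusher worker for Synapse.

This process runs the push notification senders (the pusher pool) outside of
the main homeserver. It connects directly to the homeserver database and keeps
itself up to date by long-polling the main process's HTTP replication endpoint
(``replication_url``), starting and stopping pushers as the replicated streams
change.
"""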

import synapse

from synapse.server import HomeServer
from synapse.config._base import ConfigError
from synapse.config.database import DatabaseConfig
from synapse.config.logger import LoggingConfig
from synapse.config.emailconfig import EmailConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.storage.engines import create_engine
from synapse.storage import DataStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

from twisted.internet import reactor, defer
from twisted.web.resource import Resource

from daemonize import Daemonize

import sys
import logging

logger = logging.getLogger("synapse.app.pusher")


class SlaveConfig(DatabaseConfig):
    def read_config(self, config):
        self.replication_url = config["replication_url"]
        self.server_name = config["server_name"]
        self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
            "use_insecure_ssl_client_just_for_testing_do_not_use", False
        )
        self.user_agent_suffix = None
        self.start_pushers = True
        self.listeners = config["listeners"]
        self.soft_file_limit = config.get("soft_file_limit")
        self.daemonize = config.get("daemonize")
        self.pid_file = self.abspath(config.get("pid_file"))
        self.public_baseurl = config["public_baseurl"]

    def default_config(self, server_name, **kwargs):
        pid_file = self.abspath("pusher.pid")
        return """\
        # Slave configuration

        # The replication listener on the synapse to talk to.
        #replication_url: https://localhost:{replication_port}/_synapse/replication

        server_name: "%(server_name)s"

        listeners: []
        # Enable a ssh manhole listener on the pusher.
        # - type: manhole
        #   port: {manhole_port}
        #   bind_address: 127.0.0.1
        # Enable a metric listener on the pusher.
        # - type: http
        #   port: {metrics_port}
        #   bind_address: 127.0.0.1
        #   resources:
        #     - names: ["metrics"]
        #       compress: False

        report_stats: False

        daemonize: False

        pid_file: %(pid_file)s

        """ % locals()


class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig):
    pass
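

# The pusher needs a handful of reader/writer methods that only exist on the
# full DataStore. Pulling them in via ``.__func__`` grafts the underlying
# (unbound) functions onto this slaved store so they run against the worker's
# own database connection.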


class PusherSlaveStore(
    SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
    SlavedAccountDataStore
):
    update_pusher_last_stream_ordering_and_success = (
        DataStore.update_pusher_last_stream_ordering_and_success.__func__
    )

    update_pusher_failing_since = (
        DataStore.update_pusher_failing_since.__func__
    )

    update_pusher_last_stream_ordering = (
        DataStore.update_pusher_last_stream_ordering.__func__
    )

    get_throttle_params_by_room = (
        DataStore.get_throttle_params_by_room.__func__
    )

    set_throttle_params = (
        DataStore.set_throttle_params.__func__
    )

    get_time_of_last_push_action_before = (
        DataStore.get_time_of_last_push_action_before.__func__
    )

    get_profile_displayname = (
        DataStore.get_profile_displayname.__func__
    )

    # XXX: This is a bit broken because we don't persist forgotten rooms
    # in a way that they can be streamed. This means that we don't have a
    # way to invalidate the forgotten rooms cache correctly.
    # For now we expire the cache every hour.
    BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
    who_forgot_in_room = (
        RoomMemberStore.__dict__["who_forgot_in_room"]
    )
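

# A cut-down HomeServer that only runs the pusher pool. Everything else
# (client API, federation, etc.) stays on the main synapse process; this
# worker talks to the shared database directly and follows the main process
# via the HTTP replication API.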


class PusherServer(HomeServer):

    def get_db_conn(self, run_new_connection=True):
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def remove_pusher(self, app_id, push_key, user_id):
        http_client = self.get_simple_http_client()
        replication_url = self.config.replication_url
        url = replication_url + "/remove_pushers"
        return http_client.post_json_get_json(url, {
            "remove": [{
                "app_id": app_id,
                "push_key": push_key,
                "user_id": user_id,
            }]
        })
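
    # Note: the pusher only serves the metrics resource over HTTP; any other
    # resource names in the listener config are ignored.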

    def _listen_http(self, listener_config):
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse pusher now listening on port %d", port)

    def start_listening(self):
        for listener in self.config.listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
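
    # Long-poll the main process for replication updates: send our current
    # stream positions, wait (up to 30s) for new rows, apply them to the
    # local store, then poke the pusher pool so new notifications go out.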

    @defer.inlineCallbacks
    def replicate(self):
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.replication_url
        pusher_pool = self.get_pusherpool()
        clock = self.get_clock()

        def stop_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            pushers_for_user = pusher_pool.pushers.get(user_id, {})
            pusher = pushers_for_user.pop(key, None)
            if pusher is None:
                return
            logger.info("Stopping pusher %r / %r", user_id, key)
            pusher.on_stop()

        def start_pusher(user_id, app_id, pushkey):
            key = "%s:%s" % (app_id, pushkey)
            logger.info("Starting pusher %r / %r", user_id, key)
            return pusher_pool._refresh_pusher(app_id, pushkey, user_id)
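
        # Each replicated stream arrives as {"position": <token>, "rows": [...]},
        # where the row tuples mirror the columns of that replication stream;
        # hence the positional indexing below.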
        @defer.inlineCallbacks
        def poke_pushers(results):
            pushers_rows = set(
                map(tuple, results.get("pushers", {}).get("rows", []))
            )
            deleted_pushers_rows = set(
                map(tuple, results.get("deleted_pushers", {}).get("rows", []))
            )
            for row in sorted(pushers_rows | deleted_pushers_rows):
                if row in deleted_pushers_rows:
                    user_id, app_id, pushkey = row[1:4]
                    stop_pusher(user_id, app_id, pushkey)
                elif row in pushers_rows:
                    user_id = row[1]
                    app_id = row[5]
                    pushkey = row[8]
                    yield start_pusher(user_id, app_id, pushkey)

            stream = results.get("events")
            if stream:
                min_stream_id = stream["rows"][0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_notifications)(
                    min_stream_id, max_stream_id
                )

            stream = results.get("receipts")
            if stream:
                rows = stream["rows"]
                affected_room_ids = set(row[1] for row in rows)
                min_stream_id = rows[0][0]
                max_stream_id = stream["position"]
                preserve_fn(pusher_pool.on_new_receipts)(
                    min_stream_id, max_stream_id, affected_room_ids
                )

        def expire_broken_caches():
            store.who_forgot_in_room.invalidate_all()

        next_expire_broken_caches_ms = 0
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                now_ms = clock.time_msec()
                if now_ms > next_expire_broken_caches_ms:
                    expire_broken_caches()
                    next_expire_broken_caches_ms = (
                        now_ms + store.BROKEN_CACHE_EXPIRY_MS
                    )
                yield store.process_replication(result)
                poke_pushers(result)
            except:
                logger.exception("Error replicating from %r", replication_url)
                # sleep() returns a Deferred; yield it so we actually back off
                # before retrying rather than spinning on errors.
                yield sleep(30)


def setup(config_options):
    try:
        config = PusherSlaveConfig.load_config(
            "Synapse pusher", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    if not config:
        sys.exit(0)

    config.setup_logging()

    database_engine = create_engine(config.database_config)

    ps = PusherServer(
        config.server_name,
        db_config=config.database_config,
        config=config,
        version_string=get_version_string("Synapse", synapse),
        database_engine=database_engine,
    )

    ps.setup()
    ps.start_listening()

    change_resource_limit(ps.config.soft_file_limit)

    def start():
        ps.replicate()
        ps.get_pusherpool().start()
        ps.get_datastore().start_profiling()

    reactor.callWhenRunning(start)

    return ps
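

# A sketch of how this worker is typically started (the config-path flag comes
# from the shared Config.load_config argument parser):
#
#   python -m synapse.app.pusher -c pusher_worker.yaml
#
# where ``pusher_worker.yaml`` is a placeholder name for a config that sets at
# least server_name, replication_url, public_baseurl and listeners (see
# SlaveConfig.read_config above), plus the usual database settings.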


if __name__ == '__main__':
    with LoggingContext("main"):
        ps = setup(sys.argv[1:])

        if ps.config.daemonize:
            def run():
                with LoggingContext("run"):
                    change_resource_limit(ps.config.soft_file_limit)
                    reactor.run()

            daemon = Daemonize(
                app="synapse-pusher",
                pid=ps.config.pid_file,
                action=run,
                auto_close_fds=False,
                verbose=True,
                logger=logger,
            )

            daemon.start()
        else:
            reactor.run()