Merge branch 'release-v1.4.0' of github.com:matrix-org/synapse into matrix-org-hotfixes
commit b464afe283
@@ -2,7 +2,7 @@ Synapse 1.4.0rc1 (2019-09-26)
 =============================
 
 Note that this release includes significant changes around 3pid
-verification. Administrators are reminded to review the [upgrade notes](UPGRADE.rst##upgrading-to-v140).
+verification. Administrators are reminded to review the [upgrade notes](UPGRADE.rst#upgrading-to-v140).
 
 Features
 --------
@@ -48,7 +48,7 @@ Features
 - Let synctl accept a directory of config files. ([\#5904](https://github.com/matrix-org/synapse/issues/5904))
 - Increase max display name size to 256. ([\#5906](https://github.com/matrix-org/synapse/issues/5906))
 - Add admin API endpoint for getting whether or not a user is a server administrator. ([\#5914](https://github.com/matrix-org/synapse/issues/5914))
-- Redact events in the database that have been redacted for a month. ([\#5934](https://github.com/matrix-org/synapse/issues/5934))
+- Redact events in the database that have been redacted for a week. ([\#5934](https://github.com/matrix-org/synapse/issues/5934))
 - New prometheus metrics:
   - `synapse_federation_known_servers`: represents the total number of servers your server knows about (i.e. is in rooms with), including itself. Enable by setting `metrics_flags.known_servers` to True in the configuration. ([\#5981](https://github.com/matrix-org/synapse/issues/5981))
   - `synapse_build_info`: exposes the Python version, OS version, and Synapse version of the running server. ([\#6005](https://github.com/matrix-org/synapse/issues/6005))
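A quick way to sanity-check the new gauges on a running server is to scrape the Prometheus endpoint directly. A minimal sketch, assuming a metrics listener is enabled and reachable at `localhost:9000` (host and port here are illustrative):

```python
# Sketch: scrape Synapse's Prometheus endpoint and print the new 1.4.0 gauges.
# Assumes a metrics listener at localhost:9000; adjust for your deployment.
from urllib.request import urlopen

body = urlopen("http://localhost:9000/_synapse/metrics").read().decode("utf-8")

for line in body.splitlines():
    # synapse_federation_known_servers only appears once
    # metrics_flags.known_servers is set to True in the configuration.
    if line.startswith(("synapse_federation_known_servers", "synapse_build_info")):
        print(line)
```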
@@ -0,0 +1 @@
+Fix up sample config entry for `redaction_retention_period` option.
@@ -0,0 +1 @@
+Fix bug in background update that adds last seen information to the `devices` table, and improve its performance on Postgres.
@@ -0,0 +1 @@
+Fix bad performance of censoring redactions background task.
@@ -0,0 +1 @@
+Fix fetching censored redactions from DB, which caused APIs like initial sync to fail if they tried to include the censored redaction.
@@ -0,0 +1 @@
+Fix exceptions when storing large retry intervals for down remote servers.
@@ -314,7 +314,7 @@ listeners:
 #
 # Defaults to `7d`. Set to `null` to disable.
 #
-redaction_retention_period: 7d
+#redaction_retention_period: 28d
 
 # How long to track users' last seen time and IPs in the database.
 #
@@ -742,7 +742,7 @@ class ServerConfig(Config):
         #
         # Defaults to `7d`. Set to `null` to disable.
         #
-        redaction_retention_period: 7d
+        #redaction_retention_period: 28d
 
         # How long to track users' last seen time and IPs in the database.
         #
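Both hunks above switch the sample entry to a commented-out `28d`, since `7d` is already the default and the sample should not override it. The `7d`/`28d` values are duration strings that the config layer turns into milliseconds before they reach `redaction_retention_period`; a minimal sketch of that conversion (illustrative only, not Synapse's actual parser):

```python
# Illustrative duration-string parser: "7d" -> 604800000 ms.
# Synapse's real parser lives in its config base class; this is a sketch.
SECOND_MS = 1000
UNITS = {
    "s": SECOND_MS,
    "m": 60 * SECOND_MS,
    "h": 60 * 60 * SECOND_MS,
    "d": 24 * 60 * 60 * SECOND_MS,
    "w": 7 * 24 * 60 * 60 * SECOND_MS,
    "y": 365 * 24 * 60 * 60 * SECOND_MS,
}

def parse_duration(value: str) -> int:
    """Convert "28d"-style strings (or a bare number of ms) to milliseconds."""
    if value[-1].isdigit():
        return int(value)
    return int(value[:-1]) * UNITS[value[-1]]

assert parse_duration("7d") == 7 * 24 * 60 * 60 * 1000
assert parse_duration("28d") == 4 * parse_duration("7d")
```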
@@ -463,14 +463,46 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         last_device_id = progress.get("last_device_id", "")
 
         def _devices_last_seen_update_txn(txn):
-            sql = """
-                SELECT u.last_seen, u.ip, u.user_agent, user_id, device_id FROM devices
-                INNER JOIN user_ips AS u USING (user_id, device_id)
-                WHERE user_id > ? OR (user_id = ? AND device_id > ?)
-                ORDER BY user_id ASC, device_id ASC
-                LIMIT ?
-            """
-            txn.execute(sql, (last_user_id, last_user_id, last_device_id, batch_size))
+            # This consists of two queries:
+            #
+            # 1. The sub-query searches for the next N devices and joins
+            #    against user_ips to find the max last_seen associated with
+            #    that device.
+            # 2. The outer query then joins again against user_ips on
+            #    user/device/last_seen. This *should* hopefully only
+            #    return one row, but if it does return more than one then
+            #    we'll just end up updating the same device row multiple
+            #    times, which is fine.
+
+            if self.database_engine.supports_tuple_comparison:
+                where_clause = "(user_id, device_id) > (?, ?)"
+                where_args = [last_user_id, last_device_id]
+            else:
+                # We explicitly do a `user_id >= ? AND (...)` here to ensure
+                # that an index is used, as doing `user_id > ? OR (user_id = ? AND ...)`
+                # makes it hard for the query optimiser to tell that it can use the
+                # index on user_id
+                where_clause = "user_id >= ? AND (user_id > ? OR device_id > ?)"
+                where_args = [last_user_id, last_user_id, last_device_id]
+
+            sql = """
+                SELECT
+                    last_seen, ip, user_agent, user_id, device_id
+                FROM (
+                    SELECT
+                        user_id, device_id, MAX(u.last_seen) AS last_seen
+                    FROM devices
+                    INNER JOIN user_ips AS u USING (user_id, device_id)
+                    WHERE %(where_clause)s
+                    GROUP BY user_id, device_id
+                    ORDER BY user_id ASC, device_id ASC
+                    LIMIT ?
+                ) c
+                INNER JOIN user_ips AS u USING (user_id, device_id, last_seen)
+            """ % {
+                "where_clause": where_clause
+            }
+            txn.execute(sql, where_args + [batch_size])
 
             rows = txn.fetchall()
             if not rows:
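The two `where_clause` branches above express the same keyset-pagination predicate, resuming the scan just past the last `(user_id, device_id)` processed. A self-contained sketch of the equivalence, run against SQLite with made-up rows:

```python
# Demonstrates that "(user_id, device_id) > (?, ?)" and the expanded
# "user_id >= ? AND (user_id > ? OR device_id > ?)" page through the same rows.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE devices (user_id TEXT, device_id TEXT)")
conn.executemany(
    "INSERT INTO devices VALUES (?, ?)",
    [("@a:hs", "D1"), ("@a:hs", "D2"), ("@b:hs", "D1"), ("@c:hs", "D9")],
)

last_user_id, last_device_id = "@a:hs", "D2"

expanded = conn.execute(
    "SELECT user_id, device_id FROM devices "
    "WHERE user_id >= ? AND (user_id > ? OR device_id > ?) "
    "ORDER BY user_id, device_id",
    (last_user_id, last_user_id, last_device_id),
).fetchall()

if sqlite3.sqlite_version_info >= (3, 15, 0):  # tuple comparison support
    tupled = conn.execute(
        "SELECT user_id, device_id FROM devices "
        "WHERE (user_id, device_id) > (?, ?) "
        "ORDER BY user_id, device_id",
        (last_user_id, last_device_id),
    ).fetchall()
    assert tupled == expanded  # both resume just after (@a:hs, D2)

print(expanded)  # [('@b:hs', 'D1'), ('@c:hs', 'D9')]
```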
@@ -72,6 +72,13 @@ class PostgresEngine(object):
         """
         return True
 
+    @property
+    def supports_tuple_comparison(self):
+        """
+        Do we support comparing tuples, i.e. `(a, b) > (c, d)`?
+        """
+        return True
+
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
@@ -38,6 +38,14 @@ class Sqlite3Engine(object):
         """
         return self.module.sqlite_version_info >= (3, 24, 0)
 
+    @property
+    def supports_tuple_comparison(self):
+        """
+        Do we support comparing tuples, i.e. `(a, b) > (c, d)`? This requires
+        SQLite 3.15+.
+        """
+        return self.module.sqlite_version_info >= (3, 15, 0)
+
     def check_database(self, txn):
         pass
 
@@ -1389,6 +1389,18 @@ class EventsStore(
             ],
         )
 
+        for event, _ in events_and_contexts:
+            if not event.internal_metadata.is_redacted():
+                # If we're persisting an unredacted event we go and ensure
+                # that we mark any redactions that reference this event as
+                # requiring censoring.
+                self._simple_update_txn(
+                    txn,
+                    table="redactions",
+                    keyvalues={"redacts": event.event_id},
+                    updatevalues={"have_censored": False},
+                )
+
     def _store_rejected_events_txn(self, txn, events_and_contexts):
         """Add rows to the 'rejections' table for received events which were
         rejected
@@ -1552,9 +1564,15 @@ class EventsStore(
     def _store_redaction(self, txn, event):
         # invalidate the cache for the redacted event
         txn.call_after(self._invalidate_get_event_cache, event.redacts)
-        txn.execute(
-            "INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
-            (event.event_id, event.redacts),
+
+        self._simple_insert_txn(
+            txn,
+            table="redactions",
+            values={
+                "event_id": event.event_id,
+                "redacts": event.redacts,
+                "received_ts": self._clock.time_msec(),
+            },
         )
 
     @defer.inlineCallbacks
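`_simple_insert_txn` is Synapse's generic insert helper, so the new code is the old hand-written `INSERT` plus the extra `received_ts` column. Roughly the SQL it builds for this call (a sketch; the helper lives in Synapse's storage base class, and the event IDs are hypothetical):

```python
# Roughly what _simple_insert_txn generates for the values dict above.
values = {
    "event_id": "$redaction:example.com",
    "redacts": "$original:example.com",
    "received_ts": 1569500000000,
}
sql = "INSERT INTO redactions (%s) VALUES (%s)" % (
    ", ".join(values),
    ", ".join("?" for _ in values),
)
print(sql)  # INSERT INTO redactions (event_id, redacts, received_ts) VALUES (?, ?, ?)
args = list(values.values())
```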
@@ -1571,36 +1589,29 @@ class EventsStore(
         if self.hs.config.redaction_retention_period is None:
             return
 
-        max_pos = yield self.find_first_stream_ordering_after_ts(
-            self._clock.time_msec() - self.hs.config.redaction_retention_period
-        )
+        before_ts = self._clock.time_msec() - self.hs.config.redaction_retention_period
 
         # We fetch all redactions that:
         #   1. point to an event we have,
-        #   2. has a stream ordering from before the cut off, and
+        #   2. have a received_ts from before the cut off, and
         #   3. we haven't yet censored.
         #
         # This is limited to 100 events to ensure that we don't try and do too
         # much at once. We'll get called again so this should eventually catch
         # up.
-        #
-        # We use the range [-max_pos, max_pos] to handle backfilled events,
-        # which are given negative stream ordering.
         sql = """
-            SELECT redact_event.event_id, redacts FROM redactions
-            INNER JOIN events AS redact_event USING (event_id)
-            INNER JOIN events AS original_event ON (
-                redact_event.room_id = original_event.room_id
-                AND redacts = original_event.event_id
+            SELECT redactions.event_id, redacts FROM redactions
+            LEFT JOIN events AS original_event ON (
+                redacts = original_event.event_id
             )
             WHERE NOT have_censored
-            AND ? <= redact_event.stream_ordering AND redact_event.stream_ordering <= ?
-            ORDER BY redact_event.stream_ordering ASC
+            AND redactions.received_ts <= ?
+            ORDER BY redactions.received_ts ASC
             LIMIT ?
         """
 
         rows = yield self._execute(
-            "_censor_redactions_fetch", None, sql, -max_pos, max_pos, 100
+            "_censor_redactions_fetch", None, sql, before_ts, 100
         )
 
         updates = []
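With `received_ts` in place, the cut-off becomes a single subtraction rather than a stream-ordering lookup. A small sketch of the selection logic, with made-up timestamps:

```python
# Cut-off arithmetic for _censor_redactions, with illustrative values.
redaction_retention_period = 7 * 24 * 60 * 60 * 1000  # "7d" in milliseconds
now_ms = 1_569_500_000_000                            # hypothetical clock reading

before_ts = now_ms - redaction_retention_period

# A redaction becomes censorable once received_ts <= before_ts; ordering by
# received_ts with LIMIT 100 drains any backlog oldest-first across runs.
rows = [("$r1", 1_568_000_000_000), ("$r2", now_ms)]  # (event_id, received_ts)
censorable = [event_id for event_id, ts in rows if ts <= before_ts]
assert censorable == ["$r1"]
```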
@@ -67,6 +67,10 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
             self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
         )
 
+        self.register_background_update_handler(
+            "redactions_received_ts", self._redactions_received_ts
+        )
+
     @defer.inlineCallbacks
     def _background_reindex_fields_sender(self, progress, batch_size):
         target_min_stream_id = progress["target_min_stream_id_inclusive"]
@@ -397,3 +401,60 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         )
 
         return num_handled
+
+    @defer.inlineCallbacks
+    def _redactions_received_ts(self, progress, batch_size):
+        """Handles filling out the `received_ts` column in redactions.
+        """
+        last_event_id = progress.get("last_event_id", "")
+
+        def _redactions_received_ts_txn(txn):
+            # Fetch the set of event IDs that we want to update
+            sql = """
+                SELECT event_id FROM redactions
+                WHERE event_id > ?
+                ORDER BY event_id ASC
+                LIMIT ?
+            """
+
+            txn.execute(sql, (last_event_id, batch_size))
+
+            rows = txn.fetchall()
+            if not rows:
+                return 0
+
+            upper_event_id, = rows[-1]
+
+            # Update the redactions with the received_ts.
+            #
+            # Note: Not all events have an associated received_ts, so we
+            # fall back to using origin_server_ts. If we for some reason don't
+            # have an origin_server_ts, let's just use the current timestamp.
+            #
+            # We don't want to leave it null, as then we'll never try and
+            # censor those redactions.
+            sql = """
+                UPDATE redactions
+                SET received_ts = (
+                    SELECT COALESCE(received_ts, origin_server_ts, ?) FROM events
+                    WHERE events.event_id = redactions.event_id
+                )
+                WHERE ? <= event_id AND event_id <= ?
+            """
+
+            txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))
+
+            self._background_update_progress_txn(
+                txn, "redactions_received_ts", {"last_event_id": upper_event_id}
+            )
+
+            return len(rows)
+
+        count = yield self.runInteraction(
+            "_redactions_received_ts", _redactions_received_ts_txn
+        )
+
+        if not count:
+            yield self._end_background_update("redactions_received_ts")
+
+        return count
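The `COALESCE(received_ts, origin_server_ts, ?)` in the backfill is the crux: every existing redaction must end up with some timestamp, or the censor job would skip it forever. The per-row logic, rendered in Python for illustration:

```python
# Python rendering of COALESCE(received_ts, origin_server_ts, now_ms):
# take the first candidate that is not NULL/None.
def backfilled_ts(received_ts, origin_server_ts, now_ms):
    for candidate in (received_ts, origin_server_ts, now_ms):
        if candidate is not None:
            return candidate

assert backfilled_ts(None, 1_569_000_000_000, 1_569_500_000_000) == 1_569_000_000_000
assert backfilled_ts(None, None, 1_569_500_000_000) == 1_569_500_000_000
```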
@@ -238,6 +238,20 @@ class EventsWorkerStore(SQLBaseStore):
                 # we have to recheck auth now.
 
                 if not allow_rejected and entry.event.type == EventTypes.Redaction:
+                    if not hasattr(entry.event, "redacts"):
+                        # A redacted redaction doesn't have a `redacts` key, in
+                        # which case let's just withhold the event.
+                        #
+                        # Note: Most of the time if the redaction has been
+                        # redacted we still have the un-redacted event in the DB
+                        # and so we'll still see the `redacts` key. However, this
+                        # isn't always true e.g. if we have censored the event.
+                        logger.debug(
+                            "Withholding redaction event %s as we don't have redacts key",
+                            event_id,
+                        )
+                        continue
+
                     redacted_event_id = entry.event.redacts
                     event_map = yield self._get_events_from_cache_or_db([redacted_event_id])
                     original_event_entry = event_map.get(redacted_event_id)
@@ -0,0 +1,18 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We want to store large retry intervals so we upgrade the column from INT
+-- to BIGINT. We don't need to do this on SQLite.
+ALTER TABLE destinations ALTER retry_interval SET DATA TYPE BIGINT;
@@ -0,0 +1,20 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE redactions ADD COLUMN received_ts BIGINT;
+CREATE INDEX redactions_have_censored_ts ON redactions(received_ts) WHERE not have_censored;
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('redactions_received_ts', '{}');
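The `WHERE not have_censored` clause makes `redactions_have_censored_ts` a partial index: only rows still awaiting censoring are indexed, and its predicate matches the fetch query in `_censor_redactions` exactly. A quick sketch against SQLite with a trimmed-down schema:

```python
# Demonstrates the partial index: only un-censored rows are indexed, and the
# censor query's WHERE clause matches the index predicate.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE redactions "
    "(event_id TEXT, redacts TEXT, have_censored BOOL, received_ts BIGINT)"
)
conn.execute(
    "CREATE INDEX redactions_have_censored_ts ON redactions(received_ts) "
    "WHERE NOT have_censored"
)

plan = conn.execute(
    "EXPLAIN QUERY PLAN SELECT event_id FROM redactions "
    "WHERE NOT have_censored AND received_ts <= ? "
    "ORDER BY received_ts ASC LIMIT 100",
    (0,),
).fetchall()
print(plan)  # the plan should mention redactions_have_censored_ts
```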
@@ -29,7 +29,7 @@ MIN_RETRY_INTERVAL = 10 * 60 * 1000
 RETRY_MULTIPLIER = 5
 
 # a cap on the backoff. (Essentially none)
-MAX_RETRY_INTERVAL = 2 ** 63
+MAX_RETRY_INTERVAL = 2 ** 62
 
 
 class NotRetryingDestination(Exception):
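The cap shrinks because `destinations.retry_interval` is now a signed 64-bit BIGINT, whose maximum is 2 ** 63 - 1; the old cap of 2 ** 63 was already one past that limit. A small sketch of the arithmetic, with a capped backoff in the style of retryutils (the multiplier logic here is a simplification):

```python
# Why 2 ** 63 overflowed the destinations.retry_interval column: a signed
# 64-bit BIGINT tops out at 2 ** 63 - 1, so the old cap was already too big.
INT64_MAX = 2 ** 63 - 1
assert 2 ** 63 > INT64_MAX   # old cap: does not fit
assert 2 ** 62 <= INT64_MAX  # new cap: fits with headroom

# Simplified capped exponential backoff (sketch; the real code also jitters).
MIN_RETRY_INTERVAL = 10 * 60 * 1000
RETRY_MULTIPLIER = 5
MAX_RETRY_INTERVAL = 2 ** 62

def next_retry_interval(current_ms: int) -> int:
    return min(max(current_ms, MIN_RETRY_INTERVAL) * RETRY_MULTIPLIER,
               MAX_RETRY_INTERVAL)

# Even a fully backed-off destination stays storable.
assert next_retry_interval(MAX_RETRY_INTERVAL) == MAX_RETRY_INTERVAL <= INT64_MAX
```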
@@ -118,6 +118,8 @@ class RedactionTestCase(unittest.HomeserverTestCase):
 
         self.get_success(self.store.persist_event(event, context))
 
+        return event
+
     def test_redact(self):
         self.get_success(
             self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
@@ -361,3 +363,37 @@ class RedactionTestCase(unittest.HomeserverTestCase):
         )
 
         self.assert_dict({"content": {}}, json.loads(event_json))
+
+    def test_redact_redaction(self):
+        """Tests that we can redact a redaction and can fetch it again.
+        """
+
+        self.get_success(
+            self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
+        )
+
+        msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+
+        first_redact_event = self.get_success(
+            self.inject_redaction(
+                self.room1, msg_event.event_id, self.u_alice, "Redacting message"
+            )
+        )
+
+        self.get_success(
+            self.inject_redaction(
+                self.room1,
+                first_redact_event.event_id,
+                self.u_alice,
+                "Redacting redaction",
+            )
+        )
+
+        # Now let's jump to the future where we have censored the redaction event
+        # in the DB.
+        self.reactor.advance(60 * 60 * 24 * 31)
+
+        # We just want to check that fetching the event doesn't raise an exception.
+        self.get_success(
+            self.store.get_event(first_redact_event.event_id, allow_none=True)
+        )
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from synapse.util.retryutils import MAX_RETRY_INTERVAL
+
 from tests.unittest import HomeserverTestCase
 
 
@@ -45,3 +47,12 @@ class TransactionStoreTestCase(HomeserverTestCase):
         """
         d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
         self.get_success(d)
+
+    def test_large_destination_retry(self):
+        d = self.store.set_destination_retry_timings(
+            "example.com", MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL
+        )
+        self.get_success(d)
+
+        d = self.store.get_destination_retry_timings("example.com")
+        self.get_success(d)