Merge remote-tracking branch 'origin/release-v1.69' into develop

pull/14187/head
Patrick Cloke 2022-10-14 14:11:27 -04:00
commit bc2bd92b93
9 changed files with 145 additions and 22 deletions


@@ -9,6 +9,7 @@
!pyproject.toml
!poetry.lock
!Cargo.lock
!Cargo.toml
!build_rust.py
rust/target


@@ -55,3 +55,8 @@ jobs:
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
# arm64 builds OOM without the git fetch setting. c.f.
# https://github.com/rust-lang/cargo/issues/10583
build-args: |
CARGO_NET_GIT_FETCH_WITH_CLI=true


@@ -1,4 +1,4 @@
Synapse 1.69.0rc2 (2022-10-06)
Synapse 1.69.0rc4 (2022-10-14)
==============================
Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
@@ -6,6 +6,38 @@ Server administrators should update their dashboards and alerting rules to avoid
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
Bugfixes
--------
- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
Updates to the Docker image
---------------------------
- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
Synapse 1.69.0rc3 (2022-10-12)
==============================
Bugfixes
--------
- Fix an issue with Docker images which caused the Rust dependencies not to be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
- Fix an error in a background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
Internal Changes
----------------
- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
Synapse 1.69.0rc2 (2022-10-06)
==============================
Deprecations and Removals
-------------------------

debian/changelog

@@ -1,3 +1,15 @@
matrix-synapse-py3 (1.69.0~rc4) stable; urgency=medium
* New Synapse release 1.69.0rc4.
-- Synapse Packaging team <packages@matrix.org> Fri, 14 Oct 2022 15:04:47 +0100
matrix-synapse-py3 (1.69.0~rc3) stable; urgency=medium
* New Synapse release 1.69.0rc3.
-- Synapse Packaging team <packages@matrix.org> Wed, 12 Oct 2022 13:24:04 +0100
matrix-synapse-py3 (1.69.0~rc2) stable; urgency=medium
* New Synapse release 1.69.0rc2.


@@ -108,6 +108,12 @@ RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -121,7 +127,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py /synapse/
COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/
# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE


@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.69.0rc2"
version = "1.69.0rc4"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -219,7 +219,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
url_preview = ["lxml"]
url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
@@ -250,7 +250,7 @@ all = [
"pysaml2",
# oidc and jwt
"authlib",
# url_preview
# url-preview
"lxml",
# sentry
"sentry-sdk",
@@ -307,7 +307,12 @@ twine = "*"
towncrier = ">=18.6.0rc1"
[build-system]
requires = ["poetry-core==1.2.0", "setuptools_rust==1.5.2"]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.0.0,<=1.3.1", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"
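
For a quick sanity check of what the new defensive bounds accept, here is a small illustration using the `packaging` library; `packaging` is an assumption made for illustration only and is not touched by this commit.

```python
# Illustration only (not part of this commit): checking which poetry-core
# versions the new defensive bound admits, via the `packaging` library.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

poetry_core_bound = SpecifierSet(">=1.0.0,<=1.3.1")

assert Version("1.2.0") in poetry_core_bound      # the previously pinned version
assert Version("1.3.1") in poetry_core_bound      # the newest version allowed here
assert Version("1.4.0") not in poetry_core_bound  # would need the bound raised
```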


@@ -205,7 +205,7 @@ class ContentRepositoryConfig(Config):
)
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
check_requirements("url_preview")
check_requirements("url-preview")
proxy_env = getproxies_environment()
if "url_preview_ip_range_blacklist" not in config:


@@ -39,6 +39,16 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
self.store = hs.get_datastores().main
self.registration_handler = hs.get_registration_handler()
# Default value if the worker that sent the replication request did not include
# an 'approved' property.
if (
hs.config.experimental.msc3866.enabled
and hs.config.experimental.msc3866.require_approval_for_new_accounts
):
self._approval_default = False
else:
self._approval_default = True
@staticmethod
async def _serialize_payload( # type: ignore[override]
user_id: str,
@@ -92,6 +102,12 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
await self.registration_handler.check_registration_ratelimit(content["address"])
# Always default admin users to approved (since it means they were created by
# an admin).
approved_default = self._approval_default
if content["admin"]:
approved_default = True
await self.registration_handler.register_with_store(
user_id=user_id,
password_hash=content["password_hash"],
@@ -103,7 +119,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type=content["user_type"],
address=content["address"],
shadow_banned=content["shadow_banned"],
approved=content["approved"],
approved=content.get("approved", approved_default),
)
return 200, {}
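
To make the compatibility behaviour concrete, below is a minimal, self-contained sketch of the defaulting logic above, using hypothetical names rather than Synapse's actual classes; the real servlet derives the default from the MSC3866 configuration as shown in the diff.

```python
# Hypothetical sketch of the `approved` fallback; not Synapse's real API.
from typing import Any, Dict


def resolve_approved(content: Dict[str, Any], approval_default: bool) -> bool:
    """Mirror the servlet's fallback: admin-created accounts default to
    approved, and an explicit 'approved' value from the sending worker wins."""
    approved_default = approval_default
    if content.get("admin"):
        # Accounts created by an admin default to approved.
        approved_default = True
    return content.get("approved", approved_default)


# A pre-1.69 worker omits "approved" entirely; the request no longer fails.
assert resolve_approved({"admin": False}, approval_default=True) is True
assert resolve_approved({"admin": True}, approval_default=False) is True
```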


@@ -310,11 +310,11 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
event_push_actions_done = progress.get("event_push_actions_done", False)
def add_thread_id_txn(
txn: LoggingTransaction, table_name: str, start_stream_ordering: int
txn: LoggingTransaction, start_stream_ordering: int
) -> int:
sql = f"""
sql = """
SELECT stream_ordering
FROM {table_name}
FROM event_push_actions
WHERE
thread_id IS NULL
AND stream_ordering > ?
@@ -326,7 +326,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# No more rows to process.
rows = txn.fetchall()
if not rows:
progress[f"{table_name}_done"] = True
progress["event_push_actions_done"] = True
self.db_pool.updates._background_update_progress_txn(
txn, "event_push_backfill_thread_id", progress
)
@@ -335,16 +335,65 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# Update the thread ID for any of those rows.
max_stream_ordering = rows[-1][0]
sql = f"""
UPDATE {table_name}
sql = """
UPDATE event_push_actions
SET thread_id = 'main'
WHERE stream_ordering <= ? AND thread_id IS NULL
WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
"""
txn.execute(sql, (max_stream_ordering,))
txn.execute(
sql,
(
start_stream_ordering,
max_stream_ordering,
),
)
# Update progress.
processed_rows = txn.rowcount
progress[f"max_{table_name}_stream_ordering"] = max_stream_ordering
progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
self.db_pool.updates._background_update_progress_txn(
txn, "event_push_backfill_thread_id", progress
)
return processed_rows
def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
min_user_id = progress.get("max_summary_user_id", "")
min_room_id = progress.get("max_summary_room_id", "")
# Slightly overcomplicated query for getting the Nth user ID / room
# ID tuple, or the last if there are less than N remaining.
sql = """
SELECT user_id, room_id FROM (
SELECT user_id, room_id FROM event_push_summary
WHERE (user_id, room_id) > (?, ?)
AND thread_id IS NULL
ORDER BY user_id, room_id
LIMIT ?
) AS e
ORDER BY user_id DESC, room_id DESC
LIMIT 1
"""
txn.execute(sql, (min_user_id, min_room_id, batch_size))
row = txn.fetchone()
if not row:
return 0
max_user_id, max_room_id = row
sql = """
UPDATE event_push_summary
SET thread_id = 'main'
WHERE
(?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
AND thread_id IS NULL
"""
txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
processed_rows = txn.rowcount
progress["max_summary_user_id"] = max_user_id
progress["max_summary_room_id"] = max_room_id
self.db_pool.updates._background_update_progress_txn(
txn, "event_push_backfill_thread_id", progress
)
@@ -360,15 +409,12 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
result = await self.db_pool.runInteraction(
"event_push_backfill_thread_id",
add_thread_id_txn,
"event_push_actions",
progress.get("max_event_push_actions_stream_ordering", 0),
)
else:
result = await self.db_pool.runInteraction(
"event_push_backfill_thread_id",
add_thread_id_txn,
"event_push_summary",
progress.get("max_event_push_summary_stream_ordering", 0),
add_thread_id_summary_txn,
)
# Only done after the event_push_summary table is done.
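
Both replacement transactions batch with a keyset bound: remember the highest key processed so far and constrain each `UPDATE` between the previous maximum and the new batch's maximum, so a batch never revisits rows it has already backfilled. Below is a hedged, standalone sketch of that pattern against a throwaway SQLite table; the table name, schema, and driver are illustrative assumptions, not Synapse's database layer.

```python
# Hypothetical, standalone illustration of keyset-bounded batching,
# run against a throwaway SQLite table rather than Synapse's schema.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE push_actions (stream_ordering INTEGER PRIMARY KEY, thread_id TEXT)"
)
conn.executemany(
    "INSERT INTO push_actions (stream_ordering) VALUES (?)",
    [(i,) for i in range(1, 101)],
)


def backfill_batch(last_max: int, batch_size: int) -> int:
    """Backfill one batch and return the new high-water mark (unchanged => done)."""
    # Find the highest stream_ordering within the next `batch_size` unprocessed rows.
    rows = conn.execute(
        """
        SELECT stream_ordering FROM push_actions
        WHERE thread_id IS NULL AND stream_ordering > ?
        ORDER BY stream_ordering LIMIT ?
        """,
        (last_max, batch_size),
    ).fetchall()
    if not rows:
        return last_max
    new_max = rows[-1][0]
    # Bound the UPDATE on both sides so already-processed rows are never rescanned.
    conn.execute(
        """
        UPDATE push_actions SET thread_id = 'main'
        WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
        """,
        (last_max, new_max),
    )
    return new_max


progress = 0
while True:
    new_progress = backfill_batch(progress, batch_size=10)
    if new_progress == progress:
        break
    progress = new_progress
```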