commit dedd4cd061
Merge remote-tracking branch 'origin/release-v1.69' into matrix-org-hotfixes
@@ -0,0 +1,17 @@
+version: 2
+updates:
+  - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+    package-ecosystem: "pip"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "docker"
+    directory: "/docker"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
@@ -0,0 +1,46 @@
+name: Write changelog for dependabot PR
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened  # For debugging!
+
+permissions:
+  # Needed to be able to push the commit. See
+  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
+  # for a similar example
+  contents: write
+
+jobs:
+  add-changelog:
+    runs-on: 'ubuntu-latest'
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.ref }}
+      - name: Write, commit and push changelog
+        run: |
+          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
+          git add changelog.d
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git config user.name "GitHub Actions"
+          git commit -m "Changelog"
+          git push
+        shell: bash
+      # The `git push` above does not trigger CI on the dependabot PR.
+      #
+      # By default, workflows can't trigger other workflows when they're just using the
+      # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
+      # recursive workflow loops by accident, because that'll get very expensive very
+      # quickly.) Instead, you have to manually call out to another workflow, or else
+      # make your changes (i.e. the `git push` above) using a personal access token.
+      # See
+      # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
+      #
+      # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
+      # See git commit history for previous attempts. If anyone desperately wants to try
+      # again in the future, make a matrix-bot account and use its access token to git push.
+
+# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
+# are sufficiently locked down to dependabot only as above.
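For reference, the changelog step above boils down to a single file write. A minimal Python sketch of the same logic (illustrative only; `pr_title` and `pr_number` stand in for the `github.event.pull_request` fields the workflow interpolates):

```python
from pathlib import Path

def write_dependabot_changelog(pr_title: str, pr_number: int) -> Path:
    """Mirror the workflow step: one `.misc` entry per PR under changelog.d/."""
    entry = Path("changelog.d") / f"{pr_number}.misc"
    # The trailing period matches the `echo "${{ ... }}."` in the workflow.
    entry.write_text(f"{pr_title}.\n")
    return entry
```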
@@ -17,19 +17,19 @@ jobs:
     steps:
       - name: Set up QEMU
         id: qemu
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: arm64

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2

       - name: Inspect builder
         run: docker buildx inspect

       - name: Log in to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,7 +48,7 @@ jobs:
             type=pep440,pattern={{raw}}

       - name: Build and push all platforms
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
@@ -17,7 +17,7 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

      - name: Setup mdbook
        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
@@ -25,7 +25,7 @@ jobs:
   mypy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
@@ -59,7 +59,7 @@ jobs:
         postgres-version: "14"

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -133,7 +133,7 @@ jobs:
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -155,7 +155,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -182,8 +182,8 @@ jobs:
             database: Postgres

     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse

@@ -210,7 +210,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -11,6 +11,7 @@ on:
     # we do the full build on tags.
     tags: ["v*"]
+  workflow_dispatch:

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -24,7 +25,7 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - id: set-distros
         run: |
@@ -49,18 +50,18 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           path: src

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
         with:
           install: true

       - name: Set up docker layer caching
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -84,7 +85,7 @@ jobs:
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache

       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: debs
           path: debs/*
@@ -145,7 +146,7 @@ jobs:
       - name: Build sdist
         run: python -m build --sdist

-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: Sdist
           path: dist/*.tar.gz
@@ -162,7 +163,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
       - name: Build a tarball for the debs
         run: tar -cvJf debs.tar.xz debs
       - name: Attach to release
@@ -4,6 +4,7 @@ on:
   push:
     branches: ["develop", "release-*"]
   pull_request:
+  workflow_dispatch:

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -30,7 +31,7 @@ jobs:
   check-sampleconfig:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -41,7 +42,7 @@ jobs:
   check-schema-delta:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors
@@ -54,15 +55,15 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh

   lint-newsfile:
-    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
+    if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
@@ -75,7 +76,7 @@ jobs:
   lint-pydantic:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - uses: matrix-org/setup-python-poetry@v1
@@ -89,7 +90,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -107,7 +108,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -140,7 +141,7 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: actions/setup-python@v2
       - id: get-matrix
         run: .ci/scripts/calculate_jobs.py
@@ -157,7 +158,7 @@ jobs:
       job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
@@ -199,7 +200,7 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -270,7 +271,7 @@ jobs:
       extras: ["all"]

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
       - uses: matrix-org/setup-python-poetry@v1
@@ -313,7 +314,7 @@ jobs:
       job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

@@ -331,7 +332,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -361,7 +362,7 @@ jobs:
           --health-retries 5

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -402,7 +403,7 @@ jobs:
           --health-retries 5

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -444,8 +445,8 @@ jobs:
             database: Postgres

     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse

@@ -473,7 +474,7 @@ jobs:
       - changes

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1

       - name: Install Rust
@@ -81,7 +81,7 @@ jobs:
       - ${{ github.workspace }}:/src

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -112,7 +112,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -138,8 +138,8 @@ jobs:
             database: Postgres

     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse

@@ -177,7 +177,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CHANGES.md (107 changed lines)
@@ -1,3 +1,110 @@
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`, per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track of when an event pulled over federation fails its signature check so we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
+
+Bugfixes
+--------
+
+- Send invite push notifications for invites over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons for when to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify URLs in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Fix unstable MSC3882 endpoint being incorrectly available on stable API versions. ([\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Bump docker/login-action from 1 to 2. ([\#13978](https://github.com/matrix-org/synapse/issues/13978))
+- Bump actions/download-artifact from 2 to 3. ([\#13979](https://github.com/matrix-org/synapse/issues/13979))
+- Bump actions/cache from 2 to 3. ([\#13980](https://github.com/matrix-org/synapse/issues/13980))
+- Bump actions/checkout from 2 to 3. ([\#13982](https://github.com/matrix-org/synapse/issues/13982))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Bump docker/setup-buildx-action from 1 to 2. ([\#14015](https://github.com/matrix-org/synapse/issues/14015))
+- Bump docker/setup-qemu-action from 1 to 2. ([\#14019](https://github.com/matrix-org/synapse/issues/14019))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump docker/build-push-action from 2 to 3. ([\#14022](https://github.com/matrix-org/synapse/issues/14022))
+- Bump actions/upload-artifact from 2 to 3. ([\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
 Synapse 1.68.0 (2022-09-27)
 ===========================

@@ -15,6 +15,9 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
         path=cargo_toml_path,
         binding=Binding.PyO3,
         py_limited_api=True,
+        # We force always building in release mode, as we can't tell the
+        # difference between using `poetry` in development vs production.
+        debug=False,
     )
     setup_kwargs.setdefault("rust_extensions", []).append(extension)
     setup_kwargs["zip_safe"] = False
@@ -1 +0,0 @@
-Speed up creation of DM rooms.
@@ -1 +0,0 @@
-Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)).
@@ -1 +0,0 @@
-Exponentially back off from backfilling the same event over and over.
@@ -1 +0,0 @@
-Add cache invalidation across workers to module API.
@@ -1 +0,0 @@
-Send invite push notifications for invites over federation.
@@ -1 +0,0 @@
-Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session.
@@ -1 +0,0 @@
-Port push rules to using Rust.
@@ -1 +0,0 @@
-Add `worker_main_http_uri` for the worker generator bash script.
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
@@ -1 +0,0 @@
-Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Update the script which makes full schema dumps.
@@ -1 +0,0 @@
-Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar).
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
@@ -1 +0,0 @@
-Speed up creation of DM rooms.
@@ -1 +0,0 @@
-Improve the `synapse.api.auth.Auth` mock used in unit tests.
@@ -1 +0,0 @@
-Update URL for the NixOS module for Synapse.
@@ -1 +0,0 @@
-Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server.
@@ -1 +0,0 @@
-Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join.
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
@@ -1 +0,0 @@
-Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint.
@@ -1 +0,0 @@
-Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`.
@@ -1 +0,0 @@
-Port push rules to using Rust.
@@ -1 +0,0 @@
-Carry IdP Session IDs through user-mapping sessions.
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward.
@@ -1 +0,0 @@
-Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0.
@@ -1 +0,0 @@
-Fix the release script not publishing binary wheels.
@@ -1 +0,0 @@
-Fix access token leak to logs from proxy agent.
@@ -1 +0,0 @@
-Raise issue if complement fails with latest deps.
@@ -1 +0,0 @@
-Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881).
@@ -1 +0,0 @@
-Fix `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls.
@@ -1 +0,0 @@
-Correct the comments in the complement dockerfile.
@@ -1 +0,0 @@
-Fix unstable MSC3882 endpoint being incorrectly available on stable API versions.
@@ -1 +0,0 @@
-Fix a cross-link from the register admin API to the `registration_shared_secret` configuration documentation.
@@ -1 +0,0 @@
-Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room.
@@ -1 +0,0 @@
-Create a new snapshot of the database schema.
@@ -1 +0,0 @@
-Faster room joins: Send device list updates to most servers in rooms with partial state.
@@ -1 +0,0 @@
-Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console.
@@ -1 +0,0 @@
-Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`.
@@ -1 +0,0 @@
-Correctly handle a race with device lists when a remote user leaves during a partial join.
@@ -1 +0,0 @@
-Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests.
@@ -1 +0,0 @@
-Port push rules to using Rust.
@@ -1 +0,0 @@
-Improve backfill robustness by trying more servers when we get a `4xx` error back.
@@ -1 +0,0 @@
-Faster remote room joins: record _when_ we first partial-join to a room.
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
@@ -1 +0,0 @@
-Fix a bug introduced in 1.66 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico.
@@ -1 +0,0 @@
-Fix mypy errors with canonicaljson 1.6.3.
@@ -1 +0,0 @@
-Fix packaging to include `Cargo.lock` in `sdist`.
@@ -1 +0,0 @@
-Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
@@ -1 +0,0 @@
-Faster remote room joins: correctly handle remote device list updates during a partial join.
@@ -1 +0,0 @@
-Complement image: propagate SIGTERM to all workers.
@@ -1 +0,0 @@
-Emphasize the right reasons for when to use `(room_id, event_id)` in a database schema.
@@ -1 +0,0 @@
-Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715).
@@ -1 +0,0 @@
-Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation.
@@ -1 +0,0 @@
-Update an inaccurate comment in Synapse's upsert database helper.
@@ -1 +0,0 @@
-Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame.
@@ -1 +0,0 @@
-Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed.
@@ -1 +0,0 @@
-Clarify that the `auto_join_rooms` config option can also be used with Space aliases.
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
@@ -1 +0,0 @@
-Correctly handle sending local device list updates to remote servers during a partial join.
@@ -1 +0,0 @@
-Exponentially back off from backfilling the same event over and over.
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
@@ -1 +0,0 @@
-Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)).
@@ -1 +0,0 @@
-Add cache invalidation across workers to module API.
@@ -1 +0,0 @@
-Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time.
@@ -1 +0,0 @@
-Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses).
@@ -1 +0,0 @@
-Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0.
@@ -1 +0,0 @@
-Speed up calculating push actions in large rooms.
@@ -1 +0,0 @@
-Add some cross references to worker documentation.
@@ -1,9 +1,10 @@
-matrix-synapse-py3 (1.69.0~rc1+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium

   * The man page for the hash_password script has been updated to reflect
     the correct default value of 'bcrypt_rounds'.
+  * New Synapse release 1.69.0rc1.

- -- Synapse Packaging team <packages@matrix.org>  Mon, 26 Sep 2022 18:05:09 +0100
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Oct 2022 11:17:16 +0100

 matrix-synapse-py3 (1.68.0) stable; urgency=medium
@@ -135,6 +135,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
 convention of the upstream `prometheus_client`. The old names are
 considered deprecated and will be removed in a future version of
 Synapse.
+**The old names will be disabled by default in Synapse v1.71.0 and removed
+altogether in Synapse v1.73.0.**

 | New Name | Old Name |
 | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@@ -146,6 +148,13 @@ Synapse.
 | synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
 | synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
 | synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
+| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
+| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
+| synapse_util_caches_cache | synapse_util_caches_cache:total |
+| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
+| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
+| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
 | synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
 | synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
 | synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@@ -261,7 +270,7 @@ Standard Metric Names

 As of synapse version 0.18.2, the format of the process-wide metrics has
 been changed to fit prometheus standard naming conventions. Additionally
-the units have been changed to seconds, from miliseconds.
+the units have been changed to seconds, from milliseconds.

 | New name | Old name |
 | ---------------------------------------- | --------------------------------- |
@@ -100,6 +100,34 @@ vice versa.
 Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
 replication will resume as normal.

+
+## Deprecation of legacy Prometheus metric names
+
+In current versions of Synapse, some Prometheus metrics are emitted under two different names,
+with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
+and one of the names being newer but compliant.
+
+Synapse v1.71.0 will turn the old metric names off *by default*.
+For administrators that still rely on them and have not yet had a chance to update their
+uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
+the configuration to re-enable them temporarily.
+
+Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
+be possible to re-enable them.
+
+The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
+in the `contrib` directory in the Synapse repository have been updated to no longer
+rely on the legacy names. These can be used on a current version of Synapse
+because current versions of Synapse emit both old and new names.
+
+You may need to update your alerting rules or any other rules that depend on
+the names of Prometheus metrics.
+If you want to test your changes before legacy names are disabled by default,
+you may specify `enable_legacy_metrics: false` in your homeserver configuration.
+
+A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).
+
+
 # Upgrading to v1.68.0

 Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.
@@ -179,7 +179,7 @@ This will tell other servers to send traffic to port 443 instead.

 This option currently defaults to false.

-See https://matrix-org.github.io/synapse/latest/delegate.html for more
+See [Delegation of incoming federation traffic](../../delegate.md) for more
 information.

 Example configuration:
@@ -2436,6 +2436,31 @@ Example configuration:
 enable_metrics: true
 ```
 ---
+### `enable_legacy_metrics`
+
+Set to `true` to publish both legacy and non-legacy Prometheus metric names,
+or to `false` to only publish non-legacy Prometheus metric names.
+Defaults to `true`. Has no effect if `enable_metrics` is `false`.
+**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
+
+Legacy metric names include:
+- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
+- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
+
+These legacy metric names are unconventional and not compliant with OpenMetrics standards.
+They are included for backwards compatibility.
+
+Example configuration:
+```yaml
+enable_legacy_metrics: false
+```
+
+See https://github.com/matrix-org/synapse/issues/11106 for context.
+
+*Since v1.67.0.*
+
+**Will be removed in v1.73.0.**
+---
 ### `sentry`

 Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@@ -2952,7 +2977,7 @@ Options for each entry include:

 * `module`: The class name of a custom mapping module. Default is
   `synapse.handlers.oidc.JinjaOidcMappingProvider`.
-  See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+  See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
   for information on implementing a custom mapping provider.

 * `config`: Configuration for the mapping provider module. This section will
@@ -3393,13 +3418,15 @@ This option has the following sub-options:
   the user directory. If false, search results will only contain users
   visible in public rooms and users sharing a room with the requester.
   Defaults to false.

   NB. If you set this to true, and the last time the user_directory search
   indexes were (re)built was before Synapse 1.44, you'll have to
   rebuild the indexes in order to search through all known users.

   These indexes are built the first time Synapse starts; admins can
-  manually trigger a rebuild via API following the instructions at
-  https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
-  Set to true to return search results containing all known users, even if that
+  manually trigger a rebuild via the API following the instructions
+  [for running background updates](../administration/admin_api/background_updates.md#run),
+  set to true to return search results containing all known users, even if that
   user does not share a room with the requester.
 * `prefer_local_users`: Defines whether to prefer local users in search query results.
   If set to true, local users are more likely to appear above remote users when searching the
@@ -573,11 +573,11 @@ python-versions = "*"

 [[package]]
 name = "mypy"
-version = "0.950"
+version = "0.981"
 description = "Optional static typing for Python"
 category = "dev"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"

 [package.dependencies]
 mypy-extensions = ">=0.4.3"
@@ -600,14 +600,14 @@ python-versions = "*"

 [[package]]
 name = "mypy-zope"
-version = "0.3.7"
+version = "0.3.11"
 description = "Plugin for mypy to support zope interfaces"
 category = "dev"
 optional = false
 python-versions = "*"

 [package.dependencies]
-mypy = "0.950"
+mypy = "0.981"
 "zope.interface" = "*"
 "zope.schema" = "*"

@@ -2162,37 +2162,38 @@ msgpack = [
     {file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
 ]
 mypy = [
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
-    {file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
-    {file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
-    {file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
-    {file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
-    {file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
-    {file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
-    {file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
-    {file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
-    {file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
-    {file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
-    {file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
-    {file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
-    {file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
-    {file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
-    {file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
-    {file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
-    {file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
-    {file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
-    {file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
-    {file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
+    {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
+    {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
+    {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
+    {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
+    {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
+    {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
+    {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
+    {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
+    {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
+    {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
+    {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
+    {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
+    {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
+    {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
+    {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
+    {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
+    {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
+    {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
+    {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
+    {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
+    {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
+    {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
+    {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
+    {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
 ]
 mypy-extensions = [
     {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
     {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
 ]
 mypy-zope = [
-    {file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
-    {file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
+    {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
+    {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
 ]
 netaddr = [
     {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.68.0"
+version = "1.69.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -88,10 +88,9 @@ def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:

     @functools.wraps(factory)
     def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
-        # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
-        if "strict" not in kwargs:  # type: ignore[attr-defined]
+        if "strict" not in kwargs:
             raise MissingStrictInConstrainedTypeException(factory.__name__)
-        if not kwargs["strict"]:  # type: ignore[index]
+        if not kwargs["strict"]:
             raise MissingStrictInConstrainedTypeException(factory.__name__)
         return factory(*args, **kwargs)
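What the wrapper enforces: any pydantic constrained-type factory patched by the lint script must be called with `strict=True`. A hedged usage sketch (`constr` is pydantic's real factory; the raising behaviour shown applies only while the script's monkeypatch is active):

```python
from pydantic import constr

# Accepted by the wrapper: `strict` is present and truthy.
StrictName = constr(strict=True, max_length=64)

# Under the patched factory, either of these would raise
# MissingStrictInConstrainedTypeException:
#   constr(max_length=64)                # "strict" not in kwargs
#   constr(strict=False, max_length=64)  # not kwargs["strict"]
```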
@@ -98,9 +98,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
         func: Function to be called when sent a SIGHUP signal.
         *args, **kwargs: args and kwargs to be passed to the target function.
     """
-    # This type-ignore should be redundant once we use a mypy release with
-    # https://github.com/python/mypy/pull/12668.
-    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
+    _sighup_callbacks.append((func, args, kwargs))


 def start_worker_reactor(
|
||||
|
|
|
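Both hunks above drop `type: ignore` comments that were only needed before mypy learned to type-check `*args`/`**kwargs` captured by a `ParamSpec` (https://github.com/python/mypy/pull/12668). A minimal, self-contained sketch of the pattern being checked; the names `_callbacks` and `register` are illustrative, not Synapse's:

```python
from typing import Any, Callable, Dict, List, Tuple
from typing_extensions import ParamSpec

P = ParamSpec("P")

# Each callback is stored with the args/kwargs it should be invoked with.
_callbacks: List[Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]] = []

def register(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None:
    # With mypy#12668, mypy verifies that args/kwargs match func's
    # signature here, so no `type: ignore` is needed on the append.
    _callbacks.append((func, args, kwargs))
```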
@@ -43,32 +43,6 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
    self.enable_metrics = config.get("enable_metrics", False)

    """
    ### `enable_legacy_metrics` (experimental)

    **Experimental: this option may be removed or have its behaviour
    changed at any time, with no notice.**

    Set to `true` to publish both legacy and non-legacy Prometheus metric names,
    or to `false` to only publish non-legacy Prometheus metric names.
    Defaults to `true`. Has no effect if `enable_metrics` is `false`.

    Legacy metric names include:
    - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
    - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.

    These legacy metric names are unconventional and not compliant with OpenMetrics standards.
    They are included for backwards compatibility.

    Example configuration:
    ```yaml
    enable_legacy_metrics: false
    ```

    See https://github.com/matrix-org/synapse/issues/11106 for context.

    *Since v1.67.0.*
    """
    self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)

    self.report_stats = config.get("report_stats", None)
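For illustration only (this is not Synapse's metrics code): with the standard `prometheus_client` library, a counter is exposed with the `_total` suffix appended, which is the OpenMetrics-style naming that the non-legacy names follow:

```python
from prometheus_client import Counter, generate_latest

# Exposed as `synapse_federation_client_sent_edus_total`, unlike the
# legacy name, which lacked the `_total` suffix.
sent_edus = Counter("synapse_federation_client_sent_edus", "EDUs sent over federation")
sent_edus.inc()

print(generate_latest().decode("ascii"))
```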
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Awaitable, Callable, Optional

from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -58,7 +58,12 @@ class FederationBase:

@trace
async def _check_sigs_and_hash(
    self, room_version: RoomVersion, pdu: EventBase
    self,
    room_version: RoomVersion,
    pdu: EventBase,
    record_failure_callback: Optional[
        Callable[[EventBase, str], Awaitable[None]]
    ] = None,
) -> EventBase:
    """Checks that event is correctly signed by the sending server.
@@ -70,6 +75,11 @@ class FederationBase:
Args:
    room_version: The room version of the PDU
    pdu: the event to be checked
    record_failure_callback: A callback to run whenever the given event
        fails signature or hash checks. This includes exceptions
        that would normally be thrown/raised but also things like
        checking for event tampering where we just return the redacted
        event.

Returns:
    * the original event if the checks pass
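A minimal sketch of a callback that satisfies the new parameter's type; `EventBase` is stubbed out here, and the real caller (further down in this diff) records the failure via `store.record_event_failed_pull_attempt`:

```python
from typing import Awaitable, Callable, Optional

class EventBase:  # stand-in for synapse.events.EventBase
    def __init__(self, event_id: str, room_id: str) -> None:
        self.event_id = event_id
        self.room_id = room_id

# The type of the new parameter:
RecordFailureCallback = Optional[Callable[[EventBase, str], Awaitable[None]]]

async def record_failure(pdu: EventBase, cause: str) -> None:
    # Illustrative hook; Synapse persists this as a failed pull attempt.
    print(f"event {pdu.event_id} in {pdu.room_id} failed checks: {cause}")
```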
@@ -80,7 +90,12 @@ class FederationBase:
    InvalidEventSignatureError if the signature check failed. Nothing
        will be logged in this case.
"""
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
try:
    await _check_sigs_on_pdu(self.keyring, room_version, pdu)
except InvalidEventSignatureError as exc:
    if record_failure_callback:
        await record_failure_callback(pdu, str(exc))
    raise exc

if not check_event_content_hash(pdu):
    # let's try to distinguish between failures because the event was
@@ -116,6 +131,10 @@ class FederationBase:
        "event_id": pdu.event_id,
    }
)
if record_failure_callback:
    await record_failure_callback(
        pdu, "Event content has been tampered with"
    )
return redacted_event

spam_check = await self.spam_checker.check_event_for_spam(pdu)
@@ -278,7 +278,7 @@ class FederationClient(FederationBase):
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]

# Check signatures and hash of pdus, removing any from the list that fail checks
pdus[:] = await self._check_sigs_and_hash_and_fetch(
pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
    dest, pdus, room_version=room_version
)
@@ -328,7 +328,17 @@ class FederationClient(FederationBase):

# Check signatures are correct.
try:
    signed_pdu = await self._check_sigs_and_hash(room_version, pdu)

    async def _record_failure_callback(
        event: EventBase, cause: str
    ) -> None:
        await self.store.record_event_failed_pull_attempt(
            event.room_id, event.event_id, cause
        )

    signed_pdu = await self._check_sigs_and_hash(
        room_version, pdu, _record_failure_callback
    )
except InvalidEventSignatureError as e:
    errmsg = f"event id {pdu.event_id}: {e}"
    logger.warning("%s", errmsg)
@@ -547,24 +557,28 @@ class FederationClient(FederationBase):
    len(auth_event_map),
)

valid_auth_events = await self._check_sigs_and_hash_and_fetch(
valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
    destination, auth_event_map.values(), room_version
)

valid_state_events = await self._check_sigs_and_hash_and_fetch(
    destination, state_event_map.values(), room_version
valid_state_events = (
    await self._check_sigs_and_hash_for_pulled_events_and_fetch(
        destination, state_event_map.values(), room_version
    )
)

return valid_state_events, valid_auth_events

@trace
async def _check_sigs_and_hash_and_fetch(
async def _check_sigs_and_hash_for_pulled_events_and_fetch(
    self,
    origin: str,
    pdus: Collection[EventBase],
    room_version: RoomVersion,
) -> List[EventBase]:
    """Checks the signatures and hashes of a list of events.
    """
    Checks the signatures and hashes of a list of pulled events we got from
    federation and records any signature failures as failed pull attempts.

    If a PDU fails its signature check then we check if we have it in
    the database, and if not then request it from the sender's server (if that
@@ -597,11 +611,17 @@ class FederationClient(FederationBase):

valid_pdus: List[EventBase] = []

async def _record_failure_callback(event: EventBase, cause: str) -> None:
    await self.store.record_event_failed_pull_attempt(
        event.room_id, event.event_id, cause
    )

async def _execute(pdu: EventBase) -> None:
    valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
        pdu=pdu,
        origin=origin,
        room_version=room_version,
        record_failure_callback=_record_failure_callback,
    )

    if valid_pdu:
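The hunk is cut off before `_execute` is actually run over the PDUs. In Synapse this style of per-item worker is typically driven by `synapse.util.async_helpers.concurrently_execute`; a standalone asyncio sketch of that pattern (not Synapse's implementation):

```python
import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")

async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    # At most `limit` invocations of func are in flight at once; the
    # workers share one iterator, so each item is processed exactly once.
    it = iter(args)

    async def _worker() -> None:
        for arg in it:
            await func(arg)

    await asyncio.gather(*(_worker() for _ in range(limit)))
```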
@@ -618,6 +638,9 @@ class FederationClient(FederationBase):
    pdu: EventBase,
    origin: str,
    room_version: RoomVersion,
    record_failure_callback: Optional[
        Callable[[EventBase, str], Awaitable[None]]
    ] = None,
) -> Optional[EventBase]:
    """Takes a PDU and checks its signatures and hashes.
@@ -634,6 +657,11 @@ class FederationClient(FederationBase):
    origin
    pdu
    room_version
    record_failure_callback: A callback to run whenever the given event
        fails signature or hash checks. This includes exceptions
        that would normally be thrown/raised but also things like
        checking for event tampering where we just return the redacted
        event.

Returns:
    The PDU (possibly redacted) if it has valid signatures and hashes.
@@ -641,7 +669,9 @@ class FederationClient(FederationBase):
"""

try:
    return await self._check_sigs_and_hash(room_version, pdu)
    return await self._check_sigs_and_hash(
        room_version, pdu, record_failure_callback
    )
except InvalidEventSignatureError as e:
    logger.warning(
        "Signature on retrieved event %s was invalid (%s). "
@@ -694,7 +724,7 @@ class FederationClient(FederationBase):

auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]

signed_auth = await self._check_sigs_and_hash_and_fetch(
signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
    destination, auth_chain, room_version=room_version
)
@@ -1401,7 +1431,7 @@ class FederationClient(FederationBase):
    event_from_pdu_json(e, room_version) for e in content.get("events", [])
]

signed_events = await self._check_sigs_and_hash_and_fetch(
signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
    destination, events, room_version=room_version
)
except HttpResponseException as e:
@@ -130,6 +130,9 @@ class CasHandler:
except PartialDownloadError as pde:
    # Twisted raises this error if the connection is closed,
    # even if that's being used old-http style to signal end-of-data
    # Assertion is for mypy's benefit. Error.response is Optional[bytes],
    # but a PartialDownloadError should always have a non-None response.
    assert pde.response is not None
    body = pde.response
except HttpResponseException as e:
    description = (
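A self-contained sketch of the pattern the new comment documents: treating Twisted's `PartialDownloadError` as a successful end-of-body, since HTTP/1.0-style servers close the connection to signal completion. `http_client.get_raw` is a stand-in for Synapse's `SimpleHttpClient.get_raw`:

```python
from twisted.web.client import PartialDownloadError

async def fetch_body(http_client, uri: str) -> bytes:
    try:
        return await http_client.get_raw(uri)
    except PartialDownloadError as pde:
        # Error.response is Optional[bytes] in general, but a
        # PartialDownloadError carries the bytes received so far.
        assert pde.response is not None
        return pde.response
```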
@@ -866,11 +866,6 @@ class FederationEventHandler:
        event.room_id, event_id, str(err)
    )
    return
except Exception as exc:
    await self._store.record_event_failed_pull_attempt(
        event.room_id, event_id, str(exc)
    )
    raise exc

try:
    try:
@@ -913,11 +908,6 @@ class FederationEventHandler:
        logger.warning("Pulled event %s failed history check.", event_id)
    else:
        raise
except Exception as exc:
    await self._store.record_event_failed_pull_attempt(
        event.room_id, event_id, str(exc)
    )
    raise exc

@trace
async def _compute_event_context_with_maybe_missing_prevs(
@@ -78,6 +78,7 @@ class RelationsHandler:
direction: str = "b",
from_token: Optional[StreamToken] = None,
to_token: Optional[StreamToken] = None,
include_original_event: bool = False,
) -> JsonDict:
"""Get related events of an event, ordered by topological ordering.
@@ -94,6 +95,7 @@ class RelationsHandler:
        oldest first (`"f"`).
    from_token: Fetch rows from the given token, or from the start if None.
    to_token: Fetch rows up to the given token, or up to the end if None.
    include_original_event: Whether to include the parent event.

Returns:
    The pagination chunk.
@@ -138,25 +140,24 @@ class RelationsHandler:
    is_peeking=(member_event_id is None),
)

now = self._clock.time_msec()
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
original_event = self._event_serializer.serialize_event(
    event, now, bundle_aggregations=None
)
# The relations returned for the requested event do include their
# bundled aggregations.
aggregations = await self.get_bundled_aggregations(
    events, requester.user.to_string()
)
serialized_events = self._event_serializer.serialize_events(
    events, now, bundle_aggregations=aggregations
)

return_value = {
    "chunk": serialized_events,
    "original_event": original_event,
now = self._clock.time_msec()
return_value: JsonDict = {
    "chunk": self._event_serializer.serialize_events(
        events, now, bundle_aggregations=aggregations
    ),
}
if include_original_event:
    # Do not bundle aggregations when retrieving the original event because
    # we want the content before relations are applied to it.
    return_value["original_event"] = self._event_serializer.serialize_event(
        event, now, bundle_aggregations=None
    )

if next_token:
    return_value["next_batch"] = await next_token.to_string(self._main_store)
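After this change, the serialized `original_event` only appears in the response when the caller asked for it. A hedged sketch of the resulting JSON shape (all values illustrative):

```python
response = {
    "chunk": [
        {"type": "m.reaction", "event_id": "$child", "content": {}},
    ],
    # Only present when include_original_event=True (the unstable
    # client API path; see the servlet change later in this diff):
    "original_event": {"type": "m.room.message", "event_id": "$parent"},
    # Only present when there are further results to page through:
    "next_batch": "t123-456_789",
}
```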
@@ -322,6 +322,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
@@ -361,6 +362,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
historical: Indicates whether the message is being inserted
    back in time around some existing events. This is used to skip
    a few checks and mark the event as backfilled.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
    the current timestamp if set to None.

Returns:
    Tuple of event ID and stream ordering position
@@ -399,6 +402,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
    "state_key": user_id,
    # For backwards compatibility:
    "membership": membership,
    "origin_server_ts": origin_server_ts,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
@@ -504,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
@@ -542,6 +547,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
    Should normally be set to None, which will cause the depth to be calculated
    based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
    the current timestamp if set to None.

Returns:
    A tuple of the new event ID and stream ID.
@@ -597,6 +604,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
    prev_event_ids=prev_event_ids,
    state_event_ids=state_event_ids,
    depth=depth,
    origin_server_ts=origin_server_ts,
)

return result
@@ -620,6 +628,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
@@ -660,6 +669,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
    Should normally be set to None, which will cause the depth to be calculated
    based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
    the current timestamp if set to None.

Returns:
    A tuple of the new event ID and stream ID.
@@ -799,6 +810,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
    require_consent=require_consent,
    outlier=outlier,
    historical=historical,
    origin_server_ts=origin_server_ts,
)

latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@@ -1044,6 +1056,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
    content=content,
    require_consent=require_consent,
    outlier=outlier,
    origin_server_ts=origin_server_ts,
)

async def _should_perform_remote_join(
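These hunks thread `origin_server_ts` through the membership code paths so that application services can backdate membership events via the `?ts=` query parameter. A hedged usage sketch against the client-server API; the homeserver URL, token, and IDs are placeholders:

```python
import requests

url = (
    "https://homeserver.example/_matrix/client/v3/rooms/"
    "!room:example.org/state/m.room.member/@user:example.org"
)
resp = requests.put(
    url,
    params={"ts": 1577836800000},  # becomes the event's origin_server_ts
    headers={"Authorization": "Bearer APPSERVICE_TOKEN"},
    json={"membership": "join"},
)
print(resp.status_code, resp.json())
```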
@@ -1164,8 +1177,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
logger.info("Transferring room state from %s to %s", old_room_id, room_id)

# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
local_users = await self.store.get_local_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)

# Add new room to the room directory if the old room was there
# Remove old room from the room directory
@@ -119,6 +119,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
except PartialDownloadError as pde:
    # Twisted is silly
    data = pde.response
    # For mypy's benefit. A general Error.response is Optional[bytes], but
    # a PartialDownloadError.response should be bytes AFAICS.
    assert data is not None
    resp_body = json_decoder.decode(data.decode("utf-8"))

if "success" in resp_body:
@@ -13,7 +13,7 @@
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple

import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
@@ -379,7 +379,7 @@ class UserDirectoryHandler(StateDeltasHandler):
    user_id, event.content.get("displayname"), event.content.get("avatar_url")
)

async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
    """Someone's just joined a room. Update `users_in_public_rooms` or
    `users_who_share_private_rooms` as appropriate.
@@ -390,32 +390,44 @@ class UserDirectoryHandler(StateDeltasHandler):
    room_id
)
if is_public:
    await self.store.add_users_in_public_rooms(room_id, (user_id,))
    await self.store.add_users_in_public_rooms(room_id, (joining_user_id,))
else:
    users_in_room = await self.store.get_users_in_room(room_id)
    other_users_in_room = [
        other
        for other in users_in_room
        if other != user_id
        if other != joining_user_id
        and (
            # We can't apply any special rules to remote users so
            # they're always included
            not self.is_mine_id(other)
            # Check the special rules whether the local user should be
            # included in the user directory
            or await self.store.should_include_local_user_in_dir(other)
        )
    ]
    to_insert = set()
    updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set()

    # First, if they're our user then we need to update for every user
    if self.is_mine_id(user_id):
    # First, if the joining user is our local user then we need an
    # update for every other user in the room.
    if self.is_mine_id(joining_user_id):
        for other_user_id in other_users_in_room:
            to_insert.add((user_id, other_user_id))
            updates_to_users_who_share_rooms.add(
                (joining_user_id, other_user_id)
            )

    # Next we need to update for every local user in the room
    # Next, we need an update for every other local user in the room
    # that they now share a room with the joining user.
    for other_user_id in other_users_in_room:
        if self.is_mine_id(other_user_id):
            to_insert.add((other_user_id, user_id))
            updates_to_users_who_share_rooms.add(
                (other_user_id, joining_user_id)
            )

    if to_insert:
        await self.store.add_users_who_share_private_room(room_id, to_insert)
    if updates_to_users_who_share_rooms:
        await self.store.add_users_who_share_private_room(
            room_id, updates_to_users_who_share_rooms
        )

async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
    """Called when someone leaves a room. The user may be local or remote.
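The pair-building above can be read as a pure function over the room's membership: a local joiner gains a row against every other eligible user, and every other local user gains a row against the joiner. A self-contained sketch, with `is_local` standing in for `self.is_mine_id`:

```python
from typing import Callable, Iterable, Set, Tuple

def sharing_pairs(
    joining_user_id: str,
    others_in_room: Iterable[str],
    is_local: Callable[[str], bool],
) -> Set[Tuple[str, str]]:
    updates: Set[Tuple[str, str]] = set()
    joiner_is_local = is_local(joining_user_id)
    for other in others_in_room:
        if joiner_is_local:
            updates.add((joining_user_id, other))
        if is_local(other):
            updates.add((other, joining_user_id))
    return updates

# sharing_pairs("@a:hs", ["@b:hs", "@c:remote"], lambda u: u.endswith(":hs"))
# -> {("@a:hs", "@b:hs"), ("@a:hs", "@c:remote"), ("@b:hs", "@a:hs")}
```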
@@ -586,7 +586,7 @@ class LoggingContextFilter(logging.Filter):
    True to include the record in the log output.
"""
context = current_context()
record.request = self._default_request # type: ignore
record.request = self._default_request

# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
@@ -594,21 +594,21 @@ class LoggingContextFilter(logging.Filter):
if context is not None:
    # Logging is interested in the request ID. Note that for backwards
    # compatibility this is stored as the "request" on the record.
    record.request = str(context) # type: ignore
    record.request = str(context)

    # Add some data from the HTTP request.
    request = context.request
    if request is None:
        return True

    record.ip_address = request.ip_address # type: ignore
    record.site_tag = request.site_tag # type: ignore
    record.requester = request.requester # type: ignore
    record.authenticated_entity = request.authenticated_entity # type: ignore
    record.method = request.method # type: ignore
    record.url = request.url # type: ignore
    record.protocol = request.protocol # type: ignore
    record.user_agent = request.user_agent # type: ignore
    record.ip_address = request.ip_address
    record.site_tag = request.site_tag
    record.requester = request.requester
    record.authenticated_entity = request.authenticated_entity
    record.method = request.method
    record.url = request.url
    record.protocol = request.protocol
    record.user_agent = request.user_agent

return True
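Because the filter attaches these attributes to every record, they can be referenced directly from a logging format string. A hedged sketch (the format fields follow what the filter sets above):

```python
import logging

# `request` falls back to the filter's default when no logging context
# is active; the other fields are set when an HTTP request is in scope.
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - [%(request)s] %(message)s"
)
```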
@@ -992,9 +992,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
# FIXME: We could update this to handle any type of function by ignoring the
# first argument only if it's named `self` or `cls`. This isn't fool-proof
# but handles the idiomatic cases.
for i, arg in enumerate(args[1:], start=1): # type: ignore[index]
for i, arg in enumerate(args[1:], start=1):
    set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) # type: ignore[index]
set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
yield
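A standalone sketch of what the loop above does, with a print-based stand-in for opentracing's `set_tag` and illustrative tag names:

```python
import inspect

def set_tag(key: str, value: str) -> None:
    print(f"{key} = {value}")  # stand-in for opentracing's set_tag

def greet(self, name: str, punctuation: str = "!") -> None: ...

argspec = inspect.getfullargspec(greet)
args = ("<self>", "world", "?")

# Skip args[0] (assumed `self`/`cls`), tag each named positional arg,
# then lump any overflow positionals together.
for i, arg in enumerate(args[1:], start=1):
    set_tag("ARG_" + argspec.args[i], str(arg))
set_tag("ARGS", str(args[len(argspec.args) :]))
```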
@@ -332,6 +332,11 @@ class BulkPushRuleEvaluator:
    # Push rules say we should notify the user of this event
    actions_by_user[uid] = actions

# If there aren't any actions then we can skip the rest of the
# processing.
if not actions_by_user:
    return

# This is a check for the case where user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
@@ -82,6 +82,11 @@ class RelationPaginationServlet(RestServlet):
if to_token_str:
    to_token = await StreamToken.from_string(self.store, to_token_str)

# The unstable version of this API returns an extra field for client
# compatibility, see https://github.com/matrix-org/synapse/issues/12930.
assert request.path is not None
include_original_event = request.path.startswith(b"/_matrix/client/unstable/")

result = await self._relations_handler.get_relations(
    requester=requester,
    event_id=parent_id,
@@ -92,6 +97,7 @@ class RelationPaginationServlet(RestServlet):
    direction=direction,
    from_token=from_token,
    to_token=to_token,
    include_original_event=include_original_event,
)

return 200, result
@@ -268,15 +268,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):

content = parse_json_object_from_request(request)

event_dict = {
    "type": event_type,
    "content": content,
    "room_id": room_id,
    "sender": requester.user.to_string(),
}

if state_key is not None:
    event_dict["state_key"] = state_key
origin_server_ts = None
if requester.app_service:
    origin_server_ts = parse_integer(request, "ts")

try:
    if event_type == EventTypes.Member:
@@ -287,8 +281,22 @@ class RoomStateEventRestServlet(TransactionRestServlet):
        room_id=room_id,
        action=membership,
        content=content,
        origin_server_ts=origin_server_ts,
    )
else:
    event_dict: JsonDict = {
        "type": event_type,
        "content": content,
        "room_id": room_id,
        "sender": requester.user.to_string(),
    }

    if state_key is not None:
        event_dict["state_key"] = state_key

    if origin_server_ts is not None:
        event_dict["origin_server_ts"] = origin_server_ts

    (
        event,
        _,
@@ -333,10 +341,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
    "sender": requester.user.to_string(),
}

# Twisted will have processed the args by now.
assert request.args is not None
if b"ts" in request.args and requester.app_service:
    event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
if requester.app_service:
    origin_server_ts = parse_integer(request, "ts")
    if origin_server_ts is not None:
        event_dict["origin_server_ts"] = origin_server_ts

try:
    (
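The behavioural nuance here: the old code defaulted a missing `ts` to `0`, while the new code only stamps `origin_server_ts` when the query parameter is actually present, since `parse_integer(request, "ts")` returns `None` when absent. A sketch of that contract with a hypothetical stand-in for Synapse's helper:

```python
from typing import Dict, List, Optional

def parse_integer(args: Dict[bytes, List[bytes]], name: str) -> Optional[int]:
    # Stand-in for synapse.http.servlet.parse_integer: None when absent.
    values = args.get(name.encode("ascii"))
    return int(values[0]) if values else None

event_dict: Dict[str, int] = {}
origin_server_ts = parse_integer({b"ts": [b"1577836800000"]}, "ts")
if origin_server_ts is not None:
    event_dict["origin_server_ts"] = origin_server_ts  # only set when given
```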