From 79eb6c0cdc15ccb5083368c923653862a4d2d23a Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 29 Sep 2023 12:19:38 +0100 Subject: [PATCH 001/142] Support rendering some media downloads as inline (#15988) Use an `inline` Content-Disposition header when the media is "safe" to display inline (some known text, image, video, audio formats). --- changelog.d/15988.feature | 1 + synapse/media/_base.py | 42 +++++++++++++++++++++++++++++-- tests/media/test_base.py | 29 ++++++++++++++++++++- tests/media/test_media_storage.py | 40 ++++++++++++++++++++++++++--- 4 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15988.feature diff --git a/changelog.d/15988.feature b/changelog.d/15988.feature new file mode 100644 index 0000000000..dee8fa597f --- /dev/null +++ b/changelog.d/15988.feature @@ -0,0 +1 @@ +Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. \ No newline at end of file diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 20cb8b9010..80c448de2b 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -50,6 +50,39 @@ TEXT_CONTENT_TYPES = [ "text/xml", ] +# A list of all content types that are "safe" to be rendered inline in a browser. +INLINE_CONTENT_TYPES = [ + "text/css", + "text/plain", + "text/csv", + "application/json", + "application/ld+json", + # We allow some media files deemed as safe, which comes from the matrix-react-sdk. + # https://github.com/matrix-org/matrix-react-sdk/blob/a70fcfd0bcf7f8c85986da18001ea11597989a7c/src/utils/blobs.ts#L51 + # SVGs are *intentionally* omitted. + "image/jpeg", + "image/gif", + "image/png", + "image/apng", + "image/webp", + "image/avif", + "video/mp4", + "video/webm", + "video/ogg", + "video/quicktime", + "audio/mp4", + "audio/webm", + "audio/aac", + "audio/mpeg", + "audio/ogg", + "audio/wave", + "audio/wav", + "audio/x-wav", + "audio/x-pn-wav", + "audio/flac", + "audio/x-flac", +] + def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: """Parses the server name, media ID and optional file name from the request URI @@ -153,8 +186,13 @@ def add_file_headers( request.setHeader(b"Content-Type", content_type.encode("UTF-8")) - # Use a Content-Disposition of attachment to force download of media. - disposition = "attachment" + # A strict subset of content types is allowed to be inlined so that they may + # be viewed directly in a browser. Other file types are forced to be downloads. + if media_type.lower() in INLINE_CONTENT_TYPES: + disposition = "inline" + else: + disposition = "attachment" + if upload_name: # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. # diff --git a/tests/media/test_base.py b/tests/media/test_base.py index 4728c80969..119d7ba66f 100644 --- a/tests/media/test_base.py +++ b/tests/media/test_base.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.media._base import get_filename_from_headers +from unittest.mock import Mock + +from synapse.media._base import add_file_headers, get_filename_from_headers from tests import unittest @@ -36,3 +38,28 @@ class GetFileNameFromHeadersTests(unittest.TestCase): expected, f"expected output for {hdr!r} to be {expected} but was {res}", ) + + +class AddFileHeadersTests(unittest.TestCase): + TEST_CASES = { + "text/plain": b"inline; filename=file.name", + "text/csv": b"inline; filename=file.name", + "image/png": b"inline; filename=file.name", + "text/html": b"attachment; filename=file.name", + "any/thing": b"attachment; filename=file.name", + } + + def test_content_disposition(self) -> None: + for media_type, expected in self.TEST_CASES.items(): + request = Mock() + add_file_headers(request, media_type, 0, "file.name") + request.setHeader.assert_any_call(b"Content-Disposition", expected) + + def test_no_filename(self) -> None: + request = Mock() + add_file_headers(request, "text/plain", 0, None) + request.setHeader.assert_any_call(b"Content-Disposition", b"inline") + + request.reset_mock() + add_file_headers(request, "text/html", 0, None) + request.setHeader.assert_any_call(b"Content-Disposition", b"attachment") diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index ea0051dde4..04fc7bdcef 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -129,6 +129,8 @@ class _TestImage: a 404/400 is expected. unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or False if the thumbnailing should succeed or a normal 404 is expected. + is_inline: True if we expect the file to be served using an inline + Content-Disposition or False if we expect an attachment. """ data: bytes @@ -138,6 +140,7 @@ class _TestImage: expected_scaled: Optional[bytes] = None expected_found: bool = True unable_to_thumbnail: bool = False + is_inline: bool = True @parameterized_class( @@ -198,6 +201,25 @@ class _TestImage: unable_to_thumbnail=True, ), ), + # An SVG. 
+ ( + _TestImage( + b""" + + + + +""", + b"image/svg", + b".svg", + expected_found=False, + unable_to_thumbnail=True, + is_inline=False, + ), + ), ], ) class MediaRepoTests(unittest.HomeserverTestCase): @@ -339,7 +361,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.assertEqual( headers.getRawHeaders(b"Content-Disposition"), - [b"attachment; filename=out" + self.test_image.extension], + [ + (b"inline" if self.test_image.is_inline else b"attachment") + + b"; filename=out" + + self.test_image.extension + ], ) def test_disposition_filenamestar_utf8escaped(self) -> None: @@ -359,7 +385,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.assertEqual( headers.getRawHeaders(b"Content-Disposition"), - [b"attachment; filename*=utf-8''" + filename + self.test_image.extension], + [ + (b"inline" if self.test_image.is_inline else b"attachment") + + b"; filename*=utf-8''" + + filename + + self.test_image.extension + ], ) def test_disposition_none(self) -> None: @@ -373,7 +404,10 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.assertEqual( headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type] ) - self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), [b"attachment"]) + self.assertEqual( + headers.getRawHeaders(b"Content-Disposition"), + [b"inline" if self.test_image.is_inline else b"attachment"], + ) def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" From 20fb08ec803c324a58e0f972935a27debaac133f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Sep 2023 14:52:48 +0300 Subject: [PATCH 002/142] Downgrade repl stream time out error to warning (#16401) This is because if a worker reaches ~100% CPU then everything starts lagging and we hit the log line a lot. When at error we invoke sentry and that has a lot of overhead, which then puts even more pressure on the worker. --- changelog.d/16401.misc | 1 + synapse/replication/tcp/client.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16401.misc diff --git a/changelog.d/16401.misc b/changelog.d/16401.misc new file mode 100644 index 0000000000..86d2749a08 --- /dev/null +++ b/changelog.d/16401.misc @@ -0,0 +1 @@ +Downgrade replication stream time out error log lines to warning. 
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 1c7946522a..f4f2b29e96 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -339,7 +339,7 @@ class ReplicationDataHandler: try: await make_deferred_yieldable(deferred) except defer.TimeoutError: - logger.error( + logger.warning( "Timed out waiting for repl stream %r to reach %s (%s)" "; currently at: %s", stream_name, From 451c08d868ea6431c367d72f6bbbc1ced41469d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:33:59 +0100 Subject: [PATCH 003/142] Bump regex from 1.9.5 to 1.9.6 (#16408) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea9aa18a5c..084b8b91c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -291,9 +291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.5" +version = "1.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" dependencies = [ "aho-corasick", "memchr", @@ -303,9 +303,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" dependencies = [ "aho-corasick", "memchr", From 18b453488f27496195453af909c3ed9841970d4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:34:22 +0100 Subject: [PATCH 004/142] Bump psycopg2 from 2.9.7 to 2.9.8 (#16409) --- poetry.lock | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/poetry.lock b/poetry.lock index bf229349cb..d5ab142faa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1749,22 +1749,22 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.7" +version = "2.9.8" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.6" files = [ - {file = "psycopg2-2.9.7-cp310-cp310-win32.whl", hash = "sha256:1a6a2d609bce44f78af4556bea0c62a5e7f05c23e5ea9c599e07678995609084"}, - {file = "psycopg2-2.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:b22ed9c66da2589a664e0f1ca2465c29b75aaab36fa209d4fb916025fb9119e5"}, - {file = "psycopg2-2.9.7-cp311-cp311-win32.whl", hash = "sha256:44d93a0109dfdf22fe399b419bcd7fa589d86895d3931b01fb321d74dadc68f1"}, - {file = "psycopg2-2.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:91e81a8333a0037babfc9fe6d11e997a9d4dac0f38c43074886b0d9dead94fe9"}, - {file = "psycopg2-2.9.7-cp37-cp37m-win32.whl", hash = "sha256:d1210fcf99aae6f728812d1d2240afc1dc44b9e6cba526a06fb8134f969957c2"}, - {file = "psycopg2-2.9.7-cp37-cp37m-win_amd64.whl", hash = "sha256:e9b04cbef584310a1ac0f0d55bb623ca3244c87c51187645432e342de9ae81a8"}, - {file = "psycopg2-2.9.7-cp38-cp38-win32.whl", hash = "sha256:d5c5297e2fbc8068d4255f1e606bfc9291f06f91ec31b2a0d4c536210ac5c0a2"}, - {file = "psycopg2-2.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:8275abf628c6dc7ec834ea63f6f3846bf33518907a2b9b693d41fd063767a866"}, - {file = "psycopg2-2.9.7-cp39-cp39-win32.whl", hash = "sha256:c7949770cafbd2f12cecc97dea410c514368908a103acf519f2a346134caa4d5"}, - {file = 
"psycopg2-2.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:b6bd7d9d3a7a63faae6edf365f0ed0e9b0a1aaf1da3ca146e6b043fb3eb5d723"}, - {file = "psycopg2-2.9.7.tar.gz", hash = "sha256:f00cc35bd7119f1fed17b85bd1007855194dde2cbd8de01ab8ebb17487440ad8"}, + {file = "psycopg2-2.9.8-cp310-cp310-win32.whl", hash = "sha256:2f8594f92bbb5d8b59ffec04e2686c416401e2d4297de1193f8e75235937e71d"}, + {file = "psycopg2-2.9.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9ecbf504c4eaff90139d5c9b95d47275f2b2651e14eba56392b4041fbf4c2b3"}, + {file = "psycopg2-2.9.8-cp311-cp311-win32.whl", hash = "sha256:65f81e72136d8b9ac8abf5206938d60f50da424149a43b6073f1546063c0565e"}, + {file = "psycopg2-2.9.8-cp311-cp311-win_amd64.whl", hash = "sha256:f7e62095d749359b7854143843f27edd7dccfcd3e1d833b880562aa5702d92b0"}, + {file = "psycopg2-2.9.8-cp37-cp37m-win32.whl", hash = "sha256:81b21424023a290a40884c7f8b0093ba6465b59bd785c18f757e76945f65594c"}, + {file = "psycopg2-2.9.8-cp37-cp37m-win_amd64.whl", hash = "sha256:67c2f32f3aba79afb15799575e77ee2db6b46b8acf943c21d34d02d4e1041d50"}, + {file = "psycopg2-2.9.8-cp38-cp38-win32.whl", hash = "sha256:287a64ef168ef7fb9f382964705ff664b342bfff47e7242bf0a04ef203269dd5"}, + {file = "psycopg2-2.9.8-cp38-cp38-win_amd64.whl", hash = "sha256:dcde3cad4920e29e74bf4e76c072649764914facb2069e6b7fa1ddbebcd49e9f"}, + {file = "psycopg2-2.9.8-cp39-cp39-win32.whl", hash = "sha256:d4ad050ea50a16731d219c3a85e8f2debf49415a070f0b8331ccc96c81700d9b"}, + {file = "psycopg2-2.9.8-cp39-cp39-win_amd64.whl", hash = "sha256:d39bb3959788b2c9d7bf5ff762e29f436172b241cd7b47529baac77746fd7918"}, + {file = "psycopg2-2.9.8.tar.gz", hash = "sha256:3da6488042a53b50933244085f3f91803f1b7271f970f3e5536efa69314f6a49"}, ] [[package]] @@ -2170,6 +2170,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2177,8 +2178,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = 
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2195,6 +2203,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2202,6 +2211,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = 
"PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, From 36c8b66403f0a59a07a76054ae8c9f00f831f579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:11 +0100 Subject: [PATCH 005/142] Bump pydantic from 2.3.0 to 2.4.2 (#16410) --- poetry.lock | 222 ++++++++++++++++++++++++++-------------------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/poetry.lock b/poetry.lock index d5ab142faa..83e8a71ed1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1832,18 +1832,18 @@ files = [ [[package]] name = "pydantic" -version = "2.3.0" +version = "2.4.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, + {file = "pydantic-2.4.2-py3-none-any.whl", hash = "sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1"}, + {file = "pydantic-2.4.2.tar.gz", hash = "sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" +pydantic-core = "2.10.1" typing-extensions = ">=4.6.1" [package.extras] @@ -1851,117 +1851,117 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.6.3" +version = "2.10.1" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = 
"sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = 
"pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = 
"pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, + {file = "pydantic_core-2.10.1-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63"}, + {file = "pydantic_core-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e"}, + {file = "pydantic_core-2.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6"}, + {file = "pydantic_core-2.10.1-cp310-none-win32.whl", hash = "sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b"}, + {file = "pydantic_core-2.10.1-cp310-none-win_amd64.whl", hash = "sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0"}, + {file = "pydantic_core-2.10.1-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea"}, + {file = "pydantic_core-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c"}, + {file = 
"pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8"}, + {file = "pydantic_core-2.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4"}, + {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607"}, + {file = "pydantic_core-2.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f"}, + {file = "pydantic_core-2.10.1-cp311-none-win32.whl", hash = "sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6"}, + {file = "pydantic_core-2.10.1-cp311-none-win_amd64.whl", hash = "sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27"}, + {file = "pydantic_core-2.10.1-cp311-none-win_arm64.whl", hash = "sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325"}, + {file = "pydantic_core-2.10.1-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921"}, + {file = "pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901"}, + {file = "pydantic_core-2.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d"}, + {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f"}, + {file = "pydantic_core-2.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c"}, + {file = "pydantic_core-2.10.1-cp312-none-win32.whl", hash = "sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f"}, + {file = "pydantic_core-2.10.1-cp312-none-win_amd64.whl", hash = "sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430"}, + {file = 
"pydantic_core-2.10.1-cp312-none-win_arm64.whl", hash = "sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15"}, + {file = "pydantic_core-2.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f"}, + {file = "pydantic_core-2.10.1-cp37-none-win32.whl", hash = "sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c"}, + {file = "pydantic_core-2.10.1-cp37-none-win_amd64.whl", hash = "sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e"}, + {file = "pydantic_core-2.10.1-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc"}, + {file = "pydantic_core-2.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302"}, + {file = "pydantic_core-2.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e"}, + {file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561"}, + 
{file = "pydantic_core-2.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de"}, + {file = "pydantic_core-2.10.1-cp38-none-win32.whl", hash = "sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee"}, + {file = "pydantic_core-2.10.1-cp38-none-win_amd64.whl", hash = "sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e"}, + {file = "pydantic_core-2.10.1-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970"}, + {file = "pydantic_core-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a"}, + {file = "pydantic_core-2.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429"}, + {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7"}, + {file = "pydantic_core-2.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595"}, + {file = "pydantic_core-2.10.1-cp39-none-win32.whl", hash = "sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a"}, + {file = "pydantic_core-2.10.1-cp39-none-win_amd64.whl", hash = "sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf"}, + {file = "pydantic_core-2.10.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357"}, + {file = 
"pydantic_core-2.10.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2"}, + {file = "pydantic_core-2.10.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132"}, + {file = "pydantic_core-2.10.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7"}, + {file = "pydantic_core-2.10.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776"}, + {file = "pydantic_core-2.10.1.tar.gz", hash = "sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82"}, ] [package.dependencies] From 0a59372d9d57f92536d0962d0399b21356ede906 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:30 +0100 Subject: [PATCH 006/142] Bump types-netaddr from 0.8.0.9 to 0.9.0.1 (#16411) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 83e8a71ed1..f525bc874b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3070,13 +3070,13 @@ files = [ [[package]] name = "types-netaddr" -version = "0.8.0.9" +version = "0.9.0.1" description = "Typing stubs for netaddr" optional = false python-versions = "*" files = [ - {file = "types-netaddr-0.8.0.9.tar.gz", hash = "sha256:68900c267fd31627c1721c5c52b32a257657ac2777457dca49b6b096ba2faf74"}, - {file = "types_netaddr-0.8.0.9-py3-none-any.whl", hash = "sha256:63e871f064cd59473cec1177f372526f0fa3d565050247d5305bdc325be5c3f6"}, + {file = "types-netaddr-0.9.0.1.tar.gz", hash = "sha256:e04638435abad3e3b13a4a6b1b07f36619a47597fd5c10f330474196c058dfb3"}, + {file = "types_netaddr-0.9.0.1-py3-none-any.whl", hash = "sha256:81b98c959d14de96eb53507ac606e8876c91413d273554a59fd42b34e3811fe0"}, ] [[package]] From 891f42f8c84e1adbb51d15cb82673f7ff245bf38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:45 +0100 Subject: [PATCH 007/142] Bump msgpack from 1.0.6 to 1.0.7 (#16412) --- poetry.lock | 114 ++++++++++++++++++++++++++-------------------------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git a/poetry.lock b/poetry.lock index f525bc874b..9de5c49bf7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1390,67 +1390,67 @@ files = [ [[package]] name = "msgpack" -version = "1.0.6" +version = "1.0.7" description = "MessagePack serializer" optional = false python-versions = ">=3.8" files = [ - {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f4321692e7f299277e55f322329b2c972d93bb612d85f3fda8741bec5c6285ce"}, - {file = "msgpack-1.0.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f0e36a5fa7a182cde391a128a64f437657d2b9371dfa42eda3436245adccbf5"}, - {file = "msgpack-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5c8dd9a386a66e50bd7fa22b7a49fb8ead2b3574d6bd69eb1caced6caea0803"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f85200ea102276afdd3749ca94747f057bbb868d1c52921ee2446730b508d0f"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a006c300e82402c0c8f1ded11352a3ba2a61b87e7abb3054c845af2ca8d553c"}, - {file = "msgpack-1.0.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bbf47ea5a6ff20c23426106e81863cdbb5402de1825493026ce615039cc99d"}, - {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:04450e4b5e1e662e7c86b6aafb7c230af9334fd0becf5e6b80459a507884241c"}, - {file = 
"msgpack-1.0.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b06a5095a79384760625b5de3f83f40b3053a385fb893be8a106fbbd84c14980"}, - {file = "msgpack-1.0.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3910211b0ab20be3a38e0bb944ed45bd4265d8d9f11a3d1674b95b298e08dd5c"}, - {file = "msgpack-1.0.6-cp310-cp310-win32.whl", hash = "sha256:1dc67b40fe81217b308ab12651adba05e7300b3a2ccf84d6b35a878e308dd8d4"}, - {file = "msgpack-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:885de1ed5ea01c1bfe0a34c901152a264c3c1f8f1d382042b92ea354bd14bb0e"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099c3d8a027367e1a6fc55d15336f04ff65c60c4f737b5739f7db4525c65fe9e"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b88dc97ba86c96b964c3745a445d9a65f76fe21955a953064fe04adb63e9367"}, - {file = "msgpack-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00ce5f827d4f26fc094043e6f08b6069c1b148efa2631c47615ae14fb6cafc89"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd6af61388be65a8701f5787362cb54adae20007e0cc67ca9221a4b95115583b"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:652e4b7497825b0af6259e2c54700e6dc33d2fc4ed92b8839435090d4c9cc911"}, - {file = "msgpack-1.0.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b08676a17e3f791daad34d5fcb18479e9c85e7200d5a17cbe8de798643a7e37"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:229ccb6713c8b941eaa5cf13dc7478eba117f21513b5893c35e44483e2f0c9c8"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:95ade0bd4cf69e04e8b8f8ec2d197d9c9c4a9b6902e048dc7456bf6d82e12a80"}, - {file = "msgpack-1.0.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b16344032a27b2ccfd341f89dadf3e4ef6407d91e4b93563c14644a8abb3ad7"}, - {file = "msgpack-1.0.6-cp311-cp311-win32.whl", hash = "sha256:55bb4a1bf94e39447bc08238a2fb8a767460388a8192f67c103442eb36920887"}, - {file = "msgpack-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:ae97504958d0bc58c1152045c170815d5c4f8af906561ce044b6358b43d0c97e"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ecf431786019a7bfedc28281531d706627f603e3691d64eccdbce3ecd353823"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a635aecf1047255576dbb0927cbf9a7aa4a68e9d54110cc3c926652d18f144e0"}, - {file = "msgpack-1.0.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:102cfb54eaefa73e8ca1e784b9352c623524185c98e057e519545131a56fb0af"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c5e05e4f5756758c58a8088aa10dc70d851c89f842b611fdccfc0581c1846bc"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68569509dd015fcdd1e6b2b3ccc8c51fd27d9a97f461ccc909270e220ee09685"}, - {file = "msgpack-1.0.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf652839d16de91fe1cfb253e0a88db9a548796939533894e07f45d4bdf90a5f"}, - {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14db7e1b7a7ed362b2f94897bf2486c899c8bb50f6e34b2db92fe534cdab306f"}, - {file = "msgpack-1.0.6-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:159cfec18a6e125dd4723e2b1de6f202b34b87c850fb9d509acfd054c01135e9"}, - {file = 
"msgpack-1.0.6-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6a01a072b2219b65a6ff74df208f20b2cac9401c60adb676ee34e53b4c651077"}, - {file = "msgpack-1.0.6-cp312-cp312-win32.whl", hash = "sha256:e36560d001d4ba469d469b02037f2dd404421fd72277d9474efe9f03f83fced5"}, - {file = "msgpack-1.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:5e7fae9ca93258a956551708cf60dc6c8145574e32ce8c8c4d894e63bcb04341"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:40b801b768f5a765e33c68f30665d3c6ee1c8623a2d2bb78e6e59f2db4e4ceb7"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:da057d3652e698b00746e47f06dbb513314f847421e857e32e1dc61c46f6c052"}, - {file = "msgpack-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f75114c05ec56566da6b55122791cf5bb53d5aada96a98c016d6231e03132f76"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61213482b5a387ead9e250e9e3cb290292feca39dc83b41c3b1b7b8ffc8d8ecb"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae6c561f11b444b258b1b4be2bdd1e1cf93cd1d80766b7e869a79db4543a8a8"}, - {file = "msgpack-1.0.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:619a63753ba9e792fe3c6c0fc2b9ee2cfbd92153dd91bee029a89a71eb2942cd"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:70843788c85ca385846a2d2f836efebe7bb2687ca0734648bf5c9dc6c55602d2"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:fb4571efe86545b772a4630fee578c213c91cbcfd20347806e47fd4e782a18fe"}, - {file = "msgpack-1.0.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bbb4448a05d261fae423d5c0b0974ad899f60825bc77eabad5a0c518e78448c2"}, - {file = "msgpack-1.0.6-cp38-cp38-win32.whl", hash = "sha256:5cd67674db3c73026e0a2c729b909780e88bd9cbc8184256f9567640a5d299a8"}, - {file = "msgpack-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:a1cf98afa7ad5e7012454ca3fde254499a13f9d92fd50cb46118118a249a1355"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d6d25b8a5c70e2334ed61a8da4c11cd9b97c6fbd980c406033f06e4463fda006"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88cdb1da7fdb121dbb3116910722f5acab4d6e8bfcacab8fafe27e2e7744dc6a"}, - {file = "msgpack-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3b5658b1f9e486a2eec4c0c688f213a90085b9cf2fec76ef08f98fdf6c62f4b9"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76820f2ece3b0a7c948bbb6a599020e29574626d23a649476def023cbb026787"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c780d992f5d734432726b92a0c87bf1857c3d85082a8dea29cbf56e44a132b3"}, - {file = "msgpack-1.0.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0ed35d6d6122d0baa9a1b59ebca4ee302139f4cfb57dab85e4c73ab793ae7ed"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:32c0aff31f33033f4961abc01f78497e5e07bac02a508632aef394b384d27428"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:35ad5aed9b52217d4cea739d0ea3a492a18dd86fecb4b132668a69f27fb0363b"}, - {file = "msgpack-1.0.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47275ff73005a3e5e146e50baa2378e1730cba6e292f0222bc496a8e4c4adfc8"}, - {file = "msgpack-1.0.6-cp39-cp39-win32.whl", hash = 
"sha256:7baf16fd8908a025c4a8d7b699103e72d41f967e2aee5a2065432bcdbd9fd06e"}, - {file = "msgpack-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:fc97aa4b4fb928ff4d3b74da7c30b360d0cb3ede49a5a6e1fd9705f49aea1deb"}, - {file = "msgpack-1.0.6.tar.gz", hash = "sha256:25d3746da40f3c8c59c3b1d001e49fd2aa17904438f980d9a391370366df001e"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"}, + {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"}, + {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"}, + {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"}, + {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"}, + {file = "msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"}, + {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"}, + {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"}, + {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"}, + {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = 
"sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"}, + {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"}, + {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"}, + {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"}, + {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"}, + {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"}, + {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"}, + {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"}, + {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"}, + {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"}, + {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"}, + {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = 
"sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"}, + {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"}, + {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"}, + {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"}, + {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"}, + {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"}, + {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, ] [[package]] From d40a939ff672cb9391c348c34af34b8a2252be67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 10:35:57 +0100 Subject: [PATCH 008/142] Bump phonenumbers from 8.13.19 to 8.13.22 (#16413) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9de5c49bf7..255396033c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1617,13 +1617,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.19" +version = "8.13.22" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.19-py2.py3-none-any.whl", hash = "sha256:ba542f20f6dc83be8f127f240f9b5b7e7c1dec42aceff1879400d4dc0c781d81"}, - {file = "phonenumbers-8.13.19.tar.gz", hash = "sha256:38180247697240ccedd74dec4bfbdbc22bb108b9c5f991f270ca3e41395e6f96"}, + {file = "phonenumbers-8.13.22-py2.py3-none-any.whl", hash = "sha256:85ceeba9e67984ba98182c77e8e4c70093d38c0c6a0cb2bd392e0694ddaeb1f6"}, + {file = "phonenumbers-8.13.22.tar.gz", hash = "sha256:001664c90f59b8954766c2db85adafc8dbc96177efeb49607ca4e64a7acaf569"}, ] [[package]] From 5725712d477e41761aa89a79edd77d613c36a30a Mon Sep 17 00:00:00 2001 From: MomentQYC <62551256+MomentQYC@users.noreply.github.com> Date: Mon, 2 Oct 2023 21:07:53 +0800 Subject: [PATCH 009/142] Remove Python version from `/_synapse/admin/v1/server_version` (#16380) There's no reason to expose the full Python version over what is frequently a public API. 
--- changelog.d/16380.removal | 1 + docs/admin_api/version_api.md | 10 ++++++---- synapse/rest/admin/__init__.py | 6 +----- tests/rest/admin/test_admin.py | 4 +--- 4 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 changelog.d/16380.removal diff --git a/changelog.d/16380.removal b/changelog.d/16380.removal new file mode 100644 index 0000000000..6e9372134d --- /dev/null +++ b/changelog.d/16380.removal @@ -0,0 +1 @@ +Remove Python version from `/_synapse/admin/v1/server_version`. \ No newline at end of file diff --git a/docs/admin_api/version_api.md b/docs/admin_api/version_api.md index 27977de0d3..bdc37d9119 100644 --- a/docs/admin_api/version_api.md +++ b/docs/admin_api/version_api.md @@ -1,7 +1,7 @@ # Version API -This API returns the running Synapse version and the Python version -on which Synapse is being run. This is useful when a Synapse instance +This API returns the running Synapse version. +This is useful when a Synapse instance is behind a proxy that does not forward the 'Server' header (which also contains Synapse version information). @@ -15,7 +15,9 @@ It returns a JSON body like the following: ```json { - "server_version": "0.99.2rc1 (b=develop, abcdef123)", - "python_version": "3.7.8" + "server_version": "0.99.2rc1 (b=develop, abcdef123)" } ``` + +*Changed in Synapse 1.94.0:* The `python_version` key was removed from the +response body. diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 7d0b4b55a0..e42dade246 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -16,7 +16,6 @@ # limitations under the License. import logging -import platform from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple @@ -107,10 +106,7 @@ class VersionServlet(RestServlet): PATTERNS = admin_patterns("/server_version$") def __init__(self, hs: "HomeServer"): - self.res = { - "server_version": SYNAPSE_VERSION, - "python_version": platform.python_version(), - } + self.res = {"server_version": SYNAPSE_VERSION} def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return HTTPStatus.OK, self.res diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 695e84357a..359d131b37 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -42,9 +42,7 @@ class VersionTestCase(unittest.HomeserverTestCase): channel = self.make_request("GET", self.url, shorthand=False) self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual( - {"server_version", "python_version"}, set(channel.json_body.keys()) - ) + self.assertEqual({"server_version"}, set(channel.json_body.keys())) class QuarantineMediaTestCase(unittest.HomeserverTestCase): From 102677638002b3ef6ae956947333ddcde80680a7 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 2 Oct 2023 15:22:36 +0100 Subject: [PATCH 010/142] mypy plugin to check `@cached` return types (#14911) Co-authored-by: David Robertson Co-authored-by: Patrick Cloke Co-authored-by: Erik Johnston Assert that the return type of callables wrapped in @cached and @cachedList are cachable (aka immutable). 
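As an illustration, a minimal sketch (not from the patch) of the kind of code the plugin accepts and rejects; the store and method names here are hypothetical:

```python
# With the plugin enabled, the Mapping-returning method type-checks, while the
# Dict-returning one is flagged with the new "synapse-@cached-mutable" error code,
# since a mutable cached value could be modified by one caller and then handed,
# already modified, to the next caller straight from the cache.
from typing import Dict, Mapping

from synapse.util.caches.descriptors import cached


class ExampleStore:
    @cached()
    async def get_member_counts(self, room_id: str) -> Mapping[str, int]:
        # A Mapping of immutable keys/values is treated as cacheable.
        return {"joined": 1, "invited": 0}

    @cached()  # error: mutable return value [synapse-@cached-mutable]
    async def get_member_counts_mutable(self, room_id: str) -> Dict[str, int]:
        # Dict (like list and set) is mutable, so the plugin rejects it.
        return {"joined": 1, "invited": 0}
```

In the patch itself, existing offenders are annotated with `# type: ignore[synapse-@cached-mutable]` rather than fixed, as seen in the `relations.py` and `roommember.py` hunks below.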
--- changelog.d/14911.misc | 1 + scripts-dev/mypy_synapse_plugin.py | 287 +++++++++++++++--- synapse/handlers/room_list.py | 4 +- .../databases/main/event_push_actions.py | 7 +- synapse/storage/databases/main/relations.py | 12 +- synapse/storage/databases/main/roommember.py | 5 +- synapse/storage/roommember.py | 1 + synapse/util/caches/descriptors.py | 64 +++- 8 files changed, 323 insertions(+), 58 deletions(-) create mode 100644 changelog.d/14911.misc diff --git a/changelog.d/14911.misc b/changelog.d/14911.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/14911.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index a0b3854f1b..6592a4a6b7 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -16,13 +16,24 @@ can crop up, e.g the cache descriptors. """ -from typing import Callable, Optional, Type +from typing import Callable, Optional, Tuple, Type, Union +import mypy.types from mypy.erasetype import remove_instance_last_known_values -from mypy.nodes import ARG_NAMED_OPT -from mypy.plugin import MethodSigContext, Plugin +from mypy.errorcodes import ErrorCode +from mypy.nodes import ARG_NAMED_OPT, TempNode, Var +from mypy.plugin import FunctionSigContext, MethodSigContext, Plugin from mypy.typeops import bind_self -from mypy.types import CallableType, Instance, NoneType, UnionType +from mypy.types import ( + AnyType, + CallableType, + Instance, + NoneType, + TupleType, + TypeAliasType, + UninhabitedType, + UnionType, +) class SynapsePlugin(Plugin): @@ -36,9 +47,37 @@ class SynapsePlugin(Plugin): ) ): return cached_function_method_signature + + if fullname in ( + "synapse.util.caches.descriptors._CachedFunctionDescriptor.__call__", + "synapse.util.caches.descriptors._CachedListFunctionDescriptor.__call__", + ): + return check_is_cacheable_wrapper + return None +def _get_true_return_type(signature: CallableType) -> mypy.types.Type: + """ + Get the "final" return type of a callable which might return an Awaitable/Deferred. + """ + if isinstance(signature.ret_type, Instance): + # If a coroutine, unwrap the coroutine's return type. + if signature.ret_type.type.fullname == "typing.Coroutine": + return signature.ret_type.args[2] + + # If an awaitable, unwrap the awaitable's final value. + elif signature.ret_type.type.fullname == "typing.Awaitable": + return signature.ret_type.args[0] + + # If a Deferred, unwrap the Deferred's final value. + elif signature.ret_type.type.fullname == "twisted.internet.defer.Deferred": + return signature.ret_type.args[0] + + # Otherwise, return the raw value of the function. + return signature.ret_type + + def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: """Fixes the `CachedFunction.__call__` signature to be correct. @@ -47,16 +86,17 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: 1. the `self` argument needs to be marked as "bound"; 2. any `cache_context` argument should be removed; 3. an optional keyword argument `on_invalidated` should be added. + 4. Wrap the return type to always be a Deferred. """ - # First we mark this as a bound function signature. - signature = bind_self(ctx.default_signature) + # 1. Mark this as a bound function signature. + signature: CallableType = bind_self(ctx.default_signature) - # Secondly, we remove any "cache_context" args. + # 2. Remove any "cache_context" args. 
# # Note: We should be only doing this if `cache_context=True` is set, but if # it isn't then the code will raise an exception when its called anyway, so - # its not the end of the world. + # it's not the end of the world. context_arg_index = None for idx, name in enumerate(signature.arg_names): if name == "cache_context": @@ -72,7 +112,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.pop(context_arg_index) arg_kinds.pop(context_arg_index) - # Third, we add an optional "on_invalidate" argument. + # 3. Add an optional "on_invalidate" argument. # # This is a either # - a callable which accepts no input and returns nothing, or @@ -94,35 +134,16 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: arg_names.append("on_invalidate") arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg. - # Finally we ensure the return type is a Deferred. - if ( - isinstance(signature.ret_type, Instance) - and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred" - ): - # If it is already a Deferred, nothing to do. - ret_type = signature.ret_type - else: - ret_arg = None - if isinstance(signature.ret_type, Instance): - # If a coroutine, wrap the coroutine's return type in a Deferred. - if signature.ret_type.type.fullname == "typing.Coroutine": - ret_arg = signature.ret_type.args[2] + # 4. Ensure the return type is a Deferred. + ret_arg = _get_true_return_type(signature) - # If an awaitable, wrap the awaitable's final value in a Deferred. - elif signature.ret_type.type.fullname == "typing.Awaitable": - ret_arg = signature.ret_type.args[0] - - # Otherwise, wrap the return value in a Deferred. - if ret_arg is None: - ret_arg = signature.ret_type - - # This should be able to use ctx.api.named_generic_type, but that doesn't seem - # to find the correct symbol for anything more than 1 module deep. - # - # modules is not part of CheckerPluginInterface. The following is a combination - # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. - sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] - ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) + # This should be able to use ctx.api.named_generic_type, but that doesn't seem + # to find the correct symbol for anything more than 1 module deep. + # + # modules is not part of CheckerPluginInterface. The following is a combination + # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo. + sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined] + ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)]) signature = signature.copy_modified( arg_types=arg_types, @@ -134,6 +155,198 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: return signature +def check_is_cacheable_wrapper(ctx: MethodSigContext) -> CallableType: + """Asserts that the signature of a method returns a value which can be cached. + + Makes no changes to the provided method signature. + """ + # The true signature, this isn't being modified so this is what will be returned. 
+ signature: CallableType = ctx.default_signature + + if not isinstance(ctx.args[0][0], TempNode): + ctx.api.note("Cached function is not a TempNode?!", ctx.context) # type: ignore[attr-defined] + return signature + + orig_sig = ctx.args[0][0].type + if not isinstance(orig_sig, CallableType): + ctx.api.fail("Cached 'function' is not a callable", ctx.context) + return signature + + check_is_cacheable(orig_sig, ctx) + + return signature + + +def check_is_cacheable( + signature: CallableType, + ctx: Union[MethodSigContext, FunctionSigContext], +) -> None: + """ + Check if a callable returns a type which can be cached. + + Args: + signature: The callable to check. + ctx: The signature context, used for error reporting. + """ + # Unwrap the true return type from the cached function. + return_type = _get_true_return_type(signature) + + verbose = ctx.api.options.verbosity >= 1 + # TODO Technically a cachedList only needs immutable values, but forcing them + # to return Mapping instead of Dict is fine. + ok, note = is_cacheable(return_type, signature, verbose) + + if ok: + message = f"function {signature.name} is @cached, returning {return_type}" + else: + message = f"function {signature.name} is @cached, but has mutable return value {return_type}" + + if note: + message += f" ({note})" + message = message.replace("builtins.", "").replace("typing.", "") + + if ok and note: + ctx.api.note(message, ctx.context) # type: ignore[attr-defined] + elif not ok: + ctx.api.fail(message, ctx.context, code=AT_CACHED_MUTABLE_RETURN) + + +# Immutable simple values. +IMMUTABLE_VALUE_TYPES = { + "builtins.bool", + "builtins.int", + "builtins.float", + "builtins.str", + "builtins.bytes", +} + +# Types defined in Synapse which are known to be immutable. +IMMUTABLE_CUSTOM_TYPES = { + "synapse.synapse_rust.acl.ServerAclEvaluator", + "synapse.synapse_rust.push.FilteredPushRules", + # This is technically not immutable, but close enough. + "signedjson.types.VerifyKey", +} + +# Immutable containers only if the values are also immutable. +IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS = { + "builtins.frozenset", + "builtins.tuple", + "typing.AbstractSet", + "typing.Sequence", + "immutabledict.immutabledict", +} + +MUTABLE_CONTAINER_TYPES = { + "builtins.set", + "builtins.list", + "builtins.dict", +} + +AT_CACHED_MUTABLE_RETURN = ErrorCode( + "synapse-@cached-mutable", + "@cached() should have an immutable return type", + "General", +) + + +def is_cacheable( + rt: mypy.types.Type, signature: CallableType, verbose: bool +) -> Tuple[bool, Optional[str]]: + """ + Check if a particular type is cachable. + + A type is cachable if it is immutable; for complex types this recurses to + check each type parameter. + + Returns: a 2-tuple (cacheable, message). + - cachable: False means the type is definitely not cacheable; + true means anything else. + - Optional message. + """ + + # This should probably be done via a TypeVisitor. Apologies to the reader! + if isinstance(rt, AnyType): + return True, ("may be mutable" if verbose else None) + + elif isinstance(rt, Instance): + if ( + rt.type.fullname in IMMUTABLE_VALUE_TYPES + or rt.type.fullname in IMMUTABLE_CUSTOM_TYPES + ): + # "Simple" types are generally immutable. + return True, None + + elif rt.type.fullname == "typing.Mapping": + # Generally mapping keys are immutable, but they only *have* to be + # hashable, which doesn't imply immutability. E.g. Mapping[K, V] + # is cachable iff K and V are cachable. 
+ return is_cacheable(rt.args[0], signature, verbose) and is_cacheable( + rt.args[1], signature, verbose + ) + + elif rt.type.fullname in IMMUTABLE_CONTAINER_TYPES_REQUIRING_IMMUTABLE_ELEMENTS: + # E.g. Collection[T] is cachable iff T is cachable. + return is_cacheable(rt.args[0], signature, verbose) + + elif rt.type.fullname in MUTABLE_CONTAINER_TYPES: + # Mutable containers are mutable regardless of their underlying type. + return False, None + + elif "attrs" in rt.type.metadata: + # attrs classes are only cachable iff it is frozen (immutable itself) + # and all attributes are cachable. + frozen = rt.type.metadata["attrs"]["frozen"] + if frozen: + for attribute in rt.type.metadata["attrs"]["attributes"]: + attribute_name = attribute["name"] + symbol_node = rt.type.names[attribute_name].node + assert isinstance(symbol_node, Var) + assert symbol_node.type is not None + ok, note = is_cacheable(symbol_node.type, signature, verbose) + if not ok: + return False, f"non-frozen attrs property: {attribute_name}" + # All attributes were frozen. + return True, None + else: + return False, "non-frozen attrs class" + + else: + # Ensure we fail for unknown types, these generally means that the + # above code is not complete. + return ( + False, + f"Don't know how to handle {rt.type.fullname} return type instance", + ) + + elif isinstance(rt, NoneType): + # None is cachable. + return True, None + + elif isinstance(rt, (TupleType, UnionType)): + # Tuples and unions are cachable iff all their items are cachable. + for item in rt.items: + ok, note = is_cacheable(item, signature, verbose) + if not ok: + return False, note + # This discards notes but that's probably fine + return True, None + + elif isinstance(rt, TypeAliasType): + # For a type alias, check if the underlying real type is cachable. + return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose) + + elif isinstance(rt, UninhabitedType) and rt.is_noreturn: + # There is no return value, just consider it cachable. This is only used + # in tests. + return True, None + + else: + # Ensure we fail for unknown types, these generally means that the + # above code is not complete. 
+ return False, f"Don't know how to handle {type(rt).__qualname__} return type" + + def plugin(version: str) -> Type[SynapsePlugin]: # This is the entry point of the plugin, and lets us deal with the fact # that the mypy plugin interface is *not* stable by looking at the version diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index bb0bdb8e6f..36e2db8975 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -33,7 +33,7 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) -from synapse.types import JsonDict, ThirdPartyInstanceID +from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache @@ -256,7 +256,7 @@ class RoomListHandler: cache_context: _CacheContext, with_alias: bool = True, allow_private: bool = False, - ) -> Optional[JsonDict]: + ) -> Optional[JsonMapping]: """Returns the entry for a room Args: diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index ba99e63d26..39556481ff 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -182,6 +182,7 @@ class UserPushAction(EmailPushAction): profile_tag: str +# TODO This is used as a cached value and is mutable. @attr.s(slots=True, auto_attribs=True) class NotifCounts: """ @@ -193,7 +194,7 @@ class NotifCounts: highlight_count: int = 0 -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class RoomNotifCounts: """ The per-user, per-room count of notifications. Used by sync and push. @@ -201,7 +202,7 @@ class RoomNotifCounts: main_timeline: NotifCounts # Map of thread ID to the notification counts. - threads: Dict[str, NotifCounts] + threads: Mapping[str, NotifCounts] @staticmethod def empty() -> "RoomNotifCounts": @@ -483,7 +484,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas return room_to_count - @cached(tree=True, max_entries=5000, iterable=True) + @cached(tree=True, max_entries=5000, iterable=True) # type: ignore[synapse-@cached-mutable] async def get_unread_event_push_actions_by_room_for_user( self, room_id: str, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index b67f780c10..9246b418f5 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -458,7 +458,7 @@ class RelationsWorkerStore(SQLBaseStore): ) return result is not None - @cached() + @cached() # type: ignore[synapse-@cached-mutable] async def get_references_for_event(self, event_id: str) -> List[JsonDict]: raise NotImplementedError() @@ -512,11 +512,12 @@ class RelationsWorkerStore(SQLBaseStore): "_get_references_for_events_txn", _get_references_for_events_txn ) - @cached() + @cached() # type: ignore[synapse-@cached-mutable] def get_applicable_edit(self, event_id: str) -> Optional[EventBase]: raise NotImplementedError() - @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") + # TODO: This returns a mutable object, which is generally bad. 
+ @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_applicable_edits( self, event_ids: Collection[str] ) -> Mapping[str, Optional[EventBase]]: @@ -598,11 +599,12 @@ class RelationsWorkerStore(SQLBaseStore): for original_event_id in event_ids } - @cached() + @cached() # type: ignore[synapse-@cached-mutable] def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]: raise NotImplementedError() - @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") + # TODO: This returns a mutable object, which is generally bad. + @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") # type: ignore[synapse-@cached-mutable] async def get_thread_summaries( self, event_ids: Collection[str] ) -> Mapping[str, Optional[Tuple[int, EventBase]]]: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 3755773faa..e93573f315 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -275,7 +275,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): _get_users_in_room_with_profiles, ) - @cached(max_entries=100000) + @cached(max_entries=100000) # type: ignore[synapse-@cached-mutable] async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room summary extension to /sync. Useful when lazy loading room members. @@ -1071,7 +1071,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) return {row["event_id"]: row["membership"] for row in rows} - @cached(max_entries=10000) + # TODO This returns a mutable object, which is generally confusing when using a cache. + @cached(max_entries=10000) # type: ignore[synapse-@cached-mutable] def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache": return _JoinedHostsCache() diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 2500381b7b..cbfb32014c 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -45,6 +45,7 @@ class ProfileInfo: display_name: Optional[str] +# TODO This is used as a cached value and is mutable. 
@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class MemberSummary: # A truncated list of (user_id, event_id) tuples for users of a given diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 8514a75a1c..ce736fdf75 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -36,6 +36,8 @@ from typing import ( ) from weakref import WeakValueDictionary +import attr + from twisted.internet import defer from twisted.python.failure import Failure @@ -466,6 +468,35 @@ class _CacheContext: ) +@attr.s(auto_attribs=True, slots=True, frozen=True) +class _CachedFunctionDescriptor: + """Helper for `@cached`, we name it so that we can hook into it with mypy + plugin.""" + + max_entries: int + num_args: Optional[int] + uncached_args: Optional[Collection[str]] + tree: bool + cache_context: bool + iterable: bool + prune_unread_entries: bool + name: Optional[str] + + def __call__(self, orig: F) -> CachedFunction[F]: + d = DeferredCacheDescriptor( + orig, + max_entries=self.max_entries, + num_args=self.num_args, + uncached_args=self.uncached_args, + tree=self.tree, + cache_context=self.cache_context, + iterable=self.iterable, + prune_unread_entries=self.prune_unread_entries, + name=self.name, + ) + return cast(CachedFunction[F], d) + + def cached( *, max_entries: int = 1000, @@ -476,9 +507,8 @@ def cached( iterable: bool = False, prune_unread_entries: bool = True, name: Optional[str] = None, -) -> Callable[[F], CachedFunction[F]]: - func = lambda orig: DeferredCacheDescriptor( - orig, +) -> _CachedFunctionDescriptor: + return _CachedFunctionDescriptor( max_entries=max_entries, num_args=num_args, uncached_args=uncached_args, @@ -489,7 +519,26 @@ def cached( name=name, ) - return cast(Callable[[F], CachedFunction[F]], func) + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class _CachedListFunctionDescriptor: + """Helper for `@cachedList`, we name it so that we can hook into it with mypy + plugin.""" + + cached_method_name: str + list_name: str + num_args: Optional[int] = None + name: Optional[str] = None + + def __call__(self, orig: F) -> CachedFunction[F]: + d = DeferredCacheListDescriptor( + orig, + cached_method_name=self.cached_method_name, + list_name=self.list_name, + num_args=self.num_args, + name=self.name, + ) + return cast(CachedFunction[F], d) def cachedList( @@ -498,7 +547,7 @@ def cachedList( list_name: str, num_args: Optional[int] = None, name: Optional[str] = None, -) -> Callable[[F], CachedFunction[F]]: +) -> _CachedListFunctionDescriptor: """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. Used to do batch lookups for an already created cache. One of the arguments @@ -527,16 +576,13 @@ def cachedList( def batch_do_something(self, first_arg, second_args): ... """ - func = lambda orig: DeferredCacheListDescriptor( - orig, + return _CachedListFunctionDescriptor( cached_method_name=cached_method_name, list_name=list_name, num_args=num_args, name=name, ) - return cast(Callable[[F], CachedFunction[F]], func) - def _get_cache_key_builder( param_names: Sequence[str], From 127b940dc0806b8d74456d34e3f636ef1f6f1c68 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 2 Oct 2023 11:05:29 -0400 Subject: [PATCH 011/142] Clean-up old release notes (#16418) Fixes some broken formatting from the reStructuedText to Markdown conversion and fixes some typos. 
--- changelog.d/16418.doc | 1 + docs/changelogs/CHANGES-pre-1.0.md | 267 +++++++++++++++-------------- docs/user_directory.md | 2 +- 3 files changed, 136 insertions(+), 134 deletions(-) create mode 100644 changelog.d/16418.doc diff --git a/changelog.d/16418.doc b/changelog.d/16418.doc new file mode 100644 index 0000000000..4ec5dbb6b2 --- /dev/null +++ b/changelog.d/16418.doc @@ -0,0 +1 @@ +Improve legacy release notes. diff --git a/docs/changelogs/CHANGES-pre-1.0.md b/docs/changelogs/CHANGES-pre-1.0.md index e414dbb3b1..a08f867b67 100644 --- a/docs/changelogs/CHANGES-pre-1.0.md +++ b/docs/changelogs/CHANGES-pre-1.0.md @@ -1186,9 +1186,9 @@ Synapse 0.33.0rc1 (2018-07-18) Features -------- -- Enforce the specified API for report\_event. ([\#3316](https://github.com/matrix-org/synapse/issues/3316)) +- Enforce the specified API for `report_event`. ([\#3316](https://github.com/matrix-org/synapse/issues/3316)) - Include CPU time from database threads in request/block metrics. ([\#3496](https://github.com/matrix-org/synapse/issues/3496), [\#3501](https://github.com/matrix-org/synapse/issues/3501)) -- Add CPU metrics for \_fetch\_event\_list. ([\#3497](https://github.com/matrix-org/synapse/issues/3497)) +- Add CPU metrics for `_fetch_event_list`. ([\#3497](https://github.com/matrix-org/synapse/issues/3497)) - Optimisation to make handling incoming federation requests more efficient. ([\#3541](https://github.com/matrix-org/synapse/issues/3541)) Bugfixes @@ -1238,19 +1238,19 @@ Features - Add metrics to track appservice transactions ([\#3344](https://github.com/matrix-org/synapse/issues/3344)) - Try to log more helpful info when a sig verification fails ([\#3372](https://github.com/matrix-org/synapse/issues/3372)) - Synapse now uses the best performing JSON encoder/decoder according to your runtime (simplejson on CPython, stdlib json on PyPy). ([\#3462](https://github.com/matrix-org/synapse/issues/3462)) -- Add optional ip\_range\_whitelist param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465)) +- Add optional `ip_range_whitelist` param to AS registration files to lock AS IP access ([\#3465](https://github.com/matrix-org/synapse/issues/3465)) - Reject invalid server names in federation requests ([\#3480](https://github.com/matrix-org/synapse/issues/3480)) - Reject invalid server names in homeserver.yaml ([\#3483](https://github.com/matrix-org/synapse/issues/3483)) Bugfixes -------- -- Strip access\_token from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327)) +- Strip `access_token` from outgoing requests ([\#3327](https://github.com/matrix-org/synapse/issues/3327)) - Redact AS tokens in logs ([\#3349](https://github.com/matrix-org/synapse/issues/3349)) - Fix federation backfill from SQLite servers ([\#3355](https://github.com/matrix-org/synapse/issues/3355)) - Fix event-purge-by-ts admin API ([\#3363](https://github.com/matrix-org/synapse/issues/3363)) -- Fix event filtering in get\_missing\_events handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371)) -- Synapse is now stricter regarding accepting events which it cannot retrieve the prev\_events for. ([\#3456](https://github.com/matrix-org/synapse/issues/3456)) +- Fix event filtering in `get_missing_events` handler ([\#3371](https://github.com/matrix-org/synapse/issues/3371)) +- Synapse is now stricter regarding accepting events which it cannot retrieve the `prev_events` for. 
([\#3456](https://github.com/matrix-org/synapse/issues/3456)) - Fix bug where synapse would explode when receiving unicode in HTTP User-Agent header ([\#3470](https://github.com/matrix-org/synapse/issues/3470)) - Invalidate cache on correct thread to avoid race ([\#3473](https://github.com/matrix-org/synapse/issues/3473)) @@ -1262,7 +1262,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove was\_forgotten\_at ([\#3324](https://github.com/matrix-org/synapse/issues/3324)) +- Remove `was_forgotten_at` ([\#3324](https://github.com/matrix-org/synapse/issues/3324)) Misc ---- @@ -1285,7 +1285,7 @@ We are not aware of it being actively exploited but please upgrade asap. Bug Fixes: -- Fix event filtering in get\_missing\_events handler (PR #3371) +- Fix event filtering in `get_missing_events` handler (PR #3371) Changes in synapse v0.31.0 (2018-06-06) ======================================= @@ -1309,7 +1309,7 @@ Features: Changes: - daily user type phone home stats (PR #3264) -- Use iter\* methods for \_filter\_events\_for\_server (PR #3267) +- Use `iter*` methods for `_filter_events_for_server` (PR #3267) - Docs on consent bits (PR #3268) - Remove users from user directory on deactivate (PR #3277) - Avoid sending consent notice to guest users (PR #3288) @@ -1323,10 +1323,10 @@ Changes, python 3 migration: - Replace some more comparisons with six (PR #3243) Thanks to @NotAFile! - replace some iteritems with six (PR #3244) Thanks to @NotAFile! -- Add batch\_iter to utils (PR #3245) Thanks to @NotAFile! +- Add `batch_iter` to utils (PR #3245) Thanks to @NotAFile! - use repr, not str (PR #3246) Thanks to @NotAFile! - Misc Python3 fixes (PR #3247) Thanks to @NotAFile! -- Py3 storage/\_base.py (PR #3278) Thanks to @NotAFile! +- Py3 `storage/_base.py` (PR #3278) Thanks to @NotAFile! - more six iteritems (PR #3279) Thanks to @NotAFile! - More Misc. py3 fixes (PR #3280) Thanks to @NotAFile! - remaining isintance fixes (PR #3281) Thanks to @NotAFile! @@ -1342,7 +1342,7 @@ Bugs: Changes in synapse v0.30.0 (2018-05-24) ======================================= -\'Server Notices\' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. +"Server Notices" are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. They are used as part of communication of the server policies (see `docs/consent_tracking.md`), however the intention is that they may also find a use for features such as "Message of the day". 
@@ -1350,9 +1350,9 @@ This feature is specific to Synapse, but uses standard Matrix communication mech Further Server Notices/Consent Tracking Support: -- Allow overriding the server\_notices user's avatar (PR #3273) +- Allow overriding the `server_notices` user's avatar (PR #3273) - Use the localpart in the consent uri (PR #3272) -- Support for putting %(consent\_uri)s in messages (PR #3271) +- Support for putting `%(consent_uri)s` in messages (PR #3271) - Block attempts to send server notices to remote users (PR #3270) - Docs on consent bits (PR #3268) @@ -1366,7 +1366,7 @@ Server Notices/Consent Tracking Support: - Infrastructure for a server notices room (PR #3232) - Send users a server notice about consent (PR #3236) - Reject attempts to send event before privacy consent is given (PR #3257) -- Add a \'has\_consented\' template var to consent forms (PR #3262) +- Add a `has_consented` template var to consent forms (PR #3262) - Fix dependency on jinja2 (PR #3263) Features: @@ -1377,9 +1377,9 @@ Features: Changes: -- Remove unused update\_external\_syncs (PR #3233) +- Remove unused `update_external_syncs` (PR #3233) - Use stream rather depth ordering for push actions (PR #3212) -- Make purge\_history operate on tokens (PR #3221) +- Make `purge_history` operate on tokens (PR #3221) - Don't support limitless pagination (PR #3265) Bug Fixes: @@ -1421,29 +1421,29 @@ Changes - General: - nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77! - Part user from rooms on account deactivate (PR #3201) -- Make \'unexpected logging context\' into warnings (PR #3007) +- Make "unexpected logging context" into warnings (PR #3007) - Set Server header in SynapseRequest (PR #3208) - remove duplicates from groups tables (PR #3129) - Improve exception handling for background processes (PR #3138) - Add missing consumeErrors to improve exception handling (PR #3139) - reraise exceptions more carefully (PR #3142) -- Remove redundant call to preserve\_fn (PR #3143) -- Trap exceptions thrown within run\_in\_background (PR #3144) +- Remove redundant call to `preserve_fn` (PR #3143) +- Trap exceptions thrown within `run_in_background` (PR #3144) Changes - Refactors: - Refactor /context to reuse pagination storage functions (PR #3193) - Refactor recent events func to use pagination func (PR #3195) - Refactor pagination DB API to return concrete type (PR #3196) -- Refactor get\_recent\_events\_for\_room return type (PR #3198) +- Refactor `get_recent_events_for_room` return type (PR #3198) - Refactor sync APIs to reuse pagination API (PR #3199) - Remove unused code path from member change DB func (PR #3200) - Refactor request handling wrappers (PR #3203) -- transaction\_id, destination defined twice (PR #3209) Thanks to @damir-manapov! +- `transaction_id`, destination defined twice (PR #3209) Thanks to @damir-manapov! 
- Refactor event storage to prepare for changes in state calculations (PR #3141) - Set Server header in SynapseRequest (PR #3208) -- Use deferred.addTimeout instead of time\_bound\_deferred (PR #3127, #3178) -- Use run\_in\_background in preference to preserve\_fn (PR #3140) +- Use deferred.addTimeout instead of `time_bound_deferred` (PR #3127, #3178) +- Use `run_in_background` in preference to `preserve_fn` (PR #3140) Changes - Python 3 migration: @@ -1463,29 +1463,29 @@ Changes - Python 3 migration: Bug Fixes: -- synapse fails to start under Twisted \>= 18.4 (PR #3157) +- synapse fails to start under Twisted >= 18.4 (PR #3157) - Fix a class of logcontext leaks (PR #3170) - Fix a couple of logcontext leaks in unit tests (PR #3172) - Fix logcontext leak in media repo (PR #3174) - Escape label values in prometheus metrics (PR #3175, #3186) -- Fix \'Unhandled Error\' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot! +- Fix "Unhandled Error" logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot! - Fix logcontext leaks in rate limiter (PR #3183) -- notifications: Convert next\_token to string according to the spec (PR #3190) Thanks to @mujx! +- notifications: Convert `next_token` to string according to the spec (PR #3190) Thanks to @mujx! - nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77! -- add guard for None on purge\_history api (PR #3160) Thanks to @krombel! +- add guard for None on `purge_history` api (PR #3160) Thanks to @krombel! Changes in synapse v0.28.1 (2018-05-01) ======================================= SECURITY UPDATE -- Clamp the allowed values of event depth received over federation to be \[0, 2\^63 - 1\]. This mitigates an attack where malicious events injected with depth = 2\^63 - 1 render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using stream\_ordering rather than depth (topological\_ordering). +- Clamp the allowed values of event depth received over federation to be `[0, 2^63 - 1]`. This mitigates an attack where malicious events injected with `depth = 2^63 - 1` render rooms unusable. Depth is used to determine the cosmetic ordering of events within a room, and so the ordering of events in such a room will default to using `stream_ordering` rather than `depth` (topological ordering). This is a temporary solution to mitigate abuse in the wild, whilst a long term solution is being implemented to improve how the depth parameter is used. Full details at -- Pin Twisted to \<18.4 until we stop using the private \_OpenSSLECCurve API. +- Pin Twisted to <18.4 until we stop using the private `_OpenSSLECCurve` API. Changes in synapse v0.28.0 (2018-04-26) ======================================= @@ -1510,7 +1510,7 @@ Features: Changes: - Synapse on PyPy (PR #2760) Thanks to @Valodim! -- move handling of auto\_join\_rooms to RegisterHandler (PR #2996) Thanks to @krombel! +- move handling of `auto_join_rooms` to RegisterHandler (PR #2996) Thanks to @krombel! - Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh! - Document the behaviour of ResponseCache (PR #3059) - Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile! @@ -1524,15 +1524,15 @@ Changes: - Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh! - Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile! 
- Use six.itervalues in some places (PR #3106) Thanks to @NotAFile! -- Refactor store.have\_events (PR #3117) +- Refactor `store.have_events` (PR #3117) Bug Fixes: -- Return 401 for invalid access\_token on logout (PR #2938) Thanks to @dklug! +- Return 401 for invalid `access_token` on logout (PR #2938) Thanks to @dklug! - Return a 404 rather than a 500 on rejoining empty rooms (PR #3080) -- fix federation\_domain\_whitelist (PR #3099) -- Avoid creating events with huge numbers of prev\_events (PR #3113) -- Reject events which have lots of prev\_events (PR #3118) +- fix `federation_domain_whitelist` (PR #3099) +- Avoid creating events with huge numbers of `prev_events` (PR #3113) +- Reject events which have lots of `prev_events` (PR #3118) Changes in synapse v0.27.4 (2018-04-13) ======================================= @@ -1556,12 +1556,13 @@ v0.27.3-rc1 used a stale version of the develop branch so the changelog overstat Changes in synapse v0.27.3-rc1 (2018-04-09) =========================================== -Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording \'r30\' stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:- +Notable changes include API support for joinability of groups. Also new metrics and phone home stats. Phone home stats include better visibility of system usage so we can tweak synpase to work better for all users rather than our own experience with matrix.org. Also, recording "r30" stat which is the measure we use to track overall growth of the Matrix ecosystem. It is defined as:- -Counts the number of native 30 day retained users, defined as:- \* Users who have created their accounts more than 30 days +Counts the number of native 30 day retained users, defined as: -: - Where last seen at most 30 days ago - - Where account creation and last\_seen are \> 30 days\" +- Users who have created their accounts more than 30 days +- Where last seen at most 30 days ago +- Where account creation and `last_seen` are > 30 days Features: @@ -1577,9 +1578,9 @@ Features: Changes: - Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live! -- Replace old style error catching with \'as\' keyword (PR #3000) Thanks to @NotAFile! -- Use .iter\* to avoid copies in StateHandler (PR #3006) -- Linearize calls to \_generate\_user\_id (PR #3029) +- Replace old style error catching with `as` keyword (PR #3000) Thanks to @NotAFile! +- Use `.iter*` to avoid copies in StateHandler (PR #3006) +- Linearize calls to `_generate_user_id` (PR #3029) - Remove last usage of ujson (PR #3030) - Use simplejson throughout (PR #3048) - Use static JSONEncoders (PR #3049) @@ -1588,13 +1589,13 @@ Changes: Bug fixes: -- Add room\_id to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte! +- Add `room_id` to the response of rooms/{roomId}/join (PR #2986) Thanks to @jplatte! - Fix replication after switch to simplejson (PR #3015) - 404 correctly on missing paths via NoResource (PR #3022) - Fix error when claiming e2e keys from offline servers (PR #3034) -- fix tests/storage/test\_user\_directory.py (PR #3042) -- use PUT instead of POST for federating groups/m.join\_policy (PR #3070) Thanks to @krombel! 
-- postgres port script: fix state\_groups\_pkey error (PR #3072) +- fix `tests/storage/test_user_directory.py` (PR #3042) +- use `PUT` instead of `POST` for federating `groups`/`m.join_policy` (PR #3070) Thanks to @krombel! +- postgres port script: fix `state_groups_pkey` error (PR #3072) Changes in synapse v0.27.2 (2018-03-26) ======================================= @@ -1640,7 +1641,7 @@ Features: - Add ability for ASes to override message send time (PR #2754) - Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767) -- Add purge API features, see [docs/admin\_api/purge\_history\_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943) +- Add purge API features, see [docs/admin_api/purge_history_api.rst](docs/admin_api/purge_history_api.rst) for full details (PR #2858, #2867, #2882, #2946, #2962, #2943) - Add support for whitelisting 3PIDs that users can register. (PR #2813) - Add `/room/{id}/event/{id}` API (PR #2766) - Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live! @@ -1669,8 +1670,8 @@ Bug fixes: - Fix publicised groups GET API (singular) over federation (PR #2772) - Fix user directory when using `user_directory_search_all_users` config option (PR #2803, #2831) - Fix error on `/publicRooms` when no rooms exist (PR #2827) -- Fix bug in quarantine\_media (PR #2837) -- Fix url\_previews when no Content-Type is returned from URL (PR #2845) +- Fix bug in `quarantine_media` (PR #2837) +- Fix `url_previews` when no `Content-Type` is returned from URL (PR #2845) - Fix rare race in sync API when joining room (PR #2944) - Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848) @@ -1685,27 +1686,27 @@ Changes in synapse v0.26.0-rc1 (2017-12-13) Features: - Add ability for ASes to publicise groups for their users (PR #2686) -- Add all local users to the user\_directory and optionally search them (PR #2723) +- Add all local users to the `user_directory` and optionally search them (PR #2723) - Add support for custom login types for validating users (PR #2729) Changes: - Update example Prometheus config to new format (PR #2648) Thanks to @krombel! -- Rename redact\_content option to include\_content in Push API (PR #2650) +- Rename `redact_content` option to `include_content` in Push API (PR #2650) - Declare support for r0.3.0 (PR #2677) - Improve upserts (PR #2684, #2688, #2689, #2713) - Improve documentation of workers (PR #2700) - Improve tracebacks on exceptions (PR #2705) - Allow guest access to group APIs for reading (PR #2715) -- Support for posting content in federation\_client script (PR #2716) +- Support for posting content in `federation_client` script (PR #2716) - Delete devices and pushers on logouts etc (PR #2722) Bug fixes: - Fix database port script (PR #2673) -- Fix internal server error on login with ldap\_auth\_provider (PR #2678) Thanks to @jkolo! +- Fix internal server error on login with `ldap_auth_provider` (PR #2678) Thanks to @jkolo! 
- Fix error on sqlite 3.7 (PR #2697) -- Fix OPTIONS on preview\_url (PR #2707) +- Fix `OPTIONS` on `preview_url` (PR #2707) - Fix error handling on dns lookup (PR #2711) - Fix wrong avatars when inviting multiple users when creating room (PR #2717) - Fix 500 when joining matrix-dev (PR #2719) @@ -1729,7 +1730,7 @@ Changes in synapse v0.25.0-rc1 (2017-11-14) Features: -- Add is\_public to groups table to allow for private groups (PR #2582) +- Add `is_public` to groups table to allow for private groups (PR #2582) - Add a route for determining who you are (PR #2668) Thanks to @turt2live! - Add more features to the password providers (PR #2608, #2610, #2620, #2622, #2623, #2624, #2626, #2628, #2629) - Add a hook for custom rest endpoints (PR #2627) @@ -1737,7 +1738,7 @@ Features: Changes: -- Ignore \ tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt! +- Ignore `` tags when generating URL preview descriptions (PR #2576) Thanks to @maximevaillancourt! - Register some /unstable endpoints in /r0 as well (PR #2579) Thanks to @krombel! - Support /keys/upload on /r0 as well as /unstable (PR #2585) - Front-end proxy: pass through auth header (PR #2586) @@ -1745,9 +1746,9 @@ Changes: - Remove refresh tokens (PR #2613) - Automatically set default displayname on register (PR #2617) - Log login requests (PR #2618) -- Always return is\_public in the /groups/:group\_id/rooms API (PR #2630) +- Always return `is_public` in the `/groups/:group_id/rooms` API (PR #2630) - Avoid no-op media deletes (PR #2637) Thanks to @spantaleev! -- Fix various embarrassing typos around user\_directory and add some doc. (PR #2643) +- Fix various embarrassing typos around `user_directory` and add some doc. (PR #2643) - Return whether a user is an admin within a group (PR #2647) - Namespace visibility options for groups (PR #2657) - Downcase UserIDs on registration (PR #2662) @@ -1760,7 +1761,7 @@ Bug fixes: - Fix UI auth when deleting devices (PR #2591) - Fix typo when checking if user is invited to group (PR #2599) - Fix the port script to drop NUL values in all tables (PR #2611) -- Fix appservices being backlogged and not receiving new events due to a bug in notify\_interested\_services (PR #2631) Thanks to @xyzz! +- Fix appservices being backlogged and not receiving new events due to a bug in `notify_interested_services` (PR #2631) Thanks to @xyzz! - Fix updating rooms avatar/display name when modified by admin (PR #2636) Thanks to @farialima! - Fix bug in state group storage (PR #2649) - Fix 500 on invalid utf-8 in request (PR #2663) @@ -1794,7 +1795,7 @@ Changes: - Ignore incoming events for rooms that we have left (PR #2490) - Allow spam checker to reject invites too (PR #2492) - Add room creation checks to spam checker (PR #2495) -- Spam checking: add the invitee to user\_may\_invite (PR #2502) +- Spam checking: add the invitee to `user_may_invite` (PR #2502) - Process events from federation for different rooms in parallel (PR #2520) - Allow error strings from spam checker (PR #2531) - Improve error handling for missing files in config (PR #2551) @@ -1805,7 +1806,7 @@ Bug fixes: - Fix incompatibility with newer versions of ujson (PR #2483) Thanks to @jeremycline! 
- Fix notification keywords that start/end with non-word chars (PR #2500) - Fix stack overflow and logcontexts from linearizer (PR #2532) -- Fix 500 error when fields missing from power\_levels event (PR #2552) +- Fix 500 error when fields missing from `power_levels` event (PR #2552) - Fix 500 error when we get an error handling a PDU (PR #2553) Changes in synapse v0.23.1 (2017-10-02) @@ -1813,7 +1814,7 @@ Changes in synapse v0.23.1 (2017-10-02) Changes: -- Make \'affinity\' package optional, as it is not supported on some platforms +- Make `affinity` package optional, as it is not supported on some platforms Changes in synapse v0.23.0 (2017-10-02) ======================================= @@ -1833,7 +1834,7 @@ Changes in synapse v0.23.0-rc1 (2017-09-25) Features: - Add a frontend proxy worker (PR #2344) -- Add support for event\_id\_only push format (PR #2450) +- Add support for `event_id_only` push format (PR #2450) - Add a PoC for filtering spammy events (PR #2456) - Add a config option to block all room invites (PR #2457) @@ -1897,12 +1898,12 @@ Changes: - Deduplicate sync filters (PR #2219) Thanks to @krombel! - Correct a typo in UPGRADE.rst (PR #2231) Thanks to @aaronraimist! - Add count of one time keys to sync stream (PR #2237) -- Only store event\_auth for state events (PR #2247) +- Only store `event_auth` for state events (PR #2247) - Store URL cache preview downloads separately (PR #2299) Bug fixes: -- Fix users not getting notifications when AS listened to that user\_id (PR #2216) Thanks to @slipeer! +- Fix users not getting notifications when AS listened to that `user_id` (PR #2216) Thanks to @slipeer! - Fix users without push set up not getting notifications after joining rooms (PR #2236) - Fix preview url API to trim long descriptions (PR #2243) - Fix bug where we used cached but unpersisted state group as prev group, resulting in broken state of restart (PR #2263) @@ -1935,7 +1936,7 @@ Changes: - Update username availability checker API (PR #2209, #2213) - When purging, Don't de-delta state groups we're about to delete (PR #2214) - Documentation to check synapse version (PR #2215) Thanks to @hamber-dick! -- Add an index to event\_search to speed up purge history API (PR #2218) +- Add an index to `event_search` to speed up purge history API (PR #2218) Bug fixes: @@ -2004,7 +2005,7 @@ Changes in synapse v0.20.0-rc1 (2017-03-30) Features: -- Add delete\_devices API (PR #1993) +- Add `delete_devices` API (PR #1993) - Add phone number registration/login support (PR #1994, #2055) Changes: @@ -2024,12 +2025,12 @@ Changes: Bug fixes: -- Fix bug where current\_state\_events renamed to current\_state\_ids (PR #1849) +- Fix bug where `current_state_events` renamed to `current_state_ids` (PR #1849) - Fix routing loop when fetching remote media (PR #1992) -- Fix current\_state\_events table to not lie (PR #1996) +- Fix `current_state_events` table to not lie (PR #1996) - Fix CAS login to handle PartialDownloadError (PR #1997) - Fix assertion to stop transaction queue getting wedged (PR #2010) -- Fix presence to fallback to last\_active\_ts if it beats the last sync time. Thanks @Half-Shot! (PR #2014) +- Fix presence to fallback to `last_active_ts` if it beats the last sync time. Thanks @Half-Shot! (PR #2014) - Fix bug when federation received a PDU while a room join is in progress (PR #2016) - Fix resetting state on rejected events (PR #2025) - Fix installation issues in readme. Thanks @ricco386 (PR #2037) @@ -2064,7 +2065,7 @@ Changes: Bug fixes: -- Fix synapse\_port\_db failure. 
Thanks to Pneumaticat! (PR #1904) +- Fix synapse_port_db failure. Thanks to Pneumaticat! (PR #1904) - Fix caching to not cache error responses (PR #1913) - Fix APIs to make kick & ban reasons work (PR #1917) - Fix bugs in the /keys/changes api (PR #1921) @@ -2099,7 +2100,7 @@ Changes in synapse v0.19.0-rc3 (2017-02-02) =========================================== - Fix email push in pusher worker (PR #1875) -- Make presence.get\_new\_events a bit faster (PR #1876) +- Make `presence.get_new_events` a bit faster (PR #1876) - Make /keys/changes a bit more performant (PR #1877) Changes in synapse v0.19.0-rc2 (2017-02-02) @@ -2122,14 +2123,14 @@ Features: Changes: - Improve IPv6 support (PR #1696). Thanks to @kyrias and @glyph! -- Log which files we saved attachments to in the media\_repository (PR #1791) +- Log which files we saved attachments to in the `media_repository` (PR #1791) - Linearize updates to membership via PUT /state/ to better handle multiple joins (PR #1787) - Limit number of entries to prefill from cache on startup (PR #1792) -- Remove full\_twisted\_stacktraces option (PR #1802) +- Remove `full_twisted_stacktraces` option (PR #1802) - Measure size of some caches by sum of the size of cached values (PR #1815) -- Measure metrics of string\_cache (PR #1821) +- Measure metrics of `string_cache` (PR #1821) - Reduce logging verbosity (PR #1822, #1823, #1824) -- Don't clobber a displayname or avatar\_url if provided by an m.room.member event (PR #1852) +- Don't clobber a displayname or `avatar_url` if provided by an m.room.member event (PR #1852) - Better handle 401/404 response for federation /send/ (PR #1866, #1871) Fixes: @@ -2142,7 +2143,7 @@ Fixes: Performance: - Don't block messages sending on bumping presence (PR #1789) -- Change device\_inbox stream index to include user (PR #1793) +- Change `device_inbox` stream index to include user (PR #1793) - Optimise state resolution (PR #1818) - Use DB cache of joined users for presence (PR #1862) - Add an index to make membership queries faster (PR #1867) @@ -2225,7 +2226,7 @@ Changes: - Enable guest access for private rooms by default (PR #653) - Limit the number of events that can be created on a given room concurrently (PR #1620) - Log the args that we have on UI auth completion (PR #1649) -- Stop generating refresh\_tokens (PR #1654) +- Stop generating `refresh_tokens` (PR #1654) - Stop putting a time caveat on access tokens (PR #1656) - Remove unspecced GET endpoints for e2e keys (PR #1694) @@ -2250,7 +2251,7 @@ Changes in synapse v0.18.5-rc1 (2016-11-24) Features: -- Implement \"event\_fields\" in filters (PR #1638) +- Implement `event_fields` in filters (PR #1638) Changes: @@ -2279,7 +2280,7 @@ Bug fixes: - Fix media repo to set CORs headers on responses (PR #1190) - Fix registration to not error on non-ascii passwords (PR #1191) -- Fix create event code to limit the number of prev\_events (PR #1615) +- Fix create event code to limit the number of `prev_events` (PR #1615) - Fix bug in transaction ID deduplication (PR #1624) Changes in synapse v0.18.3 (2016-11-08) @@ -2338,10 +2339,10 @@ Changes in synapse v0.18.2-rc1 (2016-10-17) Changes: -- Remove redundant event\_auth index (PR #1113) +- Remove redundant `event_auth` index (PR #1113) - Reduce DB hits for replication (PR #1141) - Implement pluggable password auth (PR #1155) -- Remove rate limiting from app service senders and fix get\_or\_create\_user requester, thanks to Patrik Oldsberg (PR #1157) +- Remove rate limiting from app service senders and fix `get_or_create_user` 
requester, thanks to Patrik Oldsberg (PR #1157) - window.postmessage for Interactive Auth fallback (PR #1159) - Use sys.executable instead of hardcoded python, thanks to Pedro Larroy (PR #1162) - Add config option for adding additional TLS fingerprints (PR #1167) @@ -2349,7 +2350,7 @@ Changes: Bug fixes: -- Fix not being allowed to set your own state\_key, thanks to Patrik Oldsberg (PR #1150) +- Fix not being allowed to set your own `state_key`, thanks to Patrik Oldsberg (PR #1150) - Fix interactive auth to return 401 from for incorrect password (PR #1160, #1166) - Fix email push notifs being dropped (PR #1169) @@ -2363,7 +2364,7 @@ Changes in synapse v0.18.1-rc1 (2016-09-30) Features: -- Add total\_room\_count\_estimate to `/publicRooms` (PR #1133) +- Add `total_room_count_estimate` to `/publicRooms` (PR #1133) Changes: @@ -2398,17 +2399,17 @@ Features: - Add `only=highlight` on `/notifications` (PR #1081) - Add server param to /publicRooms (PR #1082) - Allow clients to ask for the whole of a single state event (PR #1094) -- Add is\_direct param to /createRoom (PR #1108) +- Add `is_direct` param to /createRoom (PR #1108) - Add pagination support to publicRooms (PR #1121) - Add very basic filter API to /publicRooms (PR #1126) - Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, #1111) Changes: -- Move to storing state\_groups\_state as deltas, greatly reducing DB size (PR #1065) +- Move to storing `state_groups_state` as deltas, greatly reducing DB size (PR #1065) - Reduce amount of state pulled out of the DB during common requests (PR #1069) - Allow PDF to be rendered from media repo (PR #1071) -- Reindex state\_groups\_state after pruning (PR #1085) +- Reindex `state_groups_state` after pruning (PR #1085) - Clobber EDUs in send queue (PR #1095) - Conform better to the CAS protocol specification (PR #1100) - Limit how often we ask for keys from dead servers (PR #1114) @@ -2442,22 +2443,22 @@ Changes: - Avoid pulling the full state of a room out so often (PR #1047, #1049, #1063, #1068) - Don't notify for online to online presence transitions. (PR #1054) - Occasionally persist unpersisted presence updates (PR #1055) -- Allow application services to have an optional \'url\' (PR #1056) +- Allow application services to have an optional `url` (PR #1056) - Clean up old sent transactions from DB (PR #1059) Bug fixes: - Fix None check in backfill (PR #1043) - Fix membership changes to be idempotent (PR #1067) -- Fix bug in get\_pdu where it would sometimes return events with incorrect signature +- Fix bug in `get_pdu` where it would sometimes return events with incorrect signature Changes in synapse v0.17.1 (2016-08-24) ======================================= Changes: -- Delete old received\_transactions rows (PR #1038) -- Pass through user-supplied content in /join/\$room\_id (PR #1039) +- Delete old `received_transactions` rows (PR #1038) +- Pass through user-supplied content in `/join/$room_id` (PR #1039) Bug fixes: @@ -2478,15 +2479,15 @@ Changes: - Move default display name push rule (PR #1011, #1023) - Fix up preview URL API. Add tests. 
(PR #1015) - Set `Content-Security-Policy` on media repo (PR #1021) -- Make notify\_interested\_services faster (PR #1022) +- Make `notify_interested_services` faster (PR #1022) - Add usage stats to prometheus monitoring (PR #1037) Bug fixes: - Fix token login (PR #993) - Fix CAS login (PR #994, #995) -- Fix /sync to not clobber status\_msg (PR #997) -- Fix redacted state events to include prev\_content (PR #1003) +- Fix /sync to not clobber `status_msg` (PR #997) +- Fix redacted state events to include `prev_content` (PR #1003) - Fix some bugs in the auth/ldap handler (PR #1007) - Fix backfill request to limit URI length, so that remotes Don't reject the requests due to path length limits (PR #1012) - Fix AS push code to not send duplicate events (PR #1025) @@ -2527,7 +2528,7 @@ Changes in synapse v0.17.0-rc3 (2016-08-02) Changes: -- Forbid non-ASes from registering users whose names begin with \'\_\' (PR #958) +- Forbid non-ASes from registering users whose names begin with `_` (PR #958) - Add some basic admin API docs (PR #963) Bug fixes: @@ -2549,16 +2550,16 @@ This release changes the LDAP configuration format in a backwards incompatible w Features: -- Add purge\_media\_cache admin API (PR #902) +- Add `purge_media_cache` admin API (PR #902) - Add deactivate account admin API (PR #903) - Add optional pepper to password hashing (PR #907, #910 by KentShikama) - Add an admin option to shared secret registration (breaks backwards compat) (PR #909) - Add purge local room history API (PR #911, #923, #924) - Add requestToken endpoints (PR #915) - Add an /account/deactivate endpoint (PR #921) -- Add filter param to /messages. Add \'contains\_url\' to filter. (PR #922) -- Add device\_id support to /login (PR #929) -- Add device\_id support to /v2/register flow. (PR #937, #942) +- Add filter param to /messages. Add `contains_url` to filter. (PR #922) +- Add `device_id` support to /login (PR #929) +- Add `device_id` support to /v2/register flow. (PR #937, #942) - Add GET /devices endpoint (PR #939, #944) - Add GET /device/{deviceId} (PR #943) - Add update and delete APIs for devices (PR #949) @@ -2566,14 +2567,14 @@ Features: Changes: - Rewrite LDAP Authentication against ldap3 (PR #843 by mweinelt) -- Linearize some federation endpoints based on (origin, room\_id) (PR #879) +- Linearize some federation endpoints based on `(origin, room_id)` (PR #879) - Remove the legacy v0 content upload API. (PR #888) - Use similar naming we use in email notifs for push (PR #894) - Optionally include password hash in createUser endpoint (PR #905 by KentShikama) -- Use a query that postgresql optimises better for get\_events\_around (PR #906) -- Fall back to \'username\' if \'user\' is not given for appservice registration. (PR #927 by Half-Shot) +- Use a query that postgresql optimises better for `get_events_around` (PR #906) +- Fall back to '`username` if `user` is not given for appservice registration. 
(PR #927 by Half-Shot) - Add metrics for psutil derived memory usage (PR #936) -- Record device\_id in client\_ips (PR #938) +- Record `device_id` in `client_ips` (PR #938) - Send the correct host header when fetching keys (PR #941) - Log the hostname the reCAPTCHA was completed on (PR #946) - Make the device id on e2e key upload optional (PR #956) @@ -2586,8 +2587,8 @@ Bug fixes: - Put most recent 20 messages in email notif (PR #892) - Ensure that the guest user is in the database when upgrading accounts (PR #914) - Fix various edge cases in auth handling (PR #919) -- Fix 500 ISE when sending alias event without a state\_key (PR #925) -- Fix bug where we stored rejections in the state\_group, persist all rejections (PR #948) +- Fix 500 ISE when sending alias event without a `state_key` (PR #925) +- Fix bug where we stored rejections in the `state_group`, persist all rejections (PR #948) - Fix lack of check of if the user is banned when handling 3pid invites (PR #952) - Fix a couple of bugs in the transaction and keyring code (PR #954, #955) @@ -2656,7 +2657,7 @@ Changes: Bug fixes: -- Fix \'From\' header in email notifications (PR #843) +- Fix `From` header in email notifications (PR #843) - Fix presence where timeouts were not being fired for the first 8h after restarts (PR #842) - Fix bug where synapse sent malformed transactions to AS's when retrying transactions (Commits 310197b, 8437906) @@ -2677,22 +2678,22 @@ Features: - Add a `url_preview_ip_range_whitelist` config param (PR #760) - Add /report endpoint (PR #762) - Add basic ignore user API (PR #763) -- Add an openidish mechanism for proving that you own a given user\_id (PR #765) -- Allow clients to specify a server\_name to avoid \'No known servers\' (PR #794) -- Add secondary\_directory\_servers option to fetch room list from other servers (PR #808, #813) +- Add an openidish mechanism for proving that you own a given `user_id` (PR #765) +- Allow clients to specify a `server_name` to avoid "No known servers" (PR #794) +- Add `secondary_directory_servers` option to fetch room list from other servers (PR #808, #813) Changes: -- Report per request metrics for all of the things using request\_handler (PR #756) +- Report per request metrics for all of the things using `request_handler` (PR #756) - Correctly handle `NULL` password hashes from the database (PR #775) - Allow receipts for events we haven't seen in the db (PR #784) - Make synctl read a cache factor from config file (PR #785) - Increment badge count per missed convo, not per msg (PR #793) -- Special case m.room.third\_party\_invite event auth to match invites (PR #814) +- Special case `m.room.third_party_invite` event auth to match invites (PR #814) Bug fixes: -- Fix typo in event\_auth servlet path (PR #757) +- Fix typo in `event_auth` servlet path (PR #757) - Fix password reset (PR #758) Performance improvements: @@ -2708,7 +2709,7 @@ Performance improvements: - Add `get_users_with_read_receipts_in_room` cache (PR #809) - Use state to calculate `get_users_in_room` (PR #811) - Load push rules in storage layer so that they get cached (PR #825) -- Make `get_joined_hosts_for_room` use get\_users\_in\_room (PR #828) +- Make `get_joined_hosts_for_room` use `get_users_in_room` (PR #828) - Poke notifier on next reactor tick (PR #829) - Change CacheMetrics to be quicker (PR #830) @@ -2772,19 +2773,19 @@ Changes in synapse v0.14.0-rc1 (2016-03-14) Features: -- Add event\_id to response to state event PUT (PR #581) +- Add `event_id` to response to state event PUT (PR #581) - Allow 
guest users access to messages in rooms they have joined (PR #587) - Add config for what state is included in a room invite (PR #598) - Send the inviter's member event in room invite state (PR #607) - Add error codes for malformed/bad JSON in /login (PR #608) - Add support for changing the actions for default rules (PR #609) -- Add environment variable SYNAPSE\_CACHE\_FACTOR, default it to 0.1 (PR #612) +- Add environment variable `SYNAPSE_CACHE_FACTOR`, default it to 0.1 (PR #612) - Add ability for alias creators to delete aliases (PR #614) - Add profile information to invites (PR #624) Changes: -- Enforce user\_id exclusivity for AS registrations (PR #572) +- Enforce `user_id` exclusivity for AS registrations (PR #572) - Make adding push rules idempotent (PR #587) - Improve presence performance (PR #582, #586) - Change presence semantics for `last_active_ago` (PR #582, #586) @@ -2792,7 +2793,7 @@ Changes: - Add 800x600 to default list of valid thumbnail sizes (PR #616) - Always include kicks and bans in full /sync (PR #625) - Send history visibility on boundary changes (PR #626) -- Register endpoint now returns a refresh\_token (PR #637) +- Register endpoint now returns a `refresh_token` (PR #637) Bug fixes: @@ -2963,7 +2964,7 @@ Changes in synapse v0.11.0-rc1 (2015-11-11) =========================================== - Add Search API (PR #307, #324, #327, #336, #350, #359) -- Add \'archived\' state to v2 /sync API (PR #316) +- Add `archived` state to v2 /sync API (PR #316) - Add ability to reject invites (PR #317) - Add config option to disable password login (PR #322) - Add the login fallback API (PR #330) @@ -3028,7 +3029,7 @@ Changes in synapse v0.10.0-rc3 (2015-08-25) =========================================== - Add `--keys-directory` config option to specify where files such as certs and signing keys should be stored in, when using `--generate-config` or `--generate-keys`. (PR #250) -- Allow `--config-path` to specify a directory, causing synapse to use all \*.yaml files in the directory as config files. (PR #249) +- Allow `--config-path` to specify a directory, causing synapse to use all `*.yaml` files in the directory as config files. (PR #249) - Add `web_client_location` config option to specify static files to be hosted by synapse under `/_matrix/client`. (PR #245) - Add helper utility to synapse to read and parse the config files and extract the value of a given key. For example: @@ -3060,7 +3061,7 @@ General: - Batch various storage request (PR #226, #228) - Fix bug where we didn't correctly log the entity that triggered the request if the request came in via an application service (PR #230) - Fix bug where we needlessly regenerated the full list of rooms an AS is interested in. (PR #232) -- Add support for AS's to use v2\_alpha registration API (PR #210) +- Add support for AS's to use `v2_alpha` registration API (PR #210) Configuration: @@ -3207,7 +3208,7 @@ Configuration: Application services: - Reliably retry sending of events from Synapse to application services, as per [Application Services](https://github.com/matrix-org/matrix-doc/blob/0c6bd9/specification/25_application_service_api.rst#home-server---application-service-api) spec. -- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. 
See [docs/application\_services.rst](docs/application_services.rst) for more information. +- Application services can no longer register via the `/register` API, instead their configuration should be saved to a file and listed in the synapse `app_service_config_files` config option. The AS configuration file has the same format as the old `/register` request. See [docs/application_services.rst](docs/application_services.rst) for more information. Changes in synapse v0.8.1 (2015-03-18) ====================================== @@ -3289,7 +3290,7 @@ Changes in synapse 0.6.0 (2014-12-16) ===================================== - Add new API for media upload and download that supports thumbnailing. -- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the \--content-addr parameter and confusion over accessing content directly from remote homeservers. +- Replicate media uploads over multiple homeservers so media is always served to clients from their local homeserver. This obsoletes the `--content-addr` parameter and confusion over accessing content directly from remote homeservers. - Implement exponential backoff when retrying federation requests when sending to remote homeservers which are offline. - Implement typing notifications. - Fix bugs where we sent events with invalid signatures due to bugs where we incorrectly persisted events. @@ -3304,13 +3305,13 @@ Changes in synapse 0.5.4 (2014-12-03) ===================================== - Fix presence bug where some rooms did not display presence updates for remote users. -- Do not log SQL timing log lines when started with \"-v\" +- Do not log SQL timing log lines when started with `-v` - Fix potential memory leak. Changes in synapse 0.5.3c (2014-12-02) ====================================== -- Change the default value for the content\_addr option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate. +- Change the default value for the `content_addr` option to use the HTTP listener, as by default the HTTPS listener will be using a self-signed certificate. Changes in synapse 0.5.3 (2014-11-27) ===================================== @@ -3391,7 +3392,7 @@ You will also need an updated syutil and config. See UPGRADES.rst. Homeserver: - Sign federation transactions to assert strong identity over federation. -- Rename timestamp keys in PDUs and events from \'ts\' and \'hsob\_ts\' to \'origin\_server\_ts\'. +- Rename timestamp keys in PDUs and events from `ts` and `hsob_ts` to `origin_server_ts`. Changes in synapse 0.3.4 (2014-09-25) ===================================== @@ -3461,9 +3462,9 @@ See UPGRADE for information about changes to the client server API, including br Homeserver: - When a user changes their displayname or avatar the server will now update all their join states to reflect this. -- The server now adds \"age\" key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else. +- The server now adds `age` key to events to indicate how old they are. This is clock independent, so at no point does any server or webclient have to assume their clock is in sync with everyone else. - Fix bug where we didn't correctly pull in missing PDUs. -- Fix bug where prev\_content key wasn't always returned. +- Fix bug where `prev_content` key wasn't always returned. - Add support for password resets. 
Webclient: @@ -3481,9 +3482,9 @@ Webclient: Registration API: -- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: \'type\':\'m.login.password\'. See UPGRADE for more information on this. -- The \'user\_id\' key has been renamed to \'user\' to better match the login API. -- There is an additional login type: \'m.login.email.identity\'. +- The registration API has been overhauled to function like the login API. In practice, this means registration requests must now include the following: `type`:`m.login.password`. See UPGRADE for more information on this. +- The `user_id` key has been renamed to `user` to better match the login API. +- There is an additional login type: `m.login.email.identity`. - The command client and web client have been updated to reflect these changes. Changes in synapse 0.2.3 (2014-09-12) @@ -3516,7 +3517,7 @@ Homeserver: - When the server returns state events it now also includes the previous content. - Add support for inviting people when creating a new room. - Make the homeserver inform the room via m.room.aliases when a new alias is added for a room. -- Validate m.room.power\_level events. +- Validate `m.room.power_level` events. Webclient: @@ -3559,7 +3560,7 @@ Homeserver: - Add support for kicking/banning and power levels. - Allow setting of room names and topics on creation. - Change presence to include last seen time of the user. -- Change url path prefix to /\_matrix/\... +- Change url path prefix to `/_matrix/...` - Bug fixes to presence. Webclient: diff --git a/docs/user_directory.md b/docs/user_directory.md index b33fd2bc2a..1271cfb862 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -83,7 +83,7 @@ The search term is then split into words: available, then the system's [default locale](https://unicode-org.github.io/icu/userguide/locale/#default-locales) will be used to break the search term into words. (See the [installation instructions](setup/installation.md) for how to install ICU.) -* If unavailable, then runs of ASCII characters, numbers, underscores, and hypens +* If unavailable, then runs of ASCII characters, numbers, underscores, and hyphens are considered words. The queries for PostgreSQL and SQLite are detailed below, by their overall goal From 1e67191a79a06fb4031b17c24b4621c066345182 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 3 Oct 2023 11:55:29 +0300 Subject: [PATCH 012/142] Add note to 'federation_domain_whitelist' option (#16416) --- changelog.d/16416.doc | 1 + docs/usage/configuration/config_documentation.md | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/16416.doc diff --git a/changelog.d/16416.doc b/changelog.d/16416.doc new file mode 100644 index 0000000000..be2b7d2805 --- /dev/null +++ b/changelog.d/16416.doc @@ -0,0 +1 @@ +Add note to `federation_domain_whitelist` config option to clarify its usage. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 502cd9f823..92e00c1380 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1190,6 +1190,11 @@ inbound federation traffic as early as possible, rather than relying purely on this application-layer restriction. If not specified, the default is to whitelist everything. +Note: this does not stop a server from joining rooms that servers not on the +whitelist are in. 
As such, this option is really only useful to establish a +"private federation", where a group of servers all whitelist each other and have +the same whitelist. + Example configuration: ```yaml federation_domain_whitelist: From cce94844523c614ad0b5c30c101618bd5d8f8a66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:57:11 +0100 Subject: [PATCH 013/142] Bump urllib3 from 1.26.15 to 1.26.17 (#16422) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 255396033c..13884e6698 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3197,17 +3197,17 @@ files = [ [[package]] name = "urllib3" -version = "1.26.15" +version = "1.26.17" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, - {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, + {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, + {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] From 8b50a9d01da2c84bb9838287519fa3e0a4e955ce Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 Oct 2023 11:50:57 +0100 Subject: [PATCH 014/142] 1.94.0rc1 --- CHANGES.md | 52 +++++++++++++++++++++++++++++++++++++++ changelog.d/14745.misc | 1 - changelog.d/14911.misc | 1 - changelog.d/15691.doc | 1 - changelog.d/15988.feature | 1 - changelog.d/16320.doc | 1 - changelog.d/16332.misc | 1 - changelog.d/16348.misc | 1 - changelog.d/16350.misc | 1 - changelog.d/16355.doc | 1 - changelog.d/16356.misc | 1 - changelog.d/16359.misc | 1 - changelog.d/16360.misc | 1 - changelog.d/16361.feature | 1 - changelog.d/16380.removal | 1 - changelog.d/16381.misc | 1 - changelog.d/16382.doc | 1 - changelog.d/16383.misc | 1 - changelog.d/16385.misc | 1 - changelog.d/16387.misc | 1 - changelog.d/16394.misc | 1 - changelog.d/16395.misc | 1 - changelog.d/16401.misc | 1 - changelog.d/16416.doc | 1 - changelog.d/16418.doc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 27 files changed, 59 insertions(+), 25 deletions(-) delete mode 100644 changelog.d/14745.misc delete mode 100644 changelog.d/14911.misc delete mode 100644 changelog.d/15691.doc delete mode 100644 changelog.d/15988.feature delete mode 100644 changelog.d/16320.doc delete mode 100644 changelog.d/16332.misc delete mode 100644 changelog.d/16348.misc delete mode 100644 changelog.d/16350.misc delete mode 100644 changelog.d/16355.doc delete mode 100644 changelog.d/16356.misc delete mode 100644 changelog.d/16359.misc delete mode 100644 changelog.d/16360.misc delete mode 100644 changelog.d/16361.feature delete mode 100644 changelog.d/16380.removal delete mode 100644 
changelog.d/16381.misc delete mode 100644 changelog.d/16382.doc delete mode 100644 changelog.d/16383.misc delete mode 100644 changelog.d/16385.misc delete mode 100644 changelog.d/16387.misc delete mode 100644 changelog.d/16394.misc delete mode 100644 changelog.d/16395.misc delete mode 100644 changelog.d/16401.misc delete mode 100644 changelog.d/16416.doc delete mode 100644 changelog.d/16418.doc diff --git a/CHANGES.md b/CHANGES.md index c1ea40de20..9e73868788 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,55 @@ +# Synapse 1.94.0rc1 (2023-10-03) + +### Features + +- Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) +- Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) +- Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385)) +- Minor performance improvement by caching server ACL checking. ([\#16360](https://github.com/matrix-org/synapse/issues/16360)) + +### Improved Documentation + +- Add developer documentation concerning gradual schema migrations with column alterations. ([\#15691](https://github.com/matrix-org/synapse/issues/15691)) +- Improve documentation of the user directory search algorithm. ([\#16320](https://github.com/matrix-org/synapse/issues/16320)) +- Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. ([\#16355](https://github.com/matrix-org/synapse/issues/16355)) +- Update documentation around message retention policies. ([\#16382](https://github.com/matrix-org/synapse/issues/16382)) +- Add note to `federation_domain_whitelist` config option to clarify its usage. ([\#16416](https://github.com/matrix-org/synapse/issues/16416)) +- Improve legacy release notes. ([\#16418](https://github.com/matrix-org/synapse/issues/16418)) + +### Deprecations and Removals + +- Remove Python version from `/_synapse/admin/v1/server_version`. ([\#16380](https://github.com/matrix-org/synapse/issues/16380)) + +### Internal Changes + +- Avoid running CI steps when the files they check have not been changed. ([\#14745](https://github.com/matrix-org/synapse/issues/14745), [\#16387](https://github.com/matrix-org/synapse/issues/16387)) +- Improve type hints. ([\#14911](https://github.com/matrix-org/synapse/issues/14911), [\#16350](https://github.com/matrix-org/synapse/issues/16350), [\#16356](https://github.com/matrix-org/synapse/issues/16356), [\#16395](https://github.com/matrix-org/synapse/issues/16395)) +- Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). ([\#16332](https://github.com/matrix-org/synapse/issues/16332)) +- Get CI to check PRs have been signed-off. ([\#16348](https://github.com/matrix-org/synapse/issues/16348)) +- Add missing licence header. ([\#16359](https://github.com/matrix-org/synapse/issues/16359)) +- Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381)) +- Improve comments in `StateGroupBackgroundUpdateStore`. ([\#16383](https://github.com/matrix-org/synapse/issues/16383)) +- Update maturin configuration. 
([\#16394](https://github.com/matrix-org/synapse/issues/16394)) +- Downgrade replication stream time out error log lines to warning. ([\#16401](https://github.com/matrix-org/synapse/issues/16401)) + +### Updates to locked dependencies + +* Bump actions/checkout from 3 to 4. ([\#16250](https://github.com/matrix-org/synapse/issues/16250)) +* Bump cryptography from 41.0.3 to 41.0.4. ([\#16362](https://github.com/matrix-org/synapse/issues/16362)) +* Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0. ([\#16374](https://github.com/matrix-org/synapse/issues/16374)) +* Bump docker/setup-buildx-action from 2 to 3. ([\#16375](https://github.com/matrix-org/synapse/issues/16375)) +* Bump gitpython from 3.1.35 to 3.1.37. ([\#16376](https://github.com/matrix-org/synapse/issues/16376)) +* Bump msgpack from 1.0.5 to 1.0.6. ([\#16377](https://github.com/matrix-org/synapse/issues/16377)) +* Bump msgpack from 1.0.6 to 1.0.7. ([\#16412](https://github.com/matrix-org/synapse/issues/16412)) +* Bump phonenumbers from 8.13.19 to 8.13.22. ([\#16413](https://github.com/matrix-org/synapse/issues/16413)) +* Bump psycopg2 from 2.9.7 to 2.9.8. ([\#16409](https://github.com/matrix-org/synapse/issues/16409)) +* Bump pydantic from 2.3.0 to 2.4.2. ([\#16410](https://github.com/matrix-org/synapse/issues/16410)) +* Bump regex from 1.9.5 to 1.9.6. ([\#16408](https://github.com/matrix-org/synapse/issues/16408)) +* Bump sentry-sdk from 1.30.0 to 1.31.0. ([\#16378](https://github.com/matrix-org/synapse/issues/16378)) +* Bump types-netaddr from 0.8.0.9 to 0.9.0.1. ([\#16411](https://github.com/matrix-org/synapse/issues/16411)) +* Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381)) +* Bump urllib3 from 1.26.15 to 1.26.17. ([\#16422](https://github.com/matrix-org/synapse/issues/16422)) + # Synapse 1.93.0 (2023-09-26) No significant changes since 1.93.0rc1. diff --git a/changelog.d/14745.misc b/changelog.d/14745.misc deleted file mode 100644 index eae0501d6b..0000000000 --- a/changelog.d/14745.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid running CI steps when the files they check have not been changed. \ No newline at end of file diff --git a/changelog.d/14911.misc b/changelog.d/14911.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/14911.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15691.doc b/changelog.d/15691.doc deleted file mode 100644 index fe649e1027..0000000000 --- a/changelog.d/15691.doc +++ /dev/null @@ -1 +0,0 @@ -Add developer documentation concerning gradual schema migrations with column alterations. \ No newline at end of file diff --git a/changelog.d/15988.feature b/changelog.d/15988.feature deleted file mode 100644 index dee8fa597f..0000000000 --- a/changelog.d/15988.feature +++ /dev/null @@ -1 +0,0 @@ -Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. \ No newline at end of file diff --git a/changelog.d/16320.doc b/changelog.d/16320.doc deleted file mode 100644 index 53e42df56f..0000000000 --- a/changelog.d/16320.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of the user directory search algorithm. diff --git a/changelog.d/16332.misc b/changelog.d/16332.misc deleted file mode 100644 index 862d547d60..0000000000 --- a/changelog.d/16332.misc +++ /dev/null @@ -1 +0,0 @@ -Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). 
diff --git a/changelog.d/16348.misc b/changelog.d/16348.misc deleted file mode 100644 index 846bb048c8..0000000000 --- a/changelog.d/16348.misc +++ /dev/null @@ -1 +0,0 @@ -Get CI to check PRs have been signed-off. diff --git a/changelog.d/16350.misc b/changelog.d/16350.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16350.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16355.doc b/changelog.d/16355.doc deleted file mode 100644 index 73d29c7889..0000000000 --- a/changelog.d/16355.doc +++ /dev/null @@ -1 +0,0 @@ -Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. diff --git a/changelog.d/16356.misc b/changelog.d/16356.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16356.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16359.misc b/changelog.d/16359.misc deleted file mode 100644 index 8752085fc6..0000000000 --- a/changelog.d/16359.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing licence header. diff --git a/changelog.d/16360.misc b/changelog.d/16360.misc deleted file mode 100644 index b32d7b521e..0000000000 --- a/changelog.d/16360.misc +++ /dev/null @@ -1 +0,0 @@ -Cache server ACL checking. diff --git a/changelog.d/16361.feature b/changelog.d/16361.feature deleted file mode 100644 index 632fff789b..0000000000 --- a/changelog.d/16361.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. diff --git a/changelog.d/16380.removal b/changelog.d/16380.removal deleted file mode 100644 index 6e9372134d..0000000000 --- a/changelog.d/16380.removal +++ /dev/null @@ -1 +0,0 @@ -Remove Python version from `/_synapse/admin/v1/server_version`. \ No newline at end of file diff --git a/changelog.d/16381.misc b/changelog.d/16381.misc deleted file mode 100644 index a454651952..0000000000 --- a/changelog.d/16381.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. diff --git a/changelog.d/16382.doc b/changelog.d/16382.doc deleted file mode 100644 index 2549586310..0000000000 --- a/changelog.d/16382.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation around message retention policies. diff --git a/changelog.d/16383.misc b/changelog.d/16383.misc deleted file mode 100644 index d8d84cc184..0000000000 --- a/changelog.d/16383.misc +++ /dev/null @@ -1 +0,0 @@ -Improve comments in `StateGroupBackgroundUpdateStore`. diff --git a/changelog.d/16385.misc b/changelog.d/16385.misc deleted file mode 100644 index d439a931d6..0000000000 --- a/changelog.d/16385.misc +++ /dev/null @@ -1 +0,0 @@ -Minor performance improvement when sending presence to federated servers. diff --git a/changelog.d/16387.misc b/changelog.d/16387.misc deleted file mode 100644 index eae0501d6b..0000000000 --- a/changelog.d/16387.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid running CI steps when the files they check have not been changed. \ No newline at end of file diff --git a/changelog.d/16394.misc b/changelog.d/16394.misc deleted file mode 100644 index ee08c3e024..0000000000 --- a/changelog.d/16394.misc +++ /dev/null @@ -1 +0,0 @@ -Update maturin configuration. diff --git a/changelog.d/16395.misc b/changelog.d/16395.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16395.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/changelog.d/16401.misc b/changelog.d/16401.misc deleted file mode 100644 index 86d2749a08..0000000000 --- a/changelog.d/16401.misc +++ /dev/null @@ -1 +0,0 @@ -Downgrade replication stream time out error log lines to warning. diff --git a/changelog.d/16416.doc b/changelog.d/16416.doc deleted file mode 100644 index be2b7d2805..0000000000 --- a/changelog.d/16416.doc +++ /dev/null @@ -1 +0,0 @@ -Add note to `federation_domain_whitelist` config option to clarify its usage. diff --git a/changelog.d/16418.doc b/changelog.d/16418.doc deleted file mode 100644 index 4ec5dbb6b2..0000000000 --- a/changelog.d/16418.doc +++ /dev/null @@ -1 +0,0 @@ -Improve legacy release notes. diff --git a/debian/changelog b/debian/changelog index 7be71019b4..78da69ebb0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.94.0~rc1) stable; urgency=medium + + * New Synapse release 1.94.0rc1. + + -- Synapse Packaging team Tue, 03 Oct 2023 11:48:18 +0100 + matrix-synapse-py3 (1.93.0) stable; urgency=medium * New Synapse release 1.93.0. diff --git a/pyproject.toml b/pyproject.toml index 5fb64479a1..b22172291a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.93.0" +version = "1.94.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From a01ee24734242cf95a29a3631d7f1192cc8bd2af Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 3 Oct 2023 13:21:45 +0100 Subject: [PATCH 015/142] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9e73868788..6c30c40858 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,8 +2,8 @@ ### Features -- Render plain, CSS, CSV, JSON and common image formats media content in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) -- Experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) +- Render plain, CSS, CSV, JSON and common image formats in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988)) +- Add experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361)) - Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385)) - Minor performance improvement by caching server ACL checking. 
([\#16360](https://github.com/matrix-org/synapse/issues/16360)) From 80ec81dcc54bdb823b95c2f870a919868de9a481 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 4 Oct 2023 18:28:40 +0300 Subject: [PATCH 016/142] Some refactors around receipts stream (#16426) --- changelog.d/16426.misc | 1 + synapse/handlers/appservice.py | 4 +- synapse/handlers/push_rules.py | 6 +- synapse/handlers/receipts.py | 25 ++++---- synapse/notifier.py | 17 +++--- synapse/push/__init__.py | 2 +- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 2 +- synapse/push/pusherpool.py | 12 +--- synapse/replication/tcp/client.py | 4 +- .../storage/databases/main/e2e_room_keys.py | 2 +- synapse/storage/databases/main/receipts.py | 6 +- synapse/streams/events.py | 15 +++-- synapse/types/__init__.py | 59 ++++++++++++++----- tests/handlers/test_appservice.py | 8 +-- tests/handlers/test_typing.py | 26 +++++--- 16 files changed, 111 insertions(+), 80 deletions(-) create mode 100644 changelog.d/16426.misc diff --git a/changelog.d/16426.misc b/changelog.d/16426.misc new file mode 100644 index 0000000000..208a007171 --- /dev/null +++ b/changelog.d/16426.misc @@ -0,0 +1 @@ +Refactor some code to simplify and better type receipts stream adjacent code. diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 7de7bd3289..c200a45f3a 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -216,7 +216,7 @@ class ApplicationServicesHandler: def notify_interested_services_ephemeral( self, - stream_key: str, + stream_key: StreamKeyType, new_token: Union[int, RoomStreamToken], users: Collection[Union[str, UserID]], ) -> None: @@ -326,7 +326,7 @@ class ApplicationServicesHandler: async def _notify_interested_services_ephemeral( self, services: List[ApplicationService], - stream_key: str, + stream_key: StreamKeyType, new_token: int, users: Collection[Union[str, UserID]], ) -> None: diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 7ed88a3611..87b428ab1c 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -19,7 +19,7 @@ from synapse.api.errors import SynapseError, UnrecognizedRequestError from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.push_rule import RuleNotFoundException from synapse.synapse_rust.push import get_base_rule_ids -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, StreamKeyType, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -114,7 +114,9 @@ class PushRulesHandler: user_id: the user ID the change is for. 
""" stream_id = self._main_store.get_max_push_rules_stream_id() - self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) + self._notifier.on_new_event( + StreamKeyType.PUSH_RULES, stream_id, users=[user_id] + ) async def push_rules_for_user( self, user: UserID diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a7a29b758b..69ac468f75 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -130,11 +130,10 @@ class ReceiptsHandler: async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool: """Takes a list of receipts, stores them and informs the notifier.""" - min_batch_id: Optional[int] = None - max_batch_id: Optional[int] = None + receipts_persisted: List[ReadReceipt] = [] for receipt in receipts: - res = await self.store.insert_receipt( + stream_id = await self.store.insert_receipt( receipt.room_id, receipt.receipt_type, receipt.user_id, @@ -143,30 +142,26 @@ class ReceiptsHandler: receipt.data, ) - if not res: - # res will be None if this receipt is 'old' + if stream_id is None: + # stream_id will be None if this receipt is 'old' continue - stream_id, max_persisted_id = res + receipts_persisted.append(receipt) - if min_batch_id is None or stream_id < min_batch_id: - min_batch_id = stream_id - if max_batch_id is None or max_persisted_id > max_batch_id: - max_batch_id = max_persisted_id - - # Either both of these should be None or neither. - if min_batch_id is None or max_batch_id is None: + if not receipts_persisted: # no new receipts return False - affected_room_ids = list({r.room_id for r in receipts}) + max_batch_id = self.store.get_max_receipt_stream_id() + + affected_room_ids = list({r.room_id for r in receipts_persisted}) self.notifier.on_new_event( StreamKeyType.RECEIPT, max_batch_id, rooms=affected_room_ids ) # Note that the min here shouldn't be relied upon to be accurate. 
await self.hs.get_pusherpool().on_new_receipts( - min_batch_id, max_batch_id, affected_room_ids + {r.user_id for r in receipts_persisted} ) return True diff --git a/synapse/notifier.py b/synapse/notifier.py index fc39e5c963..99e7715896 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -126,7 +126,7 @@ class _NotifierUserStream: def notify( self, - stream_key: str, + stream_key: StreamKeyType, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ) -> None: @@ -454,7 +454,7 @@ class Notifier: def on_new_event( self, - stream_key: str, + stream_key: StreamKeyType, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[StrCollection] = None, @@ -655,30 +655,29 @@ class Notifier: events: List[Union[JsonDict, EventBase]] = [] end_token = from_token - for name, source in self.event_sources.sources.get_sources(): - keyname = "%s_key" % name - before_id = getattr(before_token, keyname) - after_id = getattr(after_token, keyname) + for keyname, source in self.event_sources.sources.get_sources(): + before_id = before_token.get_field(keyname) + after_id = after_token.get_field(keyname) if before_id == after_id: continue new_events, new_key = await source.get_new_events( user=user, - from_key=getattr(from_token, keyname), + from_key=from_token.get_field(keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) - if name == "room": + if keyname == StreamKeyType.ROOM: new_events = await filter_events_for_client( self._storage_controllers, user.to_string(), new_events, is_peeking=is_peeking, ) - elif name == "presence": + elif keyname == StreamKeyType.PRESENCE: now = self.clock.time_msec() new_events[:] = [ { diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9e3a98741a..9e5eb2a445 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -182,7 +182,7 @@ class Pusher(metaclass=abc.ABCMeta): raise NotImplementedError() @abc.abstractmethod - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: raise NotImplementedError() @abc.abstractmethod diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 1710dd51b9..cf45fd09a8 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -99,7 +99,7 @@ class EmailPusher(Pusher): pass self.timed_call = None - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: # We could wake up and cancel the timer but there tend to be quite a # lot of read receipts so it's probably less work to just let the # timer fire diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 50027680cb..725910a659 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -160,7 +160,7 @@ class HttpPusher(Pusher): if should_check_for_notifs: self._start_processing() - def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None: + def on_new_receipts(self) -> None: # Note that the min here shouldn't be relied upon to be accurate. 
# We could check the receipts are actually m.read receipts here, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 6517e3566f..15a2cc932f 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -292,20 +292,12 @@ class PusherPool: except Exception: logger.exception("Exception in pusher on_new_notifications") - async def on_new_receipts( - self, min_stream_id: int, max_stream_id: int, affected_room_ids: Iterable[str] - ) -> None: + async def on_new_receipts(self, users_affected: StrCollection) -> None: if not self.pushers: # nothing to do here. return try: - # Need to subtract 1 from the minimum because the lower bound here - # is not inclusive - users_affected = await self.store.get_users_sent_receipts_between( - min_stream_id - 1, max_stream_id - ) - for u in users_affected: # Don't push if the user account has expired expired = await self._account_validity_handler.is_user_expired(u) @@ -314,7 +306,7 @@ class PusherPool: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_receipts(min_stream_id, max_stream_id) + p.on_new_receipts() except Exception: logger.exception("Exception in pusher on_new_receipts") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index f4f2b29e96..d5337fe588 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -129,9 +129,7 @@ class ReplicationDataHandler: self.notifier.on_new_event( StreamKeyType.RECEIPT, token, rooms=[row.room_id for row in rows] ) - await self._pusher_pool.on_new_receipts( - token, token, {row.room_id for row in rows} - ) + await self._pusher_pool.on_new_receipts({row.user_id for row in rows}) elif stream_name == ToDeviceStream.NAME: entities = [row.entity for row in rows if row.entity.startswith("@")] if entities: diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index d01f28cc80..bc7c6a6346 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -208,7 +208,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): "message": "Set room key", "room_id": room_id, "session_id": session_id, - StreamKeyType.ROOM: room_key, + StreamKeyType.ROOM.value: room_key, } ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 0231f9407b..3bab1024ea 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -742,7 +742,7 @@ class ReceiptsWorkerStore(SQLBaseStore): event_ids: List[str], thread_id: Optional[str], data: dict, - ) -> Optional[Tuple[int, int]]: + ) -> Optional[int]: """Insert a receipt, either from local client or remote server. Automatically does conversion between linearized and graph @@ -804,9 +804,7 @@ class ReceiptsWorkerStore(SQLBaseStore): data, ) - max_persisted_id = self._receipts_id_gen.get_current_token() - - return stream_id, max_persisted_id + return stream_id async def _insert_graph_receipt( self, diff --git a/synapse/streams/events.py b/synapse/streams/events.py index d7084d2358..609a0978a9 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Iterator, Tuple +from typing import TYPE_CHECKING, Sequence, Tuple import attr @@ -23,7 +23,7 @@ from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource from synapse.logging.opentracing import trace from synapse.streams import EventSource -from synapse.types import StreamToken +from synapse.types import StreamKeyType, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer @@ -37,9 +37,14 @@ class _EventSourcesInner: receipt: ReceiptEventSource account_data: AccountDataEventSource - def get_sources(self) -> Iterator[Tuple[str, EventSource]]: - for attribute in attr.fields(_EventSourcesInner): - yield attribute.name, getattr(self, attribute.name) + def get_sources(self) -> Sequence[Tuple[StreamKeyType, EventSource]]: + return [ + (StreamKeyType.ROOM, self.room), + (StreamKeyType.PRESENCE, self.presence), + (StreamKeyType.TYPING, self.typing), + (StreamKeyType.RECEIPT, self.receipt), + (StreamKeyType.ACCOUNT_DATA, self.account_data), + ] class EventSources: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 76b0e3e694..406d5b1611 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -22,8 +22,8 @@ from typing import ( Any, ClassVar, Dict, - Final, List, + Literal, Mapping, Match, MutableMapping, @@ -34,6 +34,7 @@ from typing import ( Type, TypeVar, Union, + overload, ) import attr @@ -649,20 +650,20 @@ class RoomStreamToken: return "s%d" % (self.stream,) -class StreamKeyType: +class StreamKeyType(Enum): """Known stream types. A stream is a list of entities ordered by an incrementing "stream token". """ - ROOM: Final = "room_key" - PRESENCE: Final = "presence_key" - TYPING: Final = "typing_key" - RECEIPT: Final = "receipt_key" - ACCOUNT_DATA: Final = "account_data_key" - PUSH_RULES: Final = "push_rules_key" - TO_DEVICE: Final = "to_device_key" - DEVICE_LIST: Final = "device_list_key" + ROOM = "room_key" + PRESENCE = "presence_key" + TYPING = "typing_key" + RECEIPT = "receipt_key" + ACCOUNT_DATA = "account_data_key" + PUSH_RULES = "push_rules_key" + TO_DEVICE = "to_device_key" + DEVICE_LIST = "device_list_key" UN_PARTIAL_STATED_ROOMS = "un_partial_stated_rooms_key" @@ -784,7 +785,7 @@ class StreamToken: def room_stream_id(self) -> int: return self.room_key.stream - def copy_and_advance(self, key: str, new_value: Any) -> "StreamToken": + def copy_and_advance(self, key: StreamKeyType, new_value: Any) -> "StreamToken": """Advance the given key in the token to a new value if and only if the new value is after the old value. @@ -797,16 +798,44 @@ class StreamToken: return new_token new_token = self.copy_and_replace(key, new_value) - new_id = int(getattr(new_token, key)) - old_id = int(getattr(self, key)) + new_id = new_token.get_field(key) + old_id = self.get_field(key) if old_id < new_id: return new_token else: return self - def copy_and_replace(self, key: str, new_value: Any) -> "StreamToken": - return attr.evolve(self, **{key: new_value}) + def copy_and_replace(self, key: StreamKeyType, new_value: Any) -> "StreamToken": + return attr.evolve(self, **{key.value: new_value}) + + @overload + def get_field(self, key: Literal[StreamKeyType.ROOM]) -> RoomStreamToken: + ... 
+ + @overload + def get_field( + self, + key: Literal[ + StreamKeyType.ACCOUNT_DATA, + StreamKeyType.DEVICE_LIST, + StreamKeyType.PRESENCE, + StreamKeyType.PUSH_RULES, + StreamKeyType.RECEIPT, + StreamKeyType.TO_DEVICE, + StreamKeyType.TYPING, + StreamKeyType.UN_PARTIAL_STATED_ROOMS, + ], + ) -> int: + ... + + @overload + def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + ... + + def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + """Returns the stream ID for the given key.""" + return getattr(self, key.value) StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0, 0) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index a7e6cdd66a..8ce6ccf529 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -31,7 +31,7 @@ from synapse.appservice import ( from synapse.handlers.appservice import ApplicationServicesHandler from synapse.rest.client import login, receipts, register, room, sendtodevice from synapse.server import HomeServer -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, StreamKeyType from synapse.util import Clock from synapse.util.stringutils import random_string @@ -304,7 +304,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - "receipt_key", 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] @@ -332,7 +332,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - "receipt_key", 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] ) # This method will be called, but with an empty list of events self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( @@ -634,7 +634,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.get_success( self.hs.get_application_service_handler()._notify_interested_services_ephemeral( services=[interested_appservice], - stream_key="receipt_key", + stream_key=StreamKeyType.RECEIPT, new_token=stream_token, users=[self.exclusive_as_user], ) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 95106ec8f3..3060bc9744 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -28,7 +28,7 @@ from synapse.federation.transport.server import TransportLayerServer from synapse.handlers.typing import TypingWriterHandler from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.server import HomeServer -from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.types import JsonDict, Requester, StreamKeyType, UserID, create_requester from synapse.util import Clock from tests import unittest @@ -203,7 +203,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( @@ -273,7 +275,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) self.assertEqual(channel.code, 200) - self.on_new_event.assert_has_calls([call("typing_key", 1, 
rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( @@ -349,7 +353,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.mock_federation_client.put_json.assert_called_once_with( "farm", @@ -399,7 +405,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 1, rooms=[ROOM_ID])] + ) self.on_new_event.reset_mock() self.assertEqual(self.event_source.get_current_key(), 1) @@ -425,7 +433,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.reactor.pump([16]) - self.on_new_event.assert_has_calls([call("typing_key", 2, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 2, rooms=[ROOM_ID])] + ) self.assertEqual(self.event_source.get_current_key(), 2) events = self.get_success( @@ -459,7 +469,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) ) - self.on_new_event.assert_has_calls([call("typing_key", 3, rooms=[ROOM_ID])]) + self.on_new_event.assert_has_calls( + [call(StreamKeyType.TYPING, 3, rooms=[ROOM_ID])] + ) self.on_new_event.reset_mock() self.assertEqual(self.event_source.get_current_key(), 3) From ab9c1e8f3951dcdb9d628b7ed155de543c046c44 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 4 Oct 2023 13:53:04 -0400 Subject: [PATCH 017/142] Add type hints to synmark. (#16421) --- changelog.d/16421.misc | 1 + mypy.ini | 4 ++ synmark/__init__.py | 9 +++-- synmark/__main__.py | 48 ++++++++++++++-------- synmark/suites/logging.py | 68 ++++++++++++++++++++------------ synmark/suites/lrucache.py | 5 ++- synmark/suites/lrucache_evict.py | 5 ++- 7 files changed, 91 insertions(+), 49 deletions(-) create mode 100644 changelog.d/16421.misc diff --git a/changelog.d/16421.misc b/changelog.d/16421.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16421.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 88aea301b9..fdfe9432fc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,6 +32,7 @@ files = docker/, scripts-dev/, synapse/, + synmark/, tests/, build_rust.py @@ -80,6 +81,9 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True +[mypy-pyperf.*] +ignore_missing_imports = True + [mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True diff --git a/synmark/__init__.py b/synmark/__init__.py index 2cc00b0f03..f213319542 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -13,15 +13,18 @@ # limitations under the License. import sys +from typing import cast + +from synapse.types import ISynapseReactor try: from twisted.internet.epollreactor import EPollReactor as Reactor except ImportError: - from twisted.internet.pollreactor import PollReactor as Reactor + from twisted.internet.pollreactor import PollReactor as Reactor # type: ignore[assignment] from twisted.internet.main import installReactor -def make_reactor(): +def make_reactor() -> ISynapseReactor: """ Instantiate and install a Twisted reactor suitable for testing (i.e. not the default global one). 
@@ -32,4 +35,4 @@ def make_reactor(): del sys.modules["twisted.internet.reactor"] installReactor(reactor) - return reactor + return cast(ISynapseReactor, reactor) diff --git a/synmark/__main__.py b/synmark/__main__.py index 19de639187..397dd86576 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import sys -from argparse import REMAINDER +from argparse import REMAINDER, Namespace from contextlib import redirect_stderr from io import StringIO +from typing import Any, Callable, Coroutine, List, TypeVar import pyperf @@ -22,44 +23,50 @@ from twisted.internet.defer import Deferred, ensureDeferred from twisted.logger import globalLogBeginner, textFileLogObserver from twisted.python.failure import Failure +from synapse.types import ISynapseReactor from synmark import make_reactor from synmark.suites import SUITES from tests.utils import setupdb +T = TypeVar("T") -def make_test(main): + +def make_test( + main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]] +) -> Callable[[int], float]: """ Take a benchmark function and wrap it in a reactor start and stop. """ - def _main(loops): + def _main(loops: int) -> float: reactor = make_reactor() file_out = StringIO() with redirect_stderr(file_out): - d = Deferred() + d: "Deferred[float]" = Deferred() d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) - def on_done(_): - if isinstance(_, Failure): - _.printTraceback() + def on_done(res: T) -> T: + if isinstance(res, Failure): + res.printTraceback() print(file_out.getvalue()) reactor.stop() - return _ + return res d.addBoth(on_done) reactor.callWhenRunning(lambda: d.callback(True)) reactor.run() - return d.result + # mypy thinks this is an object for some reason. + return d.result # type: ignore[return-value] return _main if __name__ == "__main__": - def add_cmdline_args(cmd, args): + def add_cmdline_args(cmd: List[str], args: Namespace) -> None: if args.log: cmd.extend(["--log"]) cmd.extend(args.tests) @@ -82,17 +89,26 @@ if __name__ == "__main__": setupdb() if runner.args.tests: - SUITES = list( - filter(lambda x: x[0].__name__.split(".")[-1] in runner.args.tests, SUITES) - ) + existing_suites = {s.__name__.split(".")[-1] for s, _ in SUITES} + for test in runner.args.tests: + if test not in existing_suites: + print(f"Test suite {test} does not exist.") + exit(-1) - for suite, loops in SUITES: + suites = list( + filter(lambda t: t[0].__name__.split(".")[-1] in runner.args.tests, SUITES) + ) + else: + suites = SUITES + + for suite, loops in suites: if loops: runner.args.loops = loops + loops_desc = str(loops) else: runner.args.loops = orig_loops - loops = "auto" + loops_desc = "auto" runner.bench_time_func( - suite.__name__ + "_" + str(loops), + suite.__name__ + "_" + loops_desc, make_test(suite.main), ) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 04e5b29dc9..e160443643 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -11,14 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging +import logging.config import warnings from io import StringIO +from typing import Optional from unittest.mock import Mock from pyperf import perf_counter +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred from twisted.internet.protocol import ServerFactory from twisted.logger import LogBeginner, LogPublisher @@ -26,45 +28,53 @@ from twisted.protocols.basic import LineOnlyReceiver from synapse.config.logger import _setup_stdlib_logging from synapse.logging import RemoteHandler +from synapse.synapse_rust import reset_logging_config +from synapse.types import ISynapseReactor from synapse.util import Clock class LineCounter(LineOnlyReceiver): delimiter = b"\n" + count = 0 - def __init__(self, *args, **kwargs): - self.count = 0 - super().__init__(*args, **kwargs) - - def lineReceived(self, line): + def lineReceived(self, line: bytes) -> None: self.count += 1 + assert isinstance(self.factory, Factory) + if self.count >= self.factory.wait_for and self.factory.on_done: on_done = self.factory.on_done self.factory.on_done = None on_done.callback(True) -async def main(reactor, loops): +class Factory(ServerFactory): + protocol = LineCounter + wait_for: int + on_done: Optional[Deferred] + + +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark how long it takes to send `loops` messages. """ - servers = [] - def protocol(): - p = LineCounter() - servers.append(p) - return p - - logger_factory = ServerFactory.forProtocol(protocol) + logger_factory = Factory() logger_factory.wait_for = loops logger_factory.on_done = Deferred() - port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1") + port = reactor.listenTCP(0, logger_factory, backlog=50, interface="127.0.0.1") # A fake homeserver config. class Config: - server_name = "synmark-" + str(loops) - no_redirect_stdio = True + class server: + server_name = "synmark-" + str(loops) + + # This odd construct is to avoid mypy thinking that logging escapes the + # scope of Config. + class _logging: + no_redirect_stdio = True + + logging = _logging hs_config = Config() @@ -78,28 +88,34 @@ async def main(reactor, loops): publisher, errors, mock_sys, warnings, initialBufferSize=loops ) + address = port.getHost() + assert isinstance(address, (IPv4Address, IPv6Address)) log_config = { "version": 1, - "loggers": {"synapse": {"level": "DEBUG", "handlers": ["tersejson"]}}, + "loggers": {"synapse": {"level": "DEBUG", "handlers": ["remote"]}}, "formatters": {"tersejson": {"class": "synapse.logging.TerseJsonFormatter"}}, "handlers": { - "tersejson": { + "remote": { "class": "synapse.logging.RemoteHandler", - "host": "127.0.0.1", - "port": port.getHost().port, + "formatter": "tersejson", + "host": address.host, + "port": address.port, "maximum_buffer": 100, - "_reactor": reactor, } }, } - logger = logging.getLogger("synapse.logging.test_terse_json") + logger = logging.getLogger("synapse") _setup_stdlib_logging( - hs_config, - log_config, + hs_config, # type: ignore[arg-type] + None, logBeginner=beginner, ) + # Force a new logging config without having to load it from a file. + logging.config.dictConfig(log_config) + reset_logging_config() + # Wait for it to connect... 
for handler in logging.getLogger("synapse").handlers: if isinstance(handler, RemoteHandler): @@ -107,7 +123,7 @@ async def main(reactor, loops): else: raise RuntimeError("Improperly configured: no RemoteHandler found.") - await handler._service.whenConnected() + await handler._service.whenConnected(failAfterFailures=10) start = perf_counter() diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py index 9b4a424149..cfa0163c62 100644 --- a/synmark/suites/lrucache.py +++ b/synmark/suites/lrucache.py @@ -14,14 +14,15 @@ from pyperf import perf_counter +from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache -async def main(reactor, loops): +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark `loops` number of insertions into LruCache without eviction. """ - cache = LruCache(loops) + cache: LruCache[int, bool] = LruCache(loops) start = perf_counter() diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py index 0ee202ed36..02238c2627 100644 --- a/synmark/suites/lrucache_evict.py +++ b/synmark/suites/lrucache_evict.py @@ -14,15 +14,16 @@ from pyperf import perf_counter +from synapse.types import ISynapseReactor from synapse.util.caches.lrucache import LruCache -async def main(reactor, loops): +async def main(reactor: ISynapseReactor, loops: int) -> float: """ Benchmark `loops` number of insertions into LruCache where half of them are evicted. """ - cache = LruCache(loops // 2) + cache: LruCache[int, bool] = LruCache(loops // 2) start = perf_counter() From 009b47badfed7593cff5f8acbd61e8fddb3ca788 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 5 Oct 2023 12:46:28 +0300 Subject: [PATCH 018/142] Factor out `MultiWriter` token from `RoomStreamToken` (#16427) --- changelog.d/16427.misc | 1 + synapse/handlers/admin.py | 4 +- synapse/handlers/initial_sync.py | 3 +- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/rest/admin/__init__.py | 2 +- synapse/storage/databases/main/stream.py | 22 ++-- synapse/types/__init__.py | 132 ++++++++++++++++------- tests/handlers/test_appservice.py | 8 +- 9 files changed, 115 insertions(+), 61 deletions(-) create mode 100644 changelog.d/16427.misc diff --git a/changelog.d/16427.misc b/changelog.d/16427.misc new file mode 100644 index 0000000000..44f0e0595e --- /dev/null +++ b/changelog.d/16427.misc @@ -0,0 +1 @@ +Factor out `MultiWriter` token from `RoomStreamToken`. 
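The commit message above is brief, so a short illustrative sketch of the multi-writer token semantics this patch factors out of `RoomStreamToken` may help before reading the diffs below. This is a simplified, self-contained model only: a plain dataclass instead of the real frozen attrs class with `immutabledict` and async `parse`/`to_string` methods, with made-up worker names, not the code the patch adds.

```python
# Simplified model of the multi-writer token behaviour, for illustration only.
# The real AbstractMultiWriterStreamToken is a frozen attrs class that uses
# immutabledict; the instance names below are hypothetical.
from dataclasses import dataclass, field
from typing import Dict


@dataclass(frozen=True)
class MultiWriterToken:
    """A default `stream` position plus a map of writers that are ahead of it."""

    stream: int
    instance_map: Dict[str, int] = field(default_factory=dict)

    def get_stream_pos_for_instance(self, instance_name: str) -> int:
        # Writers without an explicit entry are assumed to be at `self.stream`.
        return self.instance_map.get(instance_name, self.stream)

    def get_max_stream_pos(self) -> int:
        return max(self.instance_map.values(), default=self.stream)

    def copy_and_advance(self, other: "MultiWriterToken") -> "MultiWriterToken":
        # The result is after both inputs: take the per-writer maximum.
        max_stream = max(self.stream, other.stream)
        instance_map = {
            instance: max(
                self.get_stream_pos_for_instance(instance),
                other.get_stream_pos_for_instance(instance),
            )
            for instance in set(self.instance_map) | set(other.instance_map)
        }
        return MultiWriterToken(stream=max_stream, instance_map=instance_map)


# A position persisted by "worker2" at stream 7 is "after" a token that has
# only seen worker2 up to 5; advancing merges the two views per writer.
token = MultiWriterToken(stream=3, instance_map={"worker1": 6, "worker2": 5})
assert token.get_stream_pos_for_instance("worker2") < 7
assert token.get_max_stream_pos() == 6

advanced = token.copy_and_advance(
    MultiWriterToken(stream=4, instance_map={"worker2": 8})
)
assert advanced.stream == 4
assert advanced.instance_map == {"worker1": 6, "worker2": 8}
```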
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index ba9704a065..97fd1fd427 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -171,8 +171,8 @@ class AdminHandler: else: stream_ordering = room.stream_ordering - from_key = RoomStreamToken(0, 0) - to_key = RoomStreamToken(None, stream_ordering) + from_key = RoomStreamToken(topological=0, stream=0) + to_key = RoomStreamToken(stream=stream_ordering) # Events that we've processed in this room written_events: Set[str] = set() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 5737f8014d..c34bd7db95 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -192,8 +192,7 @@ class InitialSyncHandler: ) elif event.membership == Membership.LEAVE: room_end_token = RoomStreamToken( - None, - event.stream_ordering, + stream=event.stream_ordering, ) deferred_room_state = run_in_background( self._state_storage_controller.get_state_for_events, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a0c3b16819..4cdf0a8502 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1708,7 +1708,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): if from_key.topological: logger.warning("Stream has topological part!!!! %r", from_key) - from_key = RoomStreamToken(None, from_key.stream) + from_key = RoomStreamToken(stream=from_key.stream) app_service = self.store.get_app_service_by_user_id(user.to_string()) if app_service: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7bd42f635f..744e080309 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -2333,7 +2333,7 @@ class SyncHandler: continue leave_token = now_token.copy_and_replace( - StreamKeyType.ROOM, RoomStreamToken(None, event.stream_ordering) + StreamKeyType.ROOM, RoomStreamToken(stream=event.stream_ordering) ) room_entries.append( RoomSyncResultBuilder( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index e42dade246..9bd0d764f8 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -146,7 +146,7 @@ class PurgeHistoryRestServlet(RestServlet): # RoomStreamToken expects [int] not Optional[int] assert event.internal_metadata.stream_ordering is not None room_token = RoomStreamToken( - event.depth, event.internal_metadata.stream_ordering + topological=event.depth, stream=event.internal_metadata.stream_ordering ) token = await room_token.to_string(self.store) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 5a3611c415..ea06e4eee0 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -266,7 +266,7 @@ def generate_next_token( # when we are going backwards so we subtract one from the # stream part. 
last_stream_ordering -= 1 - return RoomStreamToken(last_topo_ordering, last_stream_ordering) + return RoomStreamToken(topological=last_topo_ordering, stream=last_stream_ordering) def _make_generic_sql_bound( @@ -558,7 +558,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if p > min_pos } - return RoomStreamToken(None, min_pos, immutabledict(positions)) + return RoomStreamToken(stream=min_pos, instance_map=immutabledict(positions)) async def get_room_events_stream_for_rooms( self, @@ -708,7 +708,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ret.reverse() if rows: - key = RoomStreamToken(None, min(r.stream_ordering for r in rows)) + key = RoomStreamToken(stream=min(r.stream_ordering for r in rows)) else: # Assume we didn't get anything because there was nothing to # get. @@ -969,7 +969,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topo = await self.db_pool.runInteraction( "_get_max_topological_txn", self._get_max_topological_txn, room_id ) - return RoomStreamToken(topo, stream_ordering) + return RoomStreamToken(topological=topo, stream=stream_ordering) @overload def get_stream_id_for_event_txn( @@ -1033,7 +1033,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=("stream_ordering", "topological_ordering"), desc="get_topological_token_for_event", ) - return RoomStreamToken(row["topological_ordering"], row["stream_ordering"]) + return RoomStreamToken( + topological=row["topological_ordering"], stream=row["stream_ordering"] + ) async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: """Gets the topological token in a room after or at the given stream @@ -1114,8 +1116,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): else: topo = None internal = event.internal_metadata - internal.before = RoomStreamToken(topo, stream - 1) - internal.after = RoomStreamToken(topo, stream) + internal.before = RoomStreamToken(topological=topo, stream=stream - 1) + internal.after = RoomStreamToken(topological=topo, stream=stream) internal.order = (int(topo) if topo else 0, int(stream)) async def get_events_around( @@ -1191,11 +1193,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # Paginating backwards includes the event at the token, but paginating # forward doesn't. 
before_token = RoomStreamToken( - results["topological_ordering"] - 1, results["stream_ordering"] + topological=results["topological_ordering"] - 1, + stream=results["stream_ordering"], ) after_token = RoomStreamToken( - results["topological_ordering"], results["stream_ordering"] + topological=results["topological_ordering"], + stream=results["stream_ordering"], ) rows, start_token = self._paginate_room_events_txn( diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 406d5b1611..09a88c86a7 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -61,6 +61,8 @@ from synapse.util.cancellation import cancellable from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: + from typing_extensions import Self + from synapse.appservice.api import ApplicationService from synapse.storage.databases.main import DataStore, PurgeEventsStore from synapse.storage.databases.main.appservice import ApplicationServiceWorkerStore @@ -437,7 +439,78 @@ def map_username_to_mxid_localpart( @attr.s(frozen=True, slots=True, order=False) -class RoomStreamToken: +class AbstractMultiWriterStreamToken(metaclass=abc.ABCMeta): + """An abstract stream token class for streams that supports multiple + writers. + + This works by keeping track of the stream position of each writer, + represented by a default `stream` attribute and a map of instance name to + stream position of any writers that are ahead of the default stream + position. + """ + + stream: int = attr.ib(validator=attr.validators.instance_of(int), kw_only=True) + + instance_map: "immutabledict[str, int]" = attr.ib( + factory=immutabledict, + validator=attr.validators.deep_mapping( + key_validator=attr.validators.instance_of(str), + value_validator=attr.validators.instance_of(int), + mapping_validator=attr.validators.instance_of(immutabledict), + ), + kw_only=True, + ) + + @classmethod + @abc.abstractmethod + async def parse(cls, store: "DataStore", string: str) -> "Self": + """Parse the string representation of the token.""" + ... + + @abc.abstractmethod + async def to_string(self, store: "DataStore") -> str: + """Serialize the token into its string representation.""" + ... + + def copy_and_advance(self, other: "Self") -> "Self": + """Return a new token such that if an event is after both this token and + the other token, then its after the returned token too. + """ + + max_stream = max(self.stream, other.stream) + + instance_map = { + instance: max( + self.instance_map.get(instance, self.stream), + other.instance_map.get(instance, other.stream), + ) + for instance in set(self.instance_map).union(other.instance_map) + } + + return attr.evolve( + self, stream=max_stream, instance_map=immutabledict(instance_map) + ) + + def get_max_stream_pos(self) -> int: + """Get the maximum stream position referenced in this token. + + The corresponding "min" position is, by definition just `self.stream`. + + This is used to handle tokens that have non-empty `instance_map`, and so + reference stream positions after the `self.stream` position. + """ + return max(self.instance_map.values(), default=self.stream) + + def get_stream_pos_for_instance(self, instance_name: str) -> int: + """Get the stream position that the given writer was at at this token.""" + + # If we don't have an entry for the instance we can assume that it was + # at `self.stream`. 
+ return self.instance_map.get(instance_name, self.stream) + + +@attr.s(frozen=True, slots=True, order=False) +class RoomStreamToken(AbstractMultiWriterStreamToken): """Tokens are positions between events. The token "s1" comes after event 1. s0 s1 @@ -514,16 +587,8 @@ class RoomStreamToken: topological: Optional[int] = attr.ib( validator=attr.validators.optional(attr.validators.instance_of(int)), - ) - stream: int = attr.ib(validator=attr.validators.instance_of(int)) - - instance_map: "immutabledict[str, int]" = attr.ib( - factory=immutabledict, - validator=attr.validators.deep_mapping( - key_validator=attr.validators.instance_of(str), - value_validator=attr.validators.instance_of(int), - mapping_validator=attr.validators.instance_of(immutabledict), - ), + kw_only=True, + default=None, ) def __attrs_post_init__(self) -> None: @@ -583,17 +648,7 @@ class RoomStreamToken: if self.topological or other.topological: raise Exception("Can't advance topological tokens") - max_stream = max(self.stream, other.stream) - - instance_map = { - instance: max( - self.instance_map.get(instance, self.stream), - other.instance_map.get(instance, other.stream), - ) - for instance in set(self.instance_map).union(other.instance_map) - } - - return RoomStreamToken(None, max_stream, immutabledict(instance_map)) + return super().copy_and_advance(other) def as_historical_tuple(self) -> Tuple[int, int]: """Returns a tuple of `(topological, stream)` for historical tokens. @@ -619,16 +674,6 @@ class RoomStreamToken: # at `self.stream`. return self.instance_map.get(instance_name, self.stream) - def get_max_stream_pos(self) -> int: - """Get the maximum stream position referenced in this token. - - The corresponding "min" position is, by definition just `self.stream`. - - This is used to handle tokens that have non-empty `instance_map`, and so - reference stream positions after the `self.stream` position. - """ - return max(self.instance_map.values(), default=self.stream) - async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) @@ -838,23 +883,28 @@ class StreamToken: return getattr(self, key.value) -StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0, 0) +StreamToken.START = StreamToken(RoomStreamToken(stream=0), 0, 0, 0, 0, 0, 0, 0, 0, 0) @attr.s(slots=True, frozen=True, auto_attribs=True) -class PersistedEventPosition: +class PersistedPosition: + """Position of a newly persisted row with instance that persisted it.""" + + instance_name: str + stream: int + + def persisted_after(self, token: AbstractMultiWriterStreamToken) -> bool: + return token.get_stream_pos_for_instance(self.instance_name) < self.stream + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class PersistedEventPosition(PersistedPosition): """Position of a newly persisted event with instance that persisted it. This can be used to test whether the event is persisted before or after a RoomStreamToken. """ - instance_name: str - stream: int - - def persisted_after(self, token: RoomStreamToken) -> bool: - return token.get_stream_pos_for_instance(self.instance_name) < self.stream - def to_room_stream_token(self) -> RoomStreamToken: """Converts the position to a room stream token such that events persisted in the same room after this position will be after the @@ -865,7 +915,7 @@ class PersistedEventPosition: """ # Doing the naive thing satisfies the desired properties described in # the docstring. 
- return RoomStreamToken(None, self.stream) + return RoomStreamToken(stream=self.stream) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 8ce6ccf529..867dbd6001 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -86,7 +86,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): [event], ] ) - self.handler.notify_interested_services(RoomStreamToken(None, 1)) + self.handler.notify_interested_services(RoomStreamToken(stream=1)) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, events=[event] @@ -107,7 +107,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ] ) self.mock_store.get_events_as_list = AsyncMock(side_effect=[[event]]) - self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.handler.notify_interested_services(RoomStreamToken(stream=0)) self.mock_as_api.query_user.assert_called_once_with(services[0], user_id) @@ -126,7 +126,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): ] ) - self.handler.notify_interested_services(RoomStreamToken(None, 0)) + self.handler.notify_interested_services(RoomStreamToken(stream=0)) self.assertFalse( self.mock_as_api.query_user.called, @@ -441,7 +441,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.get_success( self.hs.get_application_service_handler()._notify_interested_services( RoomStreamToken( - None, self.hs.get_application_service_handler().current_max + stream=self.hs.get_application_service_handler().current_max ) ) ) From 4e302b30b6f29bd6f1edf7e7dfb835a959fc66e4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 07:38:55 -0400 Subject: [PATCH 019/142] Add __slots__ to replication commands. (#16429) To slightly reduce the amount of memory each command takes. --- changelog.d/16429.misc | 1 + synapse/replication/tcp/commands.py | 27 ++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16429.misc diff --git a/changelog.d/16429.misc b/changelog.d/16429.misc new file mode 100644 index 0000000000..4241e143be --- /dev/null +++ b/changelog.d/16429.misc @@ -0,0 +1 @@ +Reduce the size of each replication command instance. diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index e616b5e1c8..1b92302fd3 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -18,7 +18,7 @@ allowed to be sent by which side. """ import abc import logging -from typing import Optional, Tuple, Type, TypeVar +from typing import List, Optional, Tuple, Type, TypeVar from synapse.replication.tcp.streams._base import StreamRow from synapse.util import json_decoder, json_encoder @@ -74,6 +74,8 @@ SC = TypeVar("SC", bound="_SimpleCommand") class _SimpleCommand(Command): """An implementation of Command whose argument is just a 'data' string.""" + __slots__ = ["data"] + def __init__(self, data: str): self.data = data @@ -122,6 +124,8 @@ class RdataCommand(Command): RDATA presence master 59 ["@baz:example.com", "online", ...] """ + __slots__ = ["stream_name", "instance_name", "token", "row"] + NAME = "RDATA" def __init__( @@ -179,6 +183,8 @@ class PositionCommand(Command): of the stream. 
""" + __slots__ = ["stream_name", "instance_name", "prev_token", "new_token"] + NAME = "POSITION" def __init__( @@ -235,6 +241,8 @@ class ReplicateCommand(Command): REPLICATE """ + __slots__: List[str] = [] + NAME = "REPLICATE" def __init__(self) -> None: @@ -264,6 +272,8 @@ class UserSyncCommand(Command): Where is either "start" or "end" """ + __slots__ = ["instance_id", "user_id", "device_id", "is_syncing", "last_sync_ms"] + NAME = "USER_SYNC" def __init__( @@ -316,6 +326,8 @@ class ClearUserSyncsCommand(Command): CLEAR_USER_SYNC """ + __slots__ = ["instance_id"] + NAME = "CLEAR_USER_SYNC" def __init__(self, instance_id: str): @@ -343,6 +355,8 @@ class FederationAckCommand(Command): FEDERATION_ACK """ + __slots__ = ["instance_name", "token"] + NAME = "FEDERATION_ACK" def __init__(self, instance_name: str, token: int): @@ -368,6 +382,15 @@ class UserIpCommand(Command): USER_IP , , , , , """ + __slots__ = [ + "user_id", + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ] + NAME = "USER_IP" def __init__( @@ -441,6 +464,8 @@ class LockReleasedCommand(Command): LOCK_RELEASED ["", "", ""] """ + __slots__ = ["instance_name", "lock_name", "lock_key"] + NAME = "LOCK_RELEASED" def __init__( From fa907025f4b263d27c2b338fb0fe86d257d74fa8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 11:07:38 -0400 Subject: [PATCH 020/142] Remove manys calls to cursor_to_dict (#16431) This avoids calling cursor_to_dict and then immediately unpacking the values in the dict for other users. By not creating the intermediate dictionary we can avoid allocating the dictionary and strings for the keys, which should generally be more performant. Additionally this improves type hints by avoid Dict[str, Any] dictionaries coming out of the database layer. --- changelog.d/16429.misc | 2 +- changelog.d/16431.misc | 1 + synapse/push/__init__.py | 2 +- .../storage/databases/main/account_data.py | 11 +- synapse/storage/databases/main/appservice.py | 29 +--- synapse/storage/databases/main/devices.py | 12 +- .../storage/databases/main/end_to_end_keys.py | 22 +-- synapse/storage/databases/main/events.py | 9 +- synapse/storage/databases/main/presence.py | 18 ++- synapse/storage/databases/main/pusher.py | 121 ++++++++++++---- synapse/storage/databases/main/receipts.py | 72 +++++----- .../storage/databases/main/registration.py | 131 ++++++++++-------- synapse/storage/databases/main/room.py | 42 +++--- synapse/storage/databases/main/roommember.py | 10 +- synapse/storage/databases/main/search.py | 20 +-- .../storage/databases/main/task_scheduler.py | 44 ++++-- 16 files changed, 319 insertions(+), 227 deletions(-) create mode 100644 changelog.d/16431.misc diff --git a/changelog.d/16429.misc b/changelog.d/16429.misc index 4241e143be..bd7cdd42af 100644 --- a/changelog.d/16429.misc +++ b/changelog.d/16429.misc @@ -1 +1 @@ -Reduce the size of each replication command instance. +Reduce memory allocations. diff --git a/changelog.d/16431.misc b/changelog.d/16431.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16431.misc @@ -0,0 +1 @@ +Reduce memory allocations. 
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9e5eb2a445..4d405f2a0c 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -101,7 +101,7 @@ if TYPE_CHECKING: class PusherConfig: """Parameters necessary to configure a pusher.""" - id: Optional[str] + id: Optional[int] user_name: str profile_tag: str diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 80f146dd53..16c284807a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -151,10 +151,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) sql += " AND content != '{}'" txn.execute(sql, (user_id,)) - rows = self.db_pool.cursor_to_dict(txn) return { - row["account_data_type"]: db_to_json(row["content"]) for row in rows + account_data_type: db_to_json(content) + for account_data_type, content in txn } return await self.db_pool.runInteraction( @@ -196,13 +196,12 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) sql += " AND content != '{}'" txn.execute(sql, (user_id,)) - rows = self.db_pool.cursor_to_dict(txn) by_room: Dict[str, Dict[str, JsonDict]] = {} - for row in rows: - room_data = by_room.setdefault(row["room_id"], {}) + for room_id, account_data_type, content in txn: + room_data = by_room.setdefault(room_id, {}) - room_data[row["account_data_type"]] = db_to_json(row["content"]) + room_data[account_data_type] = db_to_json(content) return by_room diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 0553a0621a..073a99cd84 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -14,17 +14,7 @@ # limitations under the License. import logging import re -from typing import ( - TYPE_CHECKING, - Any, - Dict, - List, - Optional, - Pattern, - Sequence, - Tuple, - cast, -) +from typing import TYPE_CHECKING, List, Optional, Pattern, Sequence, Tuple, cast from synapse.appservice import ( ApplicationService, @@ -353,21 +343,15 @@ class ApplicationServiceTransactionWorkerStore( def _get_oldest_unsent_txn( txn: LoggingTransaction, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Tuple[int, str]]: # Monotonically increasing txn ids, so just select the smallest # one in the txns table (we delete them when they are sent) txn.execute( - "SELECT * FROM application_services_txns WHERE as_id=?" + "SELECT txn_id, event_ids FROM application_services_txns WHERE as_id=?" " ORDER BY txn_id ASC LIMIT 1", (service.id,), ) - rows = self.db_pool.cursor_to_dict(txn) - if not rows: - return None - - entry = rows[0] - - return entry + return cast(Optional[Tuple[int, str]], txn.fetchone()) entry = await self.db_pool.runInteraction( "get_oldest_unsent_appservice_txn", _get_oldest_unsent_txn @@ -376,8 +360,9 @@ class ApplicationServiceTransactionWorkerStore( if not entry: return None - event_ids = db_to_json(entry["event_ids"]) + txn_id, event_ids_str = entry + event_ids = db_to_json(event_ids_str) events = await self.get_events_as_list(event_ids) # TODO: to-device messages, one-time key counts, device list summaries and unused @@ -385,7 +370,7 @@ class ApplicationServiceTransactionWorkerStore( # We likely want to populate those for reliability. 
return AppServiceTransaction( service=service, - id=entry["txn_id"], + id=txn_id, events=events, ephemeral=[], to_device_messages=[], diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index df596f35f9..9f3804a504 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1413,13 +1413,13 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): def get_devices_not_accessed_since_txn( txn: LoggingTransaction, - ) -> List[Dict[str, str]]: + ) -> List[Tuple[str, str]]: sql = """ SELECT user_id, device_id FROM devices WHERE last_seen < ? AND hidden = FALSE """ txn.execute(sql, (since_ms,)) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, str]], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_devices_not_accessed_since", @@ -1427,11 +1427,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ) devices: Dict[str, List[str]] = {} - for row in rows: + for user_id, device_id in rows: # Remote devices are never stale from our point of view. - if self.hs.is_mine_id(row["user_id"]): - user_devices = devices.setdefault(row["user_id"], []) - user_devices.append(row["device_id"]) + if self.hs.is_mine_id(user_id): + user_devices = devices.setdefault(user_id, []) + user_devices.append(device_id) return devices diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 89fac23f93..749ae54e20 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -921,14 +921,10 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker } txn.execute(sql, params) - rows = self.db_pool.cursor_to_dict(txn) - for row in rows: - user_id = row["user_id"] - key_type = row["keytype"] - key = db_to_json(row["keydata"]) + for user_id, key_type, key_data, _ in txn: user_keys = result.setdefault(user_id, {}) - user_keys[key_type] = key + user_keys[key_type] = db_to_json(key_data) return result @@ -988,13 +984,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker query_params.extend(item) txn.execute(sql, query_params) - rows = self.db_pool.cursor_to_dict(txn) # and add the signatures to the appropriate keys - for row in rows: - key_id: str = row["key_id"] - target_user_id: str = row["target_user_id"] - target_device_id: str = row["target_device_id"] + for target_user_id, target_device_id, key_id, signature in txn: key_type = devices[(target_user_id, target_device_id)] # We need to copy everything, because the result may have come # from the cache. 
dict.copy only does a shallow copy, so we @@ -1012,13 +1004,11 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ].copy() if from_user_id in signatures: user_sigs = signatures[from_user_id] = signatures[from_user_id] - user_sigs[key_id] = row["signature"] + user_sigs[key_id] = signature else: - signatures[from_user_id] = {key_id: row["signature"]} + signatures[from_user_id] = {key_id: signature} else: - target_user_key["signatures"] = { - from_user_id: {key_id: row["signature"]} - } + target_user_key["signatures"] = {from_user_id: {key_id: signature}} return keys diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 790d058c43..d4dcdb898c 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1654,8 +1654,6 @@ class PersistEventsStore: ) -> None: to_prefill = [] - rows = [] - ev_map = {e.event_id: e for e, _ in events_and_contexts} if not ev_map: return @@ -1676,10 +1674,9 @@ class PersistEventsStore: ) txn.execute(sql + clause, args) - rows = self.db_pool.cursor_to_dict(txn) - for row in rows: - event = ev_map[row["event_id"]] - if not row["rejects"] and not row["redacts"]: + for event_id, redacts, rejects in txn: + event = ev_map[event_id] + if not rejects and not redacts: to_prefill.append(EventCacheEntry(event=event, redacted_event=None)) async def external_prefill() -> None: diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 194b4e031f..805c23f89f 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -434,13 +434,21 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() txn.close() - for row in rows: - row["currently_active"] = bool(row["currently_active"]) - - return [UserPresenceState(**row) for row in rows] + return [ + UserPresenceState( + user_id=user_id, + state=state, + last_active_ts=last_active_ts, + last_federation_update_ts=last_federation_update_ts, + last_user_sync_ts=last_user_sync_ts, + status_msg=status_msg, + currently_active=bool(currently_active), + ) + for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows + ] def take_presence_startup_info(self) -> List[UserPresenceState]: active_on_startup = self._presence_on_startup diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 87e28e22d3..c7eb7fc478 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -47,6 +47,27 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +# The type of a row in the pushers table. 
+PusherRow = Tuple[ + int, # id + str, # user_name + Optional[int], # access_token + str, # profile_tag + str, # kind + str, # app_id + str, # app_display_name + str, # device_display_name + str, # pushkey + int, # ts + str, # lang + str, # data + int, # last_stream_ordering + int, # last_success + int, # failing_since + bool, # enabled + str, # device_id +] + class PusherWorkerStore(SQLBaseStore): def __init__( @@ -83,30 +104,66 @@ class PusherWorkerStore(SQLBaseStore): self._remove_deleted_email_pushers, ) - def _decode_pushers_rows(self, rows: Iterable[dict]) -> Iterator[PusherConfig]: + def _decode_pushers_rows( + self, + rows: Iterable[PusherRow], + ) -> Iterator[PusherConfig]: """JSON-decode the data in the rows returned from the `pushers` table Drops any rows whose data cannot be decoded """ - for r in rows: - data_json = r["data"] + for ( + id, + user_name, + access_token, + profile_tag, + kind, + app_id, + app_display_name, + device_display_name, + pushkey, + ts, + lang, + data, + last_stream_ordering, + last_success, + failing_since, + enabled, + device_id, + ) in rows: try: - r["data"] = db_to_json(data_json) + data_json = db_to_json(data) except Exception as e: logger.warning( "Invalid JSON in data for pusher %d: %s, %s", - r["id"], - data_json, + id, + data, e.args[0], ) continue - # If we're using SQLite, then boolean values are integers. This is - # troublesome since some code using the return value of this method might - # expect it to be a boolean, or will expose it to clients (in responses). - r["enabled"] = bool(r["enabled"]) - - yield PusherConfig(**r) + yield PusherConfig( + id=id, + user_name=user_name, + profile_tag=profile_tag, + kind=kind, + app_id=app_id, + app_display_name=app_display_name, + device_display_name=device_display_name, + pushkey=pushkey, + ts=ts, + lang=lang, + data=data_json, + last_stream_ordering=last_stream_ordering, + last_success=last_success, + failing_since=failing_since, + # If we're using SQLite, then boolean values are integers. This is + # troublesome since some code using the return value of this method might + # expect it to be a boolean, or will expose it to clients (in responses). + enabled=bool(enabled), + device_id=device_id, + access_token=access_token, + ) def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() @@ -136,7 +193,7 @@ class PusherWorkerStore(SQLBaseStore): The pushers for which the given columns have the given values. """ - def get_pushers_by_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def get_pushers_by_txn(txn: LoggingTransaction) -> List[PusherRow]: # We could technically use simple_select_list here, but we need to call # COALESCE on the 'enabled' column. While it is technically possible to give # simple_select_list the whole `COALESCE(...) 
AS ...` as a column name, it @@ -154,7 +211,7 @@ class PusherWorkerStore(SQLBaseStore): txn.execute(sql, list(keyvalues.values())) - return self.db_pool.cursor_to_dict(txn) + return cast(List[PusherRow], txn.fetchall()) ret = await self.db_pool.runInteraction( desc="get_pushers_by", @@ -164,14 +221,22 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(ret) async def get_enabled_pushers(self) -> Iterator[PusherConfig]: - def get_enabled_pushers_txn(txn: LoggingTransaction) -> Iterator[PusherConfig]: - txn.execute("SELECT * FROM pushers WHERE COALESCE(enabled, TRUE)") - rows = self.db_pool.cursor_to_dict(txn) + def get_enabled_pushers_txn(txn: LoggingTransaction) -> List[PusherRow]: + txn.execute( + """ + SELECT id, user_name, access_token, profile_tag, kind, app_id, + app_display_name, device_display_name, pushkey, ts, lang, data, + last_stream_ordering, last_success, failing_since, + enabled, device_id + FROM pushers WHERE COALESCE(enabled, TRUE) + """ + ) + return cast(List[PusherRow], txn.fetchall()) - return self._decode_pushers_rows(rows) - - return await self.db_pool.runInteraction( - "get_enabled_pushers", get_enabled_pushers_txn + return self._decode_pushers_rows( + await self.db_pool.runInteraction( + "get_enabled_pushers", get_enabled_pushers_txn + ) ) async def get_all_updated_pushers_rows( @@ -304,7 +369,7 @@ class PusherWorkerStore(SQLBaseStore): ) async def get_throttle_params_by_room( - self, pusher_id: str + self, pusher_id: int ) -> Dict[str, ThrottleParams]: res = await self.db_pool.simple_select_list( "pusher_throttle", @@ -323,7 +388,7 @@ class PusherWorkerStore(SQLBaseStore): return params_by_room async def set_throttle_params( - self, pusher_id: str, room_id: str, params: ThrottleParams + self, pusher_id: int, room_id: str, params: ThrottleParams ) -> None: await self.db_pool.simple_upsert( "pusher_throttle", @@ -534,7 +599,7 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): (last_pusher_id, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if len(rows) == 0: return 0 @@ -550,19 +615,19 @@ class PusherBackgroundUpdatesStore(SQLBaseStore): txn=txn, table="pushers", key_names=("id",), - key_values=[(row["pusher_id"],) for row in rows], + key_values=[row[0] for row in rows], value_names=("device_id", "access_token"), # If there was already a device_id on the pusher, we only want to clear # the access_token column, so we keep the existing device_id. Otherwise, # we set the device_id we got from joining the access_tokens table. 
value_values=[ - (row["pusher_device_id"] or row["token_device_id"], None) - for row in rows + (pusher_device_id or token_device_id, None) + for _, pusher_device_id, token_device_id in rows ], ) self.db_pool.updates._background_update_progress_txn( - txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["pusher_id"]} + txn, "set_device_id_for_pushers", {"pusher_id": rows[-1][0]} ) return len(rows) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3bab1024ea..b2645ab43c 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -313,25 +313,25 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: if from_key: sql = ( - "SELECT * FROM receipts_linearized WHERE" + "SELECT receipt_type, user_id, event_id, data" + " FROM receipts_linearized WHERE" " room_id = ? AND stream_id > ? AND stream_id <= ?" ) txn.execute(sql, (room_id, from_key, to_key)) else: sql = ( - "SELECT * FROM receipts_linearized WHERE" + "SELECT receipt_type, user_id, event_id, data" + " FROM receipts_linearized WHERE" " room_id = ? AND stream_id <= ?" ) txn.execute(sql, (room_id, to_key)) - rows = self.db_pool.cursor_to_dict(txn) - - return rows + return cast(List[Tuple[str, str, str, str]], txn.fetchall()) rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f) @@ -339,10 +339,10 @@ class ReceiptsWorkerStore(SQLBaseStore): return [] content: JsonDict = {} - for row in rows: - content.setdefault(row["event_id"], {}).setdefault(row["receipt_type"], {})[ - row["user_id"] - ] = db_to_json(row["data"]) + for receipt_type, user_id, event_id, data in rows: + content.setdefault(event_id, {}).setdefault(receipt_type, {})[ + user_id + ] = db_to_json(data) return [{"type": EduTypes.RECEIPT, "room_id": room_id, "content": content}] @@ -357,10 +357,13 @@ class ReceiptsWorkerStore(SQLBaseStore): if not room_ids: return {} - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, str, str, Optional[str], str]]: if from_key: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? AND """ clause, args = make_in_list_sql_clause( @@ -370,7 +373,8 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [from_key, to_key] + list(args)) else: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized WHERE stream_id <= ? AND """ @@ -380,29 +384,31 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [to_key] + list(args)) - return self.db_pool.cursor_to_dict(txn) + return cast( + List[Tuple[str, str, str, str, Optional[str], str]], txn.fetchall() + ) txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f ) results: JsonDict = {} - for row in txn_results: + for room_id, receipt_type, user_id, event_id, thread_id, data in txn_results: # We want a single event per room, since we want to batch the # receipts by room, event and type. 
room_event = results.setdefault( - row["room_id"], - {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}}, + room_id, + {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, ) # The content is of the form: # {"$foo:bar": { "read": { "@user:host": }, .. }, .. } - event_entry = room_event["content"].setdefault(row["event_id"], {}) - receipt_type = event_entry.setdefault(row["receipt_type"], {}) + event_entry = room_event["content"].setdefault(event_id, {}) + receipt_type_dict = event_entry.setdefault(receipt_type, {}) - receipt_type[row["user_id"]] = db_to_json(row["data"]) - if row["thread_id"]: - receipt_type[row["user_id"]]["thread_id"] = row["thread_id"] + receipt_type_dict[user_id] = db_to_json(data) + if thread_id: + receipt_type_dict[user_id]["thread_id"] = thread_id results = { room_id: [results[room_id]] if room_id in results else [] @@ -428,10 +434,11 @@ class ReceiptsWorkerStore(SQLBaseStore): A dictionary of roomids to a list of receipts. """ - def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str, str]]: if from_key: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? ORDER BY stream_id DESC LIMIT 100 @@ -439,7 +446,8 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, [from_key, to_key]) else: sql = """ - SELECT * FROM receipts_linearized WHERE + SELECT room_id, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE stream_id <= ? ORDER BY stream_id DESC LIMIT 100 @@ -447,27 +455,27 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql, [to_key]) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, str, str, str, str]], txn.fetchall()) txn_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_all_rooms", f ) results: JsonDict = {} - for row in txn_results: + for room_id, receipt_type, user_id, event_id, data in txn_results: # We want a single event per room, since we want to batch the # receipts by room, event and type. room_event = results.setdefault( - row["room_id"], - {"type": EduTypes.RECEIPT, "room_id": row["room_id"], "content": {}}, + room_id, + {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, ) # The content is of the form: # {"$foo:bar": { "read": { "@user:host": }, .. }, .. 
} - event_entry = room_event["content"].setdefault(row["event_id"], {}) - receipt_type = event_entry.setdefault(row["receipt_type"], {}) + event_entry = room_event["content"].setdefault(event_id, {}) + receipt_type_dict = event_entry.setdefault(receipt_type, {}) - receipt_type[row["user_id"]] = db_to_json(row["data"]) + receipt_type_dict[user_id] = db_to_json(data) return results diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index cc964604e2..64a2c31a5d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -195,7 +195,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: """Returns info about the user account, if it exists.""" - def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: + def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[UserInfo]: # We could technically use simple_select_one here, but it would not perform # the COALESCEs (unless hacked into the column names), which could yield # confusing results. @@ -213,35 +213,46 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): (user_id,), ) - rows = self.db_pool.cursor_to_dict(txn) - - if len(rows) == 0: + row = txn.fetchone() + if not row: return None - return rows[0] + ( + name, + is_guest, + admin, + consent_version, + consent_ts, + consent_server_notice_sent, + appservice_id, + creation_ts, + user_type, + deactivated, + shadow_banned, + approved, + locked, + ) = row - row = await self.db_pool.runInteraction( + return UserInfo( + appservice_id=appservice_id, + consent_server_notice_sent=consent_server_notice_sent, + consent_version=consent_version, + consent_ts=consent_ts, + creation_ts=creation_ts, + is_admin=bool(admin), + is_deactivated=bool(deactivated), + is_guest=bool(is_guest), + is_shadow_banned=bool(shadow_banned), + user_id=UserID.from_string(name), + user_type=user_type, + approved=bool(approved), + locked=bool(locked), + ) + + return await self.db_pool.runInteraction( desc="get_user_by_id", func=get_user_by_id_txn, ) - if row is None: - return None - - return UserInfo( - appservice_id=row["appservice_id"], - consent_server_notice_sent=row["consent_server_notice_sent"], - consent_version=row["consent_version"], - consent_ts=row["consent_ts"], - creation_ts=row["creation_ts"], - is_admin=bool(row["admin"]), - is_deactivated=bool(row["deactivated"]), - is_guest=bool(row["is_guest"]), - is_shadow_banned=bool(row["shadow_banned"]), - user_id=UserID.from_string(row["name"]), - user_type=row["user_type"], - approved=bool(row["approved"]), - locked=bool(row["locked"]), - ) async def is_trial_user(self, user_id: str) -> bool: """Checks if user is in the "trial" period, i.e. 
within the first @@ -579,16 +590,31 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """ txn.execute(sql, (token,)) - rows = self.db_pool.cursor_to_dict(txn) + row = txn.fetchone() - if rows: - row = rows[0] + if row: + ( + user_id, + is_guest, + shadow_banned, + token_id, + device_id, + valid_until_ms, + token_owner, + token_used, + ) = row - # This field is nullable, ensure it comes out as a boolean - if row["token_used"] is None: - row["token_used"] = False - - return TokenLookupResult(**row) + return TokenLookupResult( + user_id=user_id, + is_guest=is_guest, + shadow_banned=shadow_banned, + token_id=token_id, + device_id=device_id, + valid_until_ms=valid_until_ms, + token_owner=token_owner, + # This field is nullable, ensure it comes out as a boolean + token_used=bool(token_used), + ) return None @@ -833,11 +859,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Counts all users registered on the homeserver.""" def _count_users(txn: LoggingTransaction) -> int: - txn.execute("SELECT COUNT(*) AS users FROM users") - rows = self.db_pool.cursor_to_dict(txn) - if rows: - return rows[0]["users"] - return 0 + txn.execute("SELECT COUNT(*) FROM users") + row = txn.fetchone() + assert row is not None + return row[0] return await self.db_pool.runInteraction("count_users", _count_users) @@ -891,11 +916,10 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): """Counts all users without a special user_type registered on the homeserver.""" def _count_users(txn: LoggingTransaction) -> int: - txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null") - rows = self.db_pool.cursor_to_dict(txn) - if rows: - return rows[0]["users"] - return 0 + txn.execute("SELECT COUNT(*) FROM users where user_type is null") + row = txn.fetchone() + assert row is not None + return row[0] return await self.db_pool.runInteraction("count_real_users", _count_users) @@ -1252,12 +1276,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): ) txn.execute(sql, []) - res = self.db_pool.cursor_to_dict(txn) - if res: - for user in res: - self.set_expiration_date_for_user_txn( - txn, user["name"], use_delta=True - ) + for (name,) in txn.fetchall(): + self.set_expiration_date_for_user_txn(txn, name, use_delta=True) await self.db_pool.runInteraction( "get_users_with_no_expiration_date", @@ -1963,11 +1983,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): (user_id,), ) - rows = self.db_pool.cursor_to_dict(txn) + row = txn.fetchone() + assert row is not None # We cast to bool because the value returned by the database engine might # be an integer if we're using SQLite. 
- return bool(rows[0]["approved"]) + return bool(row[0]) return await self.db_pool.runInteraction( desc="is_user_pending_approval", @@ -2045,22 +2066,22 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): (last_user, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return True, 0 rows_processed_nb = 0 - for user in rows: - if not user["count_tokens"] and not user["count_threepids"]: - self.set_user_deactivated_status_txn(txn, user["name"], True) + for name, count_tokens, count_threepids in rows: + if not count_tokens and not count_threepids: + self.set_user_deactivated_status_txn(txn, name, True) rows_processed_nb += 1 logger.info("Marked %d rows as deactivated", rows_processed_nb) self.db_pool.updates._background_update_progress_txn( - txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]} + txn, "users_set_deactivated_flag", {"user_id": rows[-1][0]} ) if batch_size > len(rows): diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 719e11aea6..1d4d99932b 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -831,7 +831,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_retention_policy_for_room_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Optional[int]]]: + ) -> Optional[Tuple[Optional[int], Optional[int]]]: txn.execute( """ SELECT min_lifetime, max_lifetime FROM room_retention @@ -841,7 +841,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): (room_id,), ) - return self.db_pool.cursor_to_dict(txn) + return cast(Optional[Tuple[Optional[int], Optional[int]]], txn.fetchone()) ret = await self.db_pool.runInteraction( "get_retention_policy_for_room", @@ -856,8 +856,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): max_lifetime=self.config.retention.retention_default_max_lifetime, ) - min_lifetime = ret[0]["min_lifetime"] - max_lifetime = ret[0]["max_lifetime"] + min_lifetime, max_lifetime = ret # If one of the room's policy's attributes isn't defined, use the matching # attribute from the default policy. @@ -1162,14 +1161,13 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql, args) - rows = self.db_pool.cursor_to_dict(txn) - rooms_dict = {} - - for row in rows: - rooms_dict[row["room_id"]] = RetentionPolicy( - min_lifetime=row["min_lifetime"], - max_lifetime=row["max_lifetime"], + rooms_dict = { + room_id: RetentionPolicy( + min_lifetime=min_lifetime, + max_lifetime=max_lifetime, ) + for room_id, min_lifetime, max_lifetime in txn + } if include_null: # If required, do a second query that retrieves all of the rooms we know @@ -1178,13 +1176,11 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql) - rows = self.db_pool.cursor_to_dict(txn) - # If a room isn't already in the dict (i.e. it doesn't have a retention # policy in its state), add it with a null policy. 
- for row in rows: - if row["room_id"] not in rooms_dict: - rooms_dict[row["room_id"]] = RetentionPolicy() + for (room_id,) in txn: + if room_id not in rooms_dict: + rooms_dict[room_id] = RetentionPolicy() return rooms_dict @@ -1703,24 +1699,24 @@ class RoomBackgroundUpdateStore(SQLBaseStore): (last_room, batch_size), ) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return True - for row in rows: - if not row["json"]: + for room_id, event_id, json in rows: + if not json: retention_policy = {} else: - ev = db_to_json(row["json"]) + ev = db_to_json(json) retention_policy = ev["content"] self.db_pool.simple_insert_txn( txn=txn, table="room_retention", values={ - "room_id": row["room_id"], - "event_id": row["event_id"], + "room_id": room_id, + "event_id": event_id, "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), }, @@ -1729,7 +1725,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore): logger.info("Inserted %d rows into room_retention", len(rows)) self.db_pool.updates._background_update_progress_txn( - txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} + txn, "insert_room_retention", {"room_id": rows[-1][0]} ) if batch_size > len(rows): diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e93573f315..bbe08368db 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1349,18 +1349,16 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return 0 - min_stream_id = rows[-1]["stream_ordering"] + min_stream_id = rows[-1][0] to_update = [] - for row in rows: - event_id = row["event_id"] - room_id = row["room_id"] + for _, event_id, room_id, json in rows: try: - event_json = db_to_json(row["json"]) + event_json = db_to_json(json) content = event_json["content"] except Exception: continue diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index a7aae661d8..1d69c4a5f0 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -179,22 +179,24 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): # store_search_entries_txn with a generator function, but that # would mean having two cursors open on the database at once. # Instead we just build a list of results. - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() if not rows: return 0 - min_stream_id = rows[-1]["stream_ordering"] + min_stream_id = rows[-1][0] event_search_rows = [] - for row in rows: + for ( + stream_ordering, + event_id, + room_id, + etype, + json, + origin_server_ts, + ) in rows: try: - event_id = row["event_id"] - room_id = row["room_id"] - etype = row["type"] - stream_ordering = row["stream_ordering"] - origin_server_ts = row["origin_server_ts"] try: - event_json = db_to_json(row["json"]) + event_json = db_to_json(json) content = event_json["content"] except Exception: continue diff --git a/synapse/storage/databases/main/task_scheduler.py b/synapse/storage/databases/main/task_scheduler.py index 5c5372a825..5555b53575 100644 --- a/synapse/storage/databases/main/task_scheduler.py +++ b/synapse/storage/databases/main/task_scheduler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, List, Optional, Tuple, cast from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -27,6 +27,8 @@ from synapse.util import json_encoder if TYPE_CHECKING: from synapse.server import HomeServer +ScheduledTaskRow = Tuple[str, str, str, int, str, str, str, str] + class TaskSchedulerWorkerStore(SQLBaseStore): def __init__( @@ -38,13 +40,18 @@ class TaskSchedulerWorkerStore(SQLBaseStore): super().__init__(database, db_conn, hs) @staticmethod - def _convert_row_to_task(row: Dict[str, Any]) -> ScheduledTask: - row["status"] = TaskStatus(row["status"]) - if row["params"] is not None: - row["params"] = db_to_json(row["params"]) - if row["result"] is not None: - row["result"] = db_to_json(row["result"]) - return ScheduledTask(**row) + def _convert_row_to_task(row: ScheduledTaskRow) -> ScheduledTask: + task_id, action, status, timestamp, resource_id, params, result, error = row + return ScheduledTask( + id=task_id, + action=action, + status=TaskStatus(status), + timestamp=timestamp, + resource_id=resource_id, + params=db_to_json(params) if params is not None else None, + result=db_to_json(result) if result is not None else None, + error=error, + ) async def get_scheduled_tasks( self, @@ -68,7 +75,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): Returns: a list of `ScheduledTask`, ordered by increasing timestamps """ - def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]: + def get_scheduled_tasks_txn(txn: LoggingTransaction) -> List[ScheduledTaskRow]: clauses: List[str] = [] args: List[Any] = [] if resource_id: @@ -101,7 +108,7 @@ class TaskSchedulerWorkerStore(SQLBaseStore): args.append(limit) txn.execute(sql, args) - return self.db_pool.cursor_to_dict(txn) + return cast(List[ScheduledTaskRow], txn.fetchall()) rows = await self.db_pool.runInteraction( "get_scheduled_tasks", get_scheduled_tasks_txn @@ -193,7 +200,22 @@ class TaskSchedulerWorkerStore(SQLBaseStore): desc="get_scheduled_task", ) - return TaskSchedulerWorkerStore._convert_row_to_task(row) if row else None + return ( + TaskSchedulerWorkerStore._convert_row_to_task( + ( + row["id"], + row["action"], + row["status"], + row["timestamp"], + row["resource_id"], + row["params"], + row["result"], + row["error"], + ) + ) + if row + else None + ) async def delete_scheduled_task(self, id: str) -> None: """Delete a specific task from its id. From 3555790b27a923f29283dbb01fed6844086edcd1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 5 Oct 2023 17:42:44 -0400 Subject: [PATCH 021/142] Remove unused method. (#16435) --- changelog.d/16435.misc | 1 + synapse/storage/databases/main/__init__.py | 20 -------------------- 2 files changed, 1 insertion(+), 20 deletions(-) create mode 100644 changelog.d/16435.misc diff --git a/changelog.d/16435.misc b/changelog.d/16435.misc new file mode 100644 index 0000000000..e541607161 --- /dev/null +++ b/changelog.d/16435.misc @@ -0,0 +1 @@ +Remove unused method. diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 101403578c..dfcbf0a175 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -142,26 +142,6 @@ class DataStore( super().__init__(database, db_conn, hs) - async def get_users(self) -> List[JsonDict]: - """Function to retrieve a list of users in users table. - - Returns: - A list of dictionaries representing users. 
- """ - return await self.db_pool.simple_select_list( - table="users", - keyvalues={}, - retcols=[ - "name", - "password_hash", - "is_guest", - "admin", - "user_type", - "deactivated", - ], - desc="get_users", - ) - async def get_users_paginate( self, start: int, From 5946074d69314226343a0727f24e3aa9616aa1f6 Mon Sep 17 00:00:00 2001 From: V02460 Date: Fri, 6 Oct 2023 12:27:59 +0200 Subject: [PATCH 022/142] Bump pyo3 from 0.17.1 to 0.19.2 (#16162) Signed-off-by: Kai A. Hiller --- Cargo.lock | 28 ++++++++++++++-------------- changelog.d/16162.misc | 1 + rust/Cargo.toml | 6 +++--- rust/src/push/evaluator.rs | 11 +++++++++++ 4 files changed, 29 insertions(+), 17 deletions(-) create mode 100644 changelog.d/16162.misc diff --git a/Cargo.lock b/Cargo.lock index 084b8b91c3..f2b44b5448 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -144,9 +144,9 @@ checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -191,9 +191,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "268be0c73583c183f2b14052337465768c07726936a260f480f0857cb95ba543" +checksum = "e681a6cfdc4adcc93b4d3cf993749a4552018ee0a9b65fc0ccfad74352c72a38" dependencies = [ "anyhow", "cfg-if", @@ -209,9 +209,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28fcd1e73f06ec85bf3280c48c67e731d8290ad3d730f8be9dc07946923005c8" +checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5" dependencies = [ "once_cell", "target-lexicon", @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6cb136e222e49115b3c51c32792886defbfb0adead26a688142b346a0b9ffc" +checksum = "e53cee42e77ebe256066ba8aa77eff722b3bb91f3419177cf4cd0f304d3284d9" dependencies = [ "libc", "pyo3-build-config", @@ -240,9 +240,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94144a1266e236b1c932682136dc35a9dee8d3589728f68130c7c3861ef96b28" +checksum = "dfeb4c99597e136528c6dd7d5e3de5434d1ceaf487436a3f03b2d56b6fc9efd1" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -252,9 +252,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.17.3" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f" +checksum = "947dc12175c254889edc0c02e399476c2f652b4b9ebd123aa655c224de259536" dependencies = [ "proc-macro2", "quote", @@ -263,9 +263,9 @@ dependencies = [ [[package]] name = "pythonize" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6" +checksum = "8e35b716d430ace57e2d1b4afb51c9e5b7c46d2bce72926e07f9be6a98ced03e" dependencies = [ "pyo3", "serde", diff --git a/changelog.d/16162.misc 
b/changelog.d/16162.misc new file mode 100644 index 0000000000..b6c77229c1 --- /dev/null +++ b/changelog.d/16162.misc @@ -0,0 +1 @@ +Bump pyo3 from 0.17.1 to 0.19.2. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 26403d58cc..f62da35a6f 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -25,14 +25,14 @@ name = "synapse.synapse_rust" anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.17.1", features = [ +pyo3 = { version = "0.19.2", features = [ "macros", "anyhow", "abi3", - "abi3-py37", + "abi3-py38", ] } pyo3-log = "0.8.1" -pythonize = "0.17.0" +pythonize = "0.19.0" regex = "1.6.0" serde = { version = "1.0.144", features = ["derive"] } serde_json = "1.0.85" diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 48e670478b..3bde075528 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -105,6 +105,17 @@ impl PushRuleEvaluator { /// Create a new `PushRuleEvaluator`. See struct docstring for details. #[allow(clippy::too_many_arguments)] #[new] + #[pyo3(signature = ( + flattened_keys, + has_mentions, + room_member_count, + sender_power_level, + notification_power_levels, + related_events_flattened, + related_event_match_enabled, + room_version_feature_flags, + msc3931_enabled, + ))] pub fn py_new( flattened_keys: BTreeMap, has_mentions: bool, From 26b960b08ba0110ef3246e5749bb75b9b04a231c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 07:22:55 -0400 Subject: [PATCH 023/142] Register media servlets via regex. (#16419) This converts the media servlet URLs in the same way as (most) of the rest of Synapse. This will give more flexibility in the versions each endpoint exists under. --- changelog.d/16419.misc | 1 + synapse/http/server.py | 2 +- synapse/media/_base.py | 48 +------ synapse/media/media_repository.py | 10 +- synapse/rest/media/config_resource.py | 13 +- synapse/rest/media/download_resource.py | 40 ++++-- .../rest/media/media_repository_resource.py | 33 +++-- synapse/rest/media/preview_url_resource.py | 26 ++-- synapse/rest/media/thumbnail_resource.py | 35 ++--- synapse/rest/media/upload_resource.py | 14 +- tests/media/test_media_storage.py | 88 ++++++------- tests/media/test_url_previewer.py | 6 +- tests/replication/test_multi_media_repo.py | 19 ++- tests/rest/admin/test_admin.py | 58 +++----- tests/rest/admin/test_media.py | 71 +++------- tests/rest/admin/test_statistics.py | 15 ++- tests/rest/admin/test_user.py | 21 ++- tests/rest/client/utils.py | 6 +- tests/rest/media/test_url_preview.py | 124 +++++++++++------- tests/unittest.py | 4 +- 20 files changed, 297 insertions(+), 337 deletions(-) create mode 100644 changelog.d/16419.misc diff --git a/changelog.d/16419.misc b/changelog.d/16419.misc new file mode 100644 index 0000000000..591f371d00 --- /dev/null +++ b/changelog.d/16419.misc @@ -0,0 +1 @@ +Update registration of media repository URLs. 
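For readers unfamiliar with the regex-based registration this patch moves to: each servlet declares PATTERNS as a list of compiled regexes, and the named groups (server_name, media_id and the optional file_name, matching the on_GET signatures below) are handed to the matching handler as keyword arguments. The following is a minimal, self-contained sketch of that dispatch idea; EchoServlet and dispatch() are illustrative stand-ins, not Synapse's actual RestServlet/HttpServer classes.

import re
from typing import Optional

class EchoServlet:
    # Same URL shape as the download servlet in this patch; the named
    # groups in the regex feed the handler's keyword arguments.
    PATTERNS = [
        re.compile(
            "/_matrix/media/(r0|v3|v1)/download/"
            "(?P<server_name>[^/]*)/(?P<media_id>[^/]*)(/(?P<file_name>[^/]*))?$"
        )
    ]

    def on_GET(
        self, server_name: str, media_id: str, file_name: Optional[str] = None
    ) -> str:
        suffix = f"/{file_name}" if file_name else ""
        return f"{server_name}/{media_id}{suffix}"

def dispatch(servlet: EchoServlet, path: str) -> Optional[str]:
    # Hypothetical stand-in for HttpServer.register_paths() plus request
    # routing: the first matching pattern wins and its named groups become
    # keyword arguments for the handler.
    for pattern in servlet.PATTERNS:
        match = pattern.match(path)
        if match:
            return servlet.on_GET(**match.groupdict())
    return None

print(dispatch(EchoServlet(), "/_matrix/media/v3/download/example.com/abc123"))
# -> example.com/abc123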
diff --git a/synapse/http/server.py b/synapse/http/server.py index 3bbf91298e..1e4e56f36b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -266,7 +266,7 @@ class HttpServer(Protocol): def register_paths( self, method: str, - path_patterns: Iterable[Pattern], + path_patterns: Iterable[Pattern[str]], callback: ServletCallback, servlet_classname: str, ) -> None: diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 80c448de2b..d103b43449 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -26,11 +26,11 @@ from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender from twisted.web.server import Request -from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.util.stringutils import is_ascii, parse_and_validate_server_name +from synapse.util.stringutils import is_ascii logger = logging.getLogger(__name__) @@ -84,52 +84,12 @@ INLINE_CONTENT_TYPES = [ ] -def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: - """Parses the server name, media ID and optional file name from the request URI - - Also performs some rough validation on the server name. - - Args: - request: The `Request`. - - Returns: - A tuple containing the parsed server name, media ID and optional file name. - - Raises: - SynapseError(404): if parsing or validation fail for any reason - """ - try: - # The type on postpath seems incorrect in Twisted 21.2.0. - postpath: List[bytes] = request.postpath # type: ignore - assert postpath - - # This allows users to append e.g. /test.png to the URL. Useful for - # clients that parse the URL to see content type. 
- server_name_bytes, media_id_bytes = postpath[:2] - server_name = server_name_bytes.decode("utf-8") - media_id = media_id_bytes.decode("utf8") - - # Validate the server name, raising if invalid - parse_and_validate_server_name(server_name) - - file_name = None - if len(postpath) > 2: - try: - file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) - except UnicodeDecodeError: - pass - return server_name, media_id, file_name - except Exception: - raise SynapseError( - 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN - ) - - def respond_404(request: SynapseRequest) -> None: + assert request.path is not None respond_with_json( request, 404, - cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), + cs_error("Not found '%s'" % (request.path.decode(),), code=Codes.NOT_FOUND), send_cors=True, ) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 1b7b014f9a..d11c2ff4ee 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -48,6 +48,7 @@ from synapse.media.filepath import MediaFilePaths from synapse.media.media_storage import MediaStorage from synapse.media.storage_provider import StorageProviderWrapper from synapse.media.thumbnailer import Thumbnailer, ThumbnailError +from synapse.media.url_previewer import UrlPreviewer from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID from synapse.util.async_helpers import Linearizer @@ -114,7 +115,7 @@ class MediaRepository: ) storage_providers.append(provider) - self.media_storage = MediaStorage( + self.media_storage: MediaStorage = MediaStorage( self.hs, self.primary_base_path, self.filepaths, storage_providers ) @@ -142,6 +143,13 @@ class MediaRepository: MEDIA_RETENTION_CHECK_PERIOD_MS, ) + if hs.config.media.url_preview_enabled: + self.url_previewer: Optional[UrlPreviewer] = UrlPreviewer( + hs, self, self.media_storage + ) + else: + self.url_previewer = None + def _start_update_recently_accessed(self) -> Deferred: return run_as_background_process( "update_recently_accessed_media", self._update_recently_accessed diff --git a/synapse/rest/media/config_resource.py b/synapse/rest/media/config_resource.py index a95804d327..dbf5133c72 100644 --- a/synapse/rest/media/config_resource.py +++ b/synapse/rest/media/config_resource.py @@ -14,17 +14,19 @@ # limitations under the License. 
# +import re from typing import TYPE_CHECKING -from synapse.http.server import DirectServeJsonResource, respond_with_json +from synapse.http.server import respond_with_json +from synapse.http.servlet import RestServlet from synapse.http.site import SynapseRequest if TYPE_CHECKING: from synapse.server import HomeServer -class MediaConfigResource(DirectServeJsonResource): - isLeaf = True +class MediaConfigResource(RestServlet): + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/config$")] def __init__(self, hs: "HomeServer"): super().__init__() @@ -33,9 +35,6 @@ class MediaConfigResource(DirectServeJsonResource): self.auth = hs.get_auth() self.limits_dict = {"m.upload.size": config.media.max_upload_size} - async def _async_render_GET(self, request: SynapseRequest) -> None: + async def on_GET(self, request: SynapseRequest) -> None: await self.auth.get_user_by_req(request) respond_with_json(request, 200, self.limits_dict, send_cors=True) - - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - respond_with_json(request, 200, {}, send_cors=True) diff --git a/synapse/rest/media/download_resource.py b/synapse/rest/media/download_resource.py index 3c618ef60a..65b9ff52fa 100644 --- a/synapse/rest/media/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -13,16 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING +import re +from typing import TYPE_CHECKING, Optional -from synapse.http.server import ( - DirectServeJsonResource, - set_corp_headers, - set_cors_headers, -) -from synapse.http.servlet import parse_boolean +from synapse.http.server import set_corp_headers, set_cors_headers +from synapse.http.servlet import RestServlet, parse_boolean from synapse.http.site import SynapseRequest -from synapse.media._base import parse_media_id, respond_404 +from synapse.media._base import respond_404 +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository @@ -31,15 +29,28 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class DownloadResource(DirectServeJsonResource): - isLeaf = True +class DownloadResource(RestServlet): + PATTERNS = [ + re.compile( + "/_matrix/media/(r0|v3|v1)/download/(?P[^/]*)/(?P[^/]*)(/(?P[^/]*))?$" + ) + ] def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): super().__init__() self.media_repo = media_repo self._is_mine_server_name = hs.is_mine_server_name - async def _async_render_GET(self, request: SynapseRequest) -> None: + async def on_GET( + self, + request: SynapseRequest, + server_name: str, + media_id: str, + file_name: Optional[str] = None, + ) -> None: + # Validate the server name, raising if invalid + parse_and_validate_server_name(server_name) + set_cors_headers(request) set_corp_headers(request) request.setHeader( @@ -58,9 +69,8 @@ class DownloadResource(DirectServeJsonResource): b"Referrer-Policy", b"no-referrer", ) - server_name, media_id, name = parse_media_id(request) if self._is_mine_server_name(server_name): - await self.media_repo.get_local_media(request, media_id, name) + await self.media_repo.get_local_media(request, media_id, file_name) else: allow_remote = parse_boolean(request, "allow_remote", default=True) if not allow_remote: @@ -72,4 +82,6 @@ class DownloadResource(DirectServeJsonResource): respond_404(request) return - await self.media_repo.get_remote_media(request, server_name, media_id, name) + 
await self.media_repo.get_remote_media( + request, server_name, media_id, file_name + ) diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py index 5ebaa3b032..2089bb1029 100644 --- a/synapse/rest/media/media_repository_resource.py +++ b/synapse/rest/media/media_repository_resource.py @@ -15,7 +15,7 @@ from typing import TYPE_CHECKING from synapse.config._base import ConfigError -from synapse.http.server import UnrecognizedRequestResource +from synapse.http.server import HttpServer, JsonResource from .config_resource import MediaConfigResource from .download_resource import DownloadResource @@ -27,7 +27,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer -class MediaRepositoryResource(UnrecognizedRequestResource): +class MediaRepositoryResource(JsonResource): """File uploading and downloading. Uploads are POSTed to a resource which returns a token which is used to GET @@ -70,6 +70,11 @@ class MediaRepositoryResource(UnrecognizedRequestResource): width and height are close to the requested size and the aspect matches the requested size. The client should scale the image if it needs to fit within a given rectangle. + + This gets mounted at various points under /_matrix/media, including: + * /_matrix/media/r0 + * /_matrix/media/v1 + * /_matrix/media/v3 """ def __init__(self, hs: "HomeServer"): @@ -77,17 +82,23 @@ class MediaRepositoryResource(UnrecognizedRequestResource): if not hs.config.media.can_load_media_repo: raise ConfigError("Synapse is not configured to use a media repo.") - super().__init__() + JsonResource.__init__(self, hs, canonical_json=False) + self.register_servlets(self, hs) + + @staticmethod + def register_servlets(http_server: HttpServer, hs: "HomeServer") -> None: media_repo = hs.get_media_repository() - self.putChild(b"upload", UploadResource(hs, media_repo)) - self.putChild(b"download", DownloadResource(hs, media_repo)) - self.putChild( - b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) + # Note that many of these should not exist as v1 endpoints, but empirically + # a lot of traffic still goes to them. + + UploadResource(hs, media_repo).register(http_server) + DownloadResource(hs, media_repo).register(http_server) + ThumbnailResource(hs, media_repo, media_repo.media_storage).register( + http_server ) if hs.config.media.url_preview_enabled: - self.putChild( - b"preview_url", - PreviewUrlResource(hs, media_repo, media_repo.media_storage), + PreviewUrlResource(hs, media_repo, media_repo.media_storage).register( + http_server ) - self.putChild(b"config", MediaConfigResource(hs)) + MediaConfigResource(hs).register(http_server) diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py index 58513c4be4..c8acb65dca 100644 --- a/synapse/rest/media/preview_url_resource.py +++ b/synapse/rest/media/preview_url_resource.py @@ -13,24 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import re from typing import TYPE_CHECKING -from synapse.http.server import ( - DirectServeJsonResource, - respond_with_json, - respond_with_json_bytes, -) -from synapse.http.servlet import parse_integer, parse_string +from synapse.http.server import respond_with_json_bytes +from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.media.media_storage import MediaStorage -from synapse.media.url_previewer import UrlPreviewer if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer -class PreviewUrlResource(DirectServeJsonResource): +class PreviewUrlResource(RestServlet): """ The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix @@ -48,7 +44,7 @@ class PreviewUrlResource(DirectServeJsonResource): * Matrix cannot be used to distribute the metadata between homeservers. """ - isLeaf = True + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/preview_url$")] def __init__( self, @@ -62,14 +58,10 @@ class PreviewUrlResource(DirectServeJsonResource): self.clock = hs.get_clock() self.media_repo = media_repo self.media_storage = media_storage + assert self.media_repo.url_previewer is not None + self.url_previewer = self.media_repo.url_previewer - self._url_previewer = UrlPreviewer(hs, media_repo, media_storage) - - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - request.setHeader(b"Allow", b"OPTIONS, GET") - respond_with_json(request, 200, {}, send_cors=True) - - async def _async_render_GET(self, request: SynapseRequest) -> None: + async def on_GET(self, request: SynapseRequest) -> None: # XXX: if get_user_by_req fails, what should we do in an async render? requester = await self.auth.get_user_by_req(request) url = parse_string(request, "url", required=True) @@ -77,5 +69,5 @@ class PreviewUrlResource(DirectServeJsonResource): if ts is None: ts = self.clock.time_msec() - og = await self._url_previewer.preview(url, requester.user, ts) + og = await self.url_previewer.preview(url, requester.user, ts) respond_with_json_bytes(request, 200, og, send_cors=True) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index 661e604b85..f9cd773f77 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -13,29 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import logging +import re from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from synapse.api.errors import Codes, SynapseError, cs_error from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP -from synapse.http.server import ( - DirectServeJsonResource, - respond_with_json, - set_corp_headers, - set_cors_headers, -) -from synapse.http.servlet import parse_integer, parse_string +from synapse.http.server import respond_with_json, set_corp_headers, set_cors_headers +from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.media._base import ( FileInfo, ThumbnailInfo, - parse_media_id, respond_404, respond_with_file, respond_with_responder, ) from synapse.media.media_storage import MediaStorage +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository @@ -44,8 +39,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class ThumbnailResource(DirectServeJsonResource): - isLeaf = True +class ThumbnailResource(RestServlet): + PATTERNS = [ + re.compile( + "/_matrix/media/(r0|v3|v1)/thumbnail/(?P[^/]*)/(?P[^/]*)$" + ) + ] def __init__( self, @@ -60,12 +59,17 @@ class ThumbnailResource(DirectServeJsonResource): self.media_storage = media_storage self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails self._is_mine_server_name = hs.is_mine_server_name + self._server_name = hs.hostname self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from - async def _async_render_GET(self, request: SynapseRequest) -> None: + async def on_GET( + self, request: SynapseRequest, server_name: str, media_id: str + ) -> None: + # Validate the server name, raising if invalid + parse_and_validate_server_name(server_name) + set_cors_headers(request) set_corp_headers(request) - server_name, media_id, _ = parse_media_id(request) width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") @@ -418,13 +422,14 @@ class ThumbnailResource(DirectServeJsonResource): # `dynamic_thumbnails` is disabled. logger.info("Failed to find any generated thumbnails") + assert request.path is not None respond_with_json( request, 400, cs_error( - "Cannot find any thumbnails for the requested media (%r). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)" + "Cannot find any thumbnails for the requested media ('%s'). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)" % ( - request.postpath, + request.path.decode(), ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()), ), code=Codes.UNKNOWN, diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 043e8d6077..949326d85d 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -14,11 +14,12 @@ # limitations under the License. 
import logging +import re from typing import IO, TYPE_CHECKING, Dict, List, Optional from synapse.api.errors import Codes, SynapseError -from synapse.http.server import DirectServeJsonResource, respond_with_json -from synapse.http.servlet import parse_bytes_from_args +from synapse.http.server import respond_with_json +from synapse.http.servlet import RestServlet, parse_bytes_from_args from synapse.http.site import SynapseRequest from synapse.media.media_storage import SpamMediaException @@ -29,8 +30,8 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class UploadResource(DirectServeJsonResource): - isLeaf = True +class UploadResource(RestServlet): + PATTERNS = [re.compile("/_matrix/media/(r0|v3|v1)/upload")] def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): super().__init__() @@ -43,10 +44,7 @@ class UploadResource(DirectServeJsonResource): self.max_upload_size = hs.config.media.max_upload_size self.clock = hs.get_clock() - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - respond_with_json(request, 200, {}, send_cors=True) - - async def _async_render_POST(self, request: SynapseRequest) -> None: + async def on_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 04fc7bdcef..ba00e35a9e 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -28,6 +28,7 @@ from typing_extensions import Literal from twisted.internet import defer from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource from synapse.api.errors import Codes from synapse.events import EventBase @@ -41,12 +42,13 @@ from synapse.module_api import ModuleApi from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.rest import admin from synapse.rest.client import login +from synapse.rest.media.thumbnail_resource import ThumbnailResource from synapse.server import HomeServer from synapse.types import JsonDict, RoomAlias from synapse.util import Clock from tests import unittest -from tests.server import FakeChannel, FakeSite, make_request +from tests.server import FakeChannel from tests.test_utils import SMALL_PNG from tests.utils import default_config @@ -288,22 +290,22 @@ class MediaRepoTests(unittest.HomeserverTestCase): return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_resource = hs.get_media_repository_resource() - self.download_resource = media_resource.children[b"download"] - self.thumbnail_resource = media_resource.children[b"thumbnail"] self.store = hs.get_datastores().main self.media_repo = hs.get_media_repository() self.media_id = "example.com/12345" + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def _req( self, content_disposition: Optional[bytes], include_content_type: bool = True ) -> FakeChannel: - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - self.media_id, + f"/_matrix/media/v3/download/{self.media_id}", shorthand=False, await_result=False, ) @@ -481,11 +483,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): # 
Fetching again should work, without re-requesting the image from the # remote. params = "?width=32&height=32&method=scale" - channel = make_request( - self.reactor, - FakeSite(self.thumbnail_resource, self.reactor), + channel = self.make_request( "GET", - self.media_id + params, + f"/_matrix/media/v3/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -511,11 +511,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) shutil.rmtree(thumbnail_dir, ignore_errors=True) - channel = make_request( - self.reactor, - FakeSite(self.thumbnail_resource, self.reactor), + channel = self.make_request( "GET", - self.media_id + params, + f"/_matrix/media/v3/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -549,11 +547,9 @@ class MediaRepoTests(unittest.HomeserverTestCase): """ params = "?width=32&height=32&method=" + method - channel = make_request( - self.reactor, - FakeSite(self.thumbnail_resource, self.reactor), + channel = self.make_request( "GET", - self.media_id + params, + f"/_matrix/media/r0/thumbnail/{self.media_id}{params}", shorthand=False, await_result=False, ) @@ -590,7 +586,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.json_body, { "errcode": "M_UNKNOWN", - "error": "Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", + "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", }, ) else: @@ -600,7 +596,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): channel.json_body, { "errcode": "M_NOT_FOUND", - "error": "Not found [b'example.com', b'12345']", + "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'", }, ) @@ -609,12 +605,17 @@ class MediaRepoTests(unittest.HomeserverTestCase): """Test that choosing between thumbnails with the same quality rating succeeds. We are not particular about which thumbnail is chosen.""" + media_repo = self.hs.get_media_repository() + thumbnail_resouce = ThumbnailResource( + self.hs, media_repo, media_repo.media_storage + ) + self.assertIsNotNone( - self.thumbnail_resource._select_thumbnail( + thumbnail_resouce._select_thumbnail( desired_width=desired_size, desired_height=desired_size, desired_method=method, - desired_type=self.test_image.content_type, + desired_type=self.test_image.content_type, # type: ignore[arg-type] # Provide two identical thumbnails which are guaranteed to have the same # quality rating. 
thumbnail_infos=[ @@ -636,7 +637,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): }, ], file_id=f"image{self.test_image.extension.decode()}", - url_cache=None, + url_cache=False, server_name=None, ) ) @@ -725,13 +726,13 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] - load_legacy_spam_checkers(hs) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def default_config(self) -> Dict[str, Any]: config = default_config("test") @@ -751,9 +752,7 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" - self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should @@ -762,9 +761,7 @@ class SpamCheckerTestCaseLegacy(unittest.HomeserverTestCase): data = b"Some evil data" - self.helper.upload_media( - self.upload_resource, data, tok=self.tok, expect_code=400 - ) + self.helper.upload_media(data, tok=self.tok, expect_code=400) EVIL_DATA = b"Some evil data" @@ -781,15 +778,15 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] - hs.get_module_api().register_spam_checker_callbacks( check_media_file_for_spam=self.check_media_file_for_spam ) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo ) -> Union[Codes, Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]: @@ -805,21 +802,16 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" - self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should get rejected by the spam checker. 
""" - self.helper.upload_media( - self.upload_resource, EVIL_DATA, tok=self.tok, expect_code=400 - ) + self.helper.upload_media(EVIL_DATA, tok=self.tok, expect_code=400) self.helper.upload_media( - self.upload_resource, EVIL_DATA_EXPERIMENT, tok=self.tok, expect_code=400, diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py index 46ecde5344..04b69f378a 100644 --- a/tests/media/test_url_previewer.py +++ b/tests/media/test_url_previewer.py @@ -61,9 +61,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): return self.setup_test_homeserver(config=config) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo_resource = hs.get_media_repository_resource() - preview_url = media_repo_resource.children[b"preview_url"] - self.url_previewer = preview_url._url_previewer + media_repo = hs.get_media_repository() + assert media_repo.url_previewer is not None + self.url_previewer = media_repo.url_previewer def test_all_urls_allowed(self) -> None: self.assertFalse(self.url_previewer._is_url_blocked("http://matrix.org")) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 6e78daa830..b230a6c361 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -13,7 +13,7 @@ # limitations under the License. import logging import os -from typing import Optional, Tuple +from typing import Any, Optional, Tuple from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory @@ -29,7 +29,7 @@ from synapse.util import Clock from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.replication._base import BaseMultiWorkerStreamTestCase -from tests.server import FakeChannel, FakeSite, FakeTransport, make_request +from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG logger = logging.getLogger(__name__) @@ -56,6 +56,16 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): conf["federation_custom_ca_list"] = [get_test_ca_cert_file()] return conf + def make_worker_hs( + self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any + ) -> HomeServer: + worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs) + # Force the media paths onto the replication resource. + worker_hs.get_media_repository_resource().register_servlets( + self._hs_to_site[worker_hs].resource, worker_hs + ) + return worker_hs + def _get_media_req( self, hs: HomeServer, target: str, media_id: str ) -> Tuple[FakeChannel, Request]: @@ -68,12 +78,11 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): The channel for the *client* request and the *outbound* request for the media which the caller should respond to. """ - resource = hs.get_media_repository_resource().children[b"download"] channel = make_request( self.reactor, - FakeSite(resource, self.reactor), + self._hs_to_site[hs], "GET", - f"/{target}/{media_id}", + f"/_matrix/media/r0/download/{target}/{media_id}", shorthand=False, access_token=self.access_token, await_result=False, diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 359d131b37..8646b2f0fd 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -13,10 +13,12 @@ # limitations under the License. 
import urllib.parse +from typing import Dict from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.http.server import JsonResource @@ -26,7 +28,6 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG @@ -55,21 +56,18 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # Allow for uploading and downloading to/from the media repo - self.media_repo = hs.get_media_repository_resource() - self.download_resource = self.media_repo.children[b"download"] - self.upload_resource = self.media_repo.children[b"upload"] + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources def _ensure_quarantined( self, admin_user_tok: str, server_and_media_id: str ) -> None: """Ensure a piece of media is quarantined when trying to access it.""" - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=admin_user_tok, ) @@ -117,20 +115,16 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): non_admin_user_tok = self.login("id_nonadmin", "pass") # Upload some media into the room - response = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=admin_user_tok - ) + response = self.helper.upload_media(SMALL_PNG, tok=admin_user_tok) # Extract media ID from the response server_name_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' server_name, media_id = server_name_and_media_id.split("/") # Attempt to access the media - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_name_and_media_id, + f"/_matrix/media/v3/download/{server_name_and_media_id}", shorthand=False, access_token=non_admin_user_tok, ) @@ -173,12 +167,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self.helper.join(room_id, non_admin_user, tok=non_admin_user_tok) # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract mxcs mxc_1 = response_1["content_uri"] @@ -227,12 +217,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): non_admin_user_tok = self.login("user_nonadmin", "pass") # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract media IDs server_and_media_id_1 = response_1["content_uri"][6:] @@ -265,12 +251,8 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): 
non_admin_user_tok = self.login("user_nonadmin", "pass") # Upload some media - response_1 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) - response_2 = self.helper.upload_media( - self.upload_resource, SMALL_PNG, tok=non_admin_user_tok - ) + response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract media IDs server_and_media_id_1 = response_1["content_uri"][6:] @@ -304,11 +286,9 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) # Attempt to access each piece of media - channel = make_request( - self.reactor, - FakeSite(self.download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id_2, + f"/_matrix/media/v3/download/{server_and_media_id_2}", shorthand=False, access_token=non_admin_user_tok, ) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 6d04911d67..278808abb5 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -13,10 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +from typing import Dict from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.errors import Codes @@ -26,22 +28,27 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds -class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): +class _AdminMediaTests(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, login.register_servlets, ] + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + +class DeleteMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() self.server_name = hs.hostname self.admin_user = self.register_user("admin", "pass", admin=True) @@ -117,12 +124,8 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): Tests that delete a media is successfully """ - download_resource = self.media_repo.children[b"download"] - upload_resource = self.media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -134,11 +137,9 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): self.assertEqual(server_name, self.server_name) # Attempt to access media - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -173,11 +174,9 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): ) # Attempt to access media - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + 
channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -194,7 +193,7 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(os.path.exists(local_path)) -class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): +class DeleteMediaByDateSizeTestCase(_AdminMediaTests): servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -529,11 +528,8 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): """ Create a media and return media_id and server_and_media_id """ - upload_resource = self.media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -553,16 +549,12 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): """ Try to access a media and check the result """ - download_resource = self.media_repo.children[b"download"] - media_id = server_and_media_id.split("/")[1] local_path = self.filepaths.local_media_filepath(media_id) - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=self.admin_user_tok, ) @@ -591,27 +583,16 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): self.assertFalse(os.path.exists(local_path)) -class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - synapse.rest.admin.register_servlets_for_media_repo, - login.register_servlets, - ] - +class QuarantineMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo = hs.get_media_repository_resource() self.store = hs.get_datastores().main self.server_name = hs.hostname self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - # Create media - upload_resource = media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -720,26 +701,16 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(media_info["quarantined_by"]) -class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - synapse.rest.admin.register_servlets_for_media_repo, - login.register_servlets, - ] - +class ProtectMediaByIDTestCase(_AdminMediaTests): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_repo = hs.get_media_repository_resource() + hs.get_media_repository_resource() self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - # Create media - upload_resource = media_repo.children[b"upload"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, SMALL_PNG, tok=self.admin_user_tok, expect_code=200, @@ -816,7 +787,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): self.assertFalse(media_info["safe_from_quarantine"]) -class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): +class PurgeMediaCacheTestCase(_AdminMediaTests): servlets = [ 
synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index b60f16b914..cd8ee274d8 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -12,9 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +from typing import Dict, List, Optional from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.errors import Codes @@ -34,8 +35,6 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() - self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -44,6 +43,11 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): self.url = "/_synapse/admin/v1/statistics/users/media" + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + def test_no_auth(self) -> None: """ Try to list users without authentication. @@ -470,12 +474,9 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): user_token: Access token of the user number_media: Number of media to be created for the user """ - upload_resource = self.media_repo.children[b"upload"] for _ in range(number_media): # Upload some media into the room - self.helper.upload_media( - upload_resource, SMALL_PNG, tok=user_token, expect_code=200 - ) + self.helper.upload_media(SMALL_PNG, tok=user_token, expect_code=200) def _check_fields(self, content: List[JsonDict]) -> None: """Checks that all attributes are present in content diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b326ad2c90..37f37a09d8 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -17,12 +17,13 @@ import hmac import os import urllib.parse from binascii import unhexlify -from typing import List, Optional +from typing import Dict, List, Optional from unittest.mock import AsyncMock, Mock, patch from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes @@ -45,7 +46,6 @@ from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request from tests.test_utils import SMALL_PNG from tests.unittest import override_config @@ -3421,7 +3421,6 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main - self.media_repo = hs.get_media_repository_resource() self.filepaths = MediaFilePaths(hs.config.media.media_store_path) self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3432,6 +3431,11 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): self.other_user ) + def create_resource_dict(self) -> Dict[str, Resource]: + resources = 
super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + @parameterized.expand(["GET", "DELETE"]) def test_no_auth(self, method: str) -> None: """Try to list media of an user without authentication.""" @@ -3907,12 +3911,9 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): Returns: The ID of the newly created media. """ - upload_resource = self.media_repo.children[b"upload"] - download_resource = self.media_repo.children[b"download"] - # Upload some media into the room response = self.helper.upload_media( - upload_resource, image_data, user_token, filename, expect_code=200 + image_data, user_token, filename, expect_code=200 ) # Extract media ID from the response @@ -3920,11 +3921,9 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): media_id = server_and_media_id.split("/")[1] # Try to access a media and to create `last_access_ts` - channel = make_request( - self.reactor, - FakeSite(download_resource, self.reactor), + channel = self.make_request( "GET", - server_and_media_id, + f"/_matrix/media/v3/download/{server_and_media_id}", shorthand=False, access_token=user_token, ) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 9532e5ddc1..465b696c0b 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -37,7 +37,6 @@ import attr from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactorClock -from twisted.web.resource import Resource from twisted.web.server import Site from synapse.api.constants import Membership @@ -45,7 +44,7 @@ from synapse.api.errors import Codes from synapse.server import HomeServer from synapse.types import JsonDict -from tests.server import FakeChannel, FakeSite, make_request +from tests.server import FakeChannel, make_request from tests.test_utils.html_parsers import TestHtmlParser from tests.test_utils.oidc import FakeAuthorizationGrant, FakeOidcServer @@ -558,7 +557,6 @@ class RestHelper: def upload_media( self, - resource: Resource, image_data: bytes, tok: str, filename: str = "test.png", @@ -576,7 +574,7 @@ class RestHelper: path = "/_matrix/media/r0/upload?filename=%s" % (filename,) channel = make_request( self.reactor, - FakeSite(resource, self.reactor), + self.site, "POST", path, content=image_data, diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index 05d5e39cab..24459c6af4 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -24,10 +24,10 @@ from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import IAddress, IResolutionReceiver from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor +from twisted.web.resource import Resource from synapse.config.oembed import OEmbedEndpointConfig from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS -from synapse.rest.media.media_repository_resource import MediaRepositoryResource from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -117,8 +117,8 @@ class URLPreviewTests(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.media_repo = hs.get_media_repository() - media_repo_resource = hs.get_media_repository_resource() - self.preview_url = media_repo_resource.children[b"preview_url"] + assert self.media_repo.url_previewer is not None + 
self.url_previewer = self.media_repo.url_previewer self.lookups: Dict[str, Any] = {} @@ -143,8 +143,15 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_test_resource(self) -> MediaRepositoryResource: - return self.hs.get_media_repository_resource() + def create_resource_dict(self) -> Dict[str, Resource]: + """Create a resource tree for the test server + + A resource tree is a mapping from path to twisted.web.resource. + + The default implementation creates a JsonResource and calls each function in + `servlets` to register servlets against it. + """ + return {"/_matrix/media": self.hs.get_media_repository_resource()} def _assert_small_png(self, json_body: JsonDict) -> None: """Assert properties from the SMALL_PNG test image.""" @@ -159,7 +166,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -183,7 +190,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check the cache returns the correct response channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://matrix.org", + shorthand=False, ) # Check the cache response has the same content @@ -193,13 +202,15 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) # Clear the in-memory cache - self.assertIn("http://matrix.org", self.preview_url._url_previewer._cache) - self.preview_url._url_previewer._cache.pop("http://matrix.org") - self.assertNotIn("http://matrix.org", self.preview_url._url_previewer._cache) + self.assertIn("http://matrix.org", self.url_previewer._cache) + self.url_previewer._cache.pop("http://matrix.org") + self.assertNotIn("http://matrix.org", self.url_previewer._cache) # Check the database cache returns the correct response channel = self.make_request( - "GET", "preview_url?url=http://matrix.org", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://matrix.org", + shorthand=False, ) # Check the cache response has the same content @@ -221,7 +232,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -251,7 +262,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -287,7 +298,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -328,7 +339,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -363,7 +374,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -396,7 +407,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - 
"preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -425,7 +436,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) # No requests made. @@ -446,7 +459,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) @@ -463,7 +478,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blocked IP addresses, accessed directly, are not spidered. """ channel = self.make_request( - "GET", "preview_url?url=http://192.168.1.1", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://192.168.1.1", + shorthand=False, ) # No requests made. @@ -479,7 +496,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): Blocked IP ranges, accessed directly, are not spidered. """ channel = self.make_request( - "GET", "preview_url?url=http://1.1.1.2", shorthand=False + "GET", "/_matrix/media/v3/preview_url?url=http://1.1.1.2", shorthand=False ) self.assertEqual(channel.code, 403) @@ -497,7 +514,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -533,7 +550,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) self.assertEqual( @@ -553,7 +572,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): ] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) # No requests made. @@ -574,7 +595,9 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.lookups["example.com"] = [(IPv6Address, "2001:800::1")] channel = self.make_request( - "GET", "preview_url?url=http://example.com", shorthand=False + "GET", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) self.assertEqual(channel.code, 502) @@ -591,10 +614,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): OPTIONS returns the OPTIONS. 
""" channel = self.make_request( - "OPTIONS", "preview_url?url=http://example.com", shorthand=False + "OPTIONS", + "/_matrix/media/v3/preview_url?url=http://example.com", + shorthand=False, ) - self.assertEqual(channel.code, 200) - self.assertEqual(channel.json_body, {}) + self.assertEqual(channel.code, 204) def test_accept_language_config_option(self) -> None: """ @@ -605,7 +629,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Build and make a request to the server channel = self.make_request( "GET", - "preview_url?url=http://example.com", + "/_matrix/media/v3/preview_url?url=http://example.com", shorthand=False, await_result=False, ) @@ -658,7 +682,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -708,7 +732,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -750,7 +774,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -790,7 +814,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -831,7 +855,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"preview_url?{query_params}", + f"/_matrix/media/v3/preview_url?{query_params}", shorthand=False, ) self.pump() @@ -852,7 +876,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://matrix.org", + "/_matrix/media/v3/preview_url?url=http://matrix.org", shorthand=False, await_result=False, ) @@ -889,7 +913,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -949,7 +973,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -998,7 +1022,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.hulu.com/watch/12345", + "/_matrix/media/v3/preview_url?url=http://www.hulu.com/watch/12345", shorthand=False, await_result=False, ) @@ -1043,7 +1067,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1072,7 +1096,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, 
await_result=False, ) @@ -1164,7 +1188,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", + "/_matrix/media/v3/preview_url?url=http://www.twitter.com/matrixdotorg/status/12345", shorthand=False, await_result=False, ) @@ -1205,7 +1229,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=http://cdn.twitter.com/matrixdotorg", + "/_matrix/media/v3/preview_url?url=http://cdn.twitter.com/matrixdotorg", shorthand=False, await_result=False, ) @@ -1247,7 +1271,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"download/{host}/{media_id}", + f"/_matrix/media/v3/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1260,7 +1284,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"download/{host}/{media_id}", + f"/_matrix/media/v3/download/{host}/{media_id}", shorthand=False, await_result=False, ) @@ -1295,7 +1319,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): # Check fetching channel = self.make_request( "GET", - f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/media/v3/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1313,7 +1337,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - f"thumbnail/{host}/{media_id}?width=32&height=32&method=scale", + f"/_matrix/media/v3/thumbnail/{host}/{media_id}?width=32&height=32&method=scale", shorthand=False, await_result=False, ) @@ -1343,7 +1367,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertTrue(os.path.isdir(thumbnail_dir)) self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1) - self.get_success(self.preview_url._url_previewer._expire_url_cache_data()) + self.get_success(self.url_previewer._expire_url_cache_data()) for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs: self.assertFalse( @@ -1363,7 +1387,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + bad_url, + "/_matrix/media/v3/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) @@ -1372,7 +1396,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + good_url, + "/_matrix/media/v3/preview_url?url=" + good_url, shorthand=False, await_result=False, ) @@ -1404,7 +1428,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): channel = self.make_request( "GET", - "preview_url?url=" + bad_url, + "/_matrix/media/v3/preview_url?url=" + bad_url, shorthand=False, await_result=False, ) diff --git a/tests/unittest.py b/tests/unittest.py index dbaff361b4..99ad02eb06 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -60,7 +60,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.federation.transport.server import TransportLayerServer -from synapse.http.server import JsonResource +from synapse.http.server import JsonResource, OptionsResource from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import ( SENTINEL_CONTEXT, @@ -459,7 +459,7 @@ class HomeserverTestCase(TestCase): The default calls `self.create_resource_dict` and builds the 
resultant dict into a tree. """ - root_resource = Resource() + root_resource = OptionsResource() create_resource_tree(self.create_resource_dict(), root_resource) return root_resource From 694802eecdfe18544be5252605bd427e3a5a2b2e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 07:23:20 -0400 Subject: [PATCH 024/142] Add documentation on background updates. (#16420) --- changelog.d/16420.doc | 1 + docs/development/database_schema.md | 61 +++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 changelog.d/16420.doc diff --git a/changelog.d/16420.doc b/changelog.d/16420.doc new file mode 100644 index 0000000000..1c0c6b9577 --- /dev/null +++ b/changelog.d/16420.doc @@ -0,0 +1 @@ +Document internal background update mechanism. diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index 675080ae1b..37a06acc12 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -150,6 +150,67 @@ def run_upgrade( ... ``` +## Background updates + +It is sometimes appropriate to perform database migrations as part of a background +process (instead of blocking Synapse until the migration is done). In particular, +this is useful for migrating data when adding new columns or tables. + +Pending background updates stored in the `background_updates` table and are denoted +by a unique name, the current status (stored in JSON), and some dependency information: + +* Whether the update requires a previous update to be complete. +* A rough ordering for which to complete updates. + +A new background updates needs to be added to the `background_updates` table: + +```sql +INSERT INTO background_updates (ordering, update_name, depends_on, progress_json) VALUES + (7706, 'my_background_update', 'a_previous_background_update' '{}'); +``` + +And then needs an associated handler in the appropriate datastore: + +```python +self.db_pool.updates.register_background_update_handler( + "my_background_update", + update_handler=self._my_background_update, +) +``` + +There are a few types of updates that can be performed, see the `BackgroundUpdater`: + +* `register_background_update_handler`: A generic handler for custom SQL +* `register_background_index_update`: Create an index in the background +* `register_background_validate_constraint`: Validate a constraint in the background + (PostgreSQL-only) +* `register_background_validate_constraint_and_delete_rows`: Similar to + `register_background_validate_constraint`, but deletes rows which don't fit + the constraint. + +For `register_background_update_handler`, the generic handler must track progress +and then finalize the background update: + +```python +async def _my_background_update(self, progress: JsonDict, batch_size: int) -> int: + def _do_something(txn: LoggingTransaction) -> int: + ... + self.db_pool.updates._background_update_progress_txn( + txn, "my_background_update", {"last_processed": last_processed} + ) + return last_processed - prev_last_processed + + num_processed = await self.db_pool.runInteraction("_do_something", _do_something) + await self.db_pool.updates._end_background_update("my_background_update") + + return num_processed +``` + +Synapse will attempt to rate-limit how often background updates are run via the +given batch-size and the returned number of processed entries (and how long the +function took to run). See +[background update controller callbacks](../modules/background_update_controller_callbacks.md). 
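The index and constraint variants listed above follow the same two-step pattern as the generic handler: queue a row in `background_updates` from a delta file, then register the update against `self.db_pool.updates` in the datastore. A rough sketch of the index case (the table, column and update names are invented for illustration; only the `register_background_index_update` helper itself is assumed from `BackgroundUpdater`):

```python
# Delta file (SQL), with an illustrative ordering value:
#   INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
#       (8203, 'my_table_new_column_idx', '{}');

# Datastore: ask the BackgroundUpdater to build the index in batches once the
# server is running, rather than blocking the schema migration on CREATE INDEX.
self.db_pool.updates.register_background_index_update(
    update_name="my_table_new_column_idx",
    index_name="my_table_new_column_idx",
    table="my_table",
    columns=["new_column"],
    unique=False,
)
```

Index and constraint updates manage their own progress and finalization, so unlike the generic handler above they should not need custom calls to `_background_update_progress_txn` or `_end_background_update`.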
+ ## Boolean columns Boolean columns require special treatment, since SQLite treats booleans the From ae5b997cfac1a7d7540be7352f1c01295ce9100a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 07:25:44 -0400 Subject: [PATCH 025/142] Fix comments related to replication. (#16428) --- changelog.d/16428.misc | 1 + synapse/federation/sender/__init__.py | 2 +- synapse/replication/tcp/commands.py | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16428.misc diff --git a/changelog.d/16428.misc b/changelog.d/16428.misc new file mode 100644 index 0000000000..75c9c3b757 --- /dev/null +++ b/changelog.d/16428.misc @@ -0,0 +1 @@ +Improve code comments. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index fb20fd8a10..7b6b1da090 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -67,7 +67,7 @@ The loop continues so long as there is anything to send. At each iteration of th When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop* (`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`. -(Only once the catch-up mode has been exited can the regular tranaction transmission behaviour +(Only once the catch-up mode has been exited can the regular transaction transmission behaviour be resumed.) *Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 1b92302fd3..0f0f851b79 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -446,8 +446,6 @@ class RemoteServerUpCommand(_SimpleCommand): """Sent when a worker has detected that a remote server is no longer "down" and retry timings should be reset. - If sent from a client the server will relay to all other workers. - Format:: REMOTE_SERVER_UP From fc31b495b3a7f170019591c2e40e699b61c067a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 07:27:35 -0400 Subject: [PATCH 026/142] Stop sending incorrect knock_state_events. (#16403) Synapse was incorrectly implemented with a knock_state_events property on some APIs (instead of knock_room_state). This was correct in Synapse 1.70.0, but *both* fields were sent to also be compatible with Synapse versions expecting the wrong field. Enough time has passed that only the correct field needs to be included/handled. --- changelog.d/16403.bugfix | 1 + synapse/federation/federation_client.py | 4 ++-- synapse/federation/federation_server.py | 9 +-------- synapse/federation/transport/client.py | 2 +- synapse/handlers/federation.py | 13 ++----------- tests/federation/transport/test_knocking.py | 2 +- 6 files changed, 8 insertions(+), 23 deletions(-) create mode 100644 changelog.d/16403.bugfix diff --git a/changelog.d/16403.bugfix b/changelog.d/16403.bugfix new file mode 100644 index 0000000000..453c975a63 --- /dev/null +++ b/changelog.d/16403.bugfix @@ -0,0 +1 @@ +Remove legacy unspecced `knock_state_events` field returned in some responses. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c8bc46415d..1a7fa175ec 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1402,7 +1402,7 @@ class FederationClient(FederationBase): The remote homeserver return some state from the room. 
The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. @@ -1429,7 +1429,7 @@ class FederationClient(FederationBase): The remote homeserver can optionally return some state from the room. The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. """ diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ec8e770430..6ac8d16095 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -850,14 +850,7 @@ class FederationServer(FederationBase): context, self._room_prejoin_state_types ) ) - return { - "knock_room_state": stripped_room_state, - # Since v1.37, Synapse incorrectly used "knock_state_events" for this field. - # Thus, we also populate a 'knock_state_events' with the same content to - # support old instances. - # See https://github.com/matrix-org/synapse/issues/14088. - "knock_state_events": stripped_room_state, - } + return {"knock_room_state": stripped_room_state} async def _on_send_membership_event( self, origin: str, content: JsonDict, membership_type: str, room_id: str diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index b5e4b2680e..fab4800717 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -431,7 +431,7 @@ class TransportLayerClient: The remote homeserver can optionally return some state from the room. The response dictionary is in the form: - {"knock_state_events": [, ...]} + {"knock_room_state": [, ...]} The list of state events may be empty. """ diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 29cd45550a..807a0867cc 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -868,19 +868,10 @@ class FederationHandler: # This is a bit of a hack and is cribbing off of invites. Basically we # store the room state here and retrieve it again when this event appears # in the invitee's sync stream. It is stripped out for all other local users. - stripped_room_state = ( - knock_response.get("knock_room_state") - # Since v1.37, Synapse incorrectly used "knock_state_events" for this field. - # Thus, we also check for a 'knock_state_events' to support old instances. - # See https://github.com/matrix-org/synapse/issues/14088. 
- or knock_response.get("knock_state_events") - ) + stripped_room_state = knock_response.get("knock_room_state") if stripped_room_state is None: - raise KeyError( - "Missing 'knock_room_state' (or legacy 'knock_state_events') field in " - "send_knock response" - ) + raise KeyError("Missing 'knock_room_state' field in send_knock response") event.unsigned["knock_room_state"] = stripped_room_state diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index 3f42f79f26..b63ef3d4ed 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -308,7 +308,7 @@ class FederationKnockingTestCase( self.assertEqual(200, channel.code, channel.result) # Check that we got the stripped room state in return - room_state_events = channel.json_body["knock_state_events"] + room_state_events = channel.json_body["knock_room_state"] # Validate the stripped room state events self.check_knock_room_state_against_room_state( From cabd57746004fe2dacc11aa8d373854a3d25e306 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 08:29:33 -0400 Subject: [PATCH 027/142] Drop unused tables & unneeded access token ID for events. (#16268) Drop the event_txn_id table and the tables related to MSC2716, which is no longer supported in Synapse. --- changelog.d/16268.misc | 1 + synapse/handlers/message.py | 8 ++----- synapse/storage/schema/__init__.py | 4 ++-- .../main/delta/82/03_drop_old_tables.sql | 24 +++++++++++++++++++ 4 files changed, 29 insertions(+), 8 deletions(-) create mode 100644 changelog.d/16268.misc create mode 100644 synapse/storage/schema/main/delta/82/03_drop_old_tables.sql diff --git a/changelog.d/16268.misc b/changelog.d/16268.misc new file mode 100644 index 0000000000..26059b108e --- /dev/null +++ b/changelog.d/16268.misc @@ -0,0 +1 @@ +Clean-up unused tables. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 44dbbf81dd..d0d4626ed6 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -693,13 +693,9 @@ class EventCreationHandler: if require_consent and not is_exempt: await self.assert_accepted_privacy_policy(requester) - # Save the access token ID, the device ID and the transaction ID in the event - # internal metadata. This is useful to determine if we should echo the - # transaction_id in events. + # Save the the device ID and the transaction ID in the event internal metadata. + # This is useful to determine if we should echo the transaction_id in events. # See `synapse.events.utils.EventClientSerializer.serialize_event` - if requester.access_token_id is not None: - builder.internal_metadata.token_id = requester.access_token_id - if requester.device_id is not None: builder.internal_metadata.device_id = requester.device_id diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5b50bd66bc..de89de7d74 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -125,8 +125,8 @@ Changes in SCHEMA_VERSION = 82 SCHEMA_COMPAT_VERSION = ( - # The `event_txn_id_device_id` must be written to for new events. - 80 + # The event_txn_id table and tables from MSC2716 no longer exist. 
+ 82 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql b/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql new file mode 100644 index 0000000000..149020bbd7 --- /dev/null +++ b/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql @@ -0,0 +1,24 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Drop the old event transaction ID table, the event_txn_id_device_id table +-- should be used instead. +DROP TABLE IF EXISTS event_txn_id; + +-- Drop tables related to MSC2716 since the implementation is being removed +DROP TABLE insertion_events; +DROP TABLE insertion_event_edges; +DROP TABLE insertion_event_extremities; +DROP TABLE batch_events; From 7615e2bf48d7bed3da7235d60f84a3c847ac78f5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 10:12:43 -0400 Subject: [PATCH 028/142] Return ThumbnailInfo in more places (#16438) Improves type hints by using concrete types instead of dictionaries. --- changelog.d/16438.misc | 1 + synapse/media/_base.py | 2 +- synapse/media/media_repository.py | 3 + synapse/rest/media/thumbnail_resource.py | 98 ++++++++----------- .../databases/main/media_repository.py | 30 +++++- tests/media/test_media_storage.py | 36 +++---- 6 files changed, 90 insertions(+), 80 deletions(-) create mode 100644 changelog.d/16438.misc diff --git a/changelog.d/16438.misc b/changelog.d/16438.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16438.misc @@ -0,0 +1 @@ +Reduce memory allocations. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index d103b43449..13345acf75 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -332,7 +332,7 @@ class ThumbnailInfo: # Content type of thumbnail, e.g. image/png type: str # The size of the media file, in bytes. 
- length: Optional[int] = None + length: int @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index d11c2ff4ee..7fd46901f7 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -624,6 +624,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) @@ -694,6 +695,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) @@ -839,6 +841,7 @@ class MediaRepository: height=t_height, method=t_method, type=t_type, + length=t_byte_source.tell(), ), ) diff --git a/synapse/rest/media/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py index f9cd773f77..85b6bdbe72 100644 --- a/synapse/rest/media/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -15,7 +15,7 @@ import logging import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import Codes, SynapseError, cs_error from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP @@ -159,30 +159,24 @@ class ThumbnailResource(RestServlet): thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: - t_w = info["thumbnail_width"] == desired_width - t_h = info["thumbnail_height"] == desired_height - t_method = info["thumbnail_method"] == desired_method - t_type = info["thumbnail_type"] == desired_type + t_w = info.width == desired_width + t_h = info.height == desired_height + t_method = info.method == desired_method + t_type = info.type == desired_type if t_w and t_h and t_method and t_type: file_info = FileInfo( server_name=None, file_id=media_id, url_cache=media_info["url_cache"], - thumbnail=ThumbnailInfo( - width=info["thumbnail_width"], - height=info["thumbnail_height"], - type=info["thumbnail_type"], - method=info["thumbnail_method"], - ), + thumbnail=info, ) - t_type = file_info.thumbnail_type - t_length = info["thumbnail_length"] - responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder( + request, responder, info.type, info.length + ) return logger.debug("We don't have a thumbnail of that size. 
Generating") @@ -222,29 +216,23 @@ class ThumbnailResource(RestServlet): file_id = media_info["filesystem_id"] for info in thumbnail_infos: - t_w = info["thumbnail_width"] == desired_width - t_h = info["thumbnail_height"] == desired_height - t_method = info["thumbnail_method"] == desired_method - t_type = info["thumbnail_type"] == desired_type + t_w = info.width == desired_width + t_h = info.height == desired_height + t_method = info.method == desired_method + t_type = info.type == desired_type if t_w and t_h and t_method and t_type: file_info = FileInfo( server_name=server_name, file_id=media_info["filesystem_id"], - thumbnail=ThumbnailInfo( - width=info["thumbnail_width"], - height=info["thumbnail_height"], - type=info["thumbnail_type"], - method=info["thumbnail_method"], - ), + thumbnail=info, ) - t_type = file_info.thumbnail_type - t_length = info["thumbnail_length"] - responder = await self.media_storage.fetch_media(file_info) if responder: - await respond_with_responder(request, responder, t_type, t_length) + await respond_with_responder( + request, responder, info.type, info.length + ) return logger.debug("We don't have a thumbnail of that size. Generating") @@ -304,7 +292,7 @@ class ThumbnailResource(RestServlet): desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[Dict[str, Any]], + thumbnail_infos: List[ThumbnailInfo], media_id: str, file_id: str, url_cache: bool, @@ -319,7 +307,7 @@ class ThumbnailResource(RestServlet): desired_height: The desired height, the returned thumbnail may be larger than this. desired_method: The desired method used to generate the thumbnail. desired_type: The desired content-type of the thumbnail. - thumbnail_infos: A list of dictionaries of candidate thumbnails. + thumbnail_infos: A list of thumbnail info of candidate thumbnails. file_id: The ID of the media that a thumbnail is being requested for. url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. @@ -443,7 +431,7 @@ class ThumbnailResource(RestServlet): desired_height: int, desired_method: str, desired_type: str, - thumbnail_infos: List[Dict[str, Any]], + thumbnail_infos: List[ThumbnailInfo], file_id: str, url_cache: bool, server_name: Optional[str], @@ -456,7 +444,7 @@ class ThumbnailResource(RestServlet): desired_height: The desired height, the returned thumbnail may be larger than this. desired_method: The desired method used to generate the thumbnail. desired_type: The desired content-type of the thumbnail. - thumbnail_infos: A list of dictionaries of candidate thumbnails. + thumbnail_infos: A list of thumbnail infos of candidate thumbnails. file_id: The ID of the media that a thumbnail is being requested for. url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. @@ -474,21 +462,25 @@ class ThumbnailResource(RestServlet): if desired_method == "crop": # Thumbnails that match equal or larger sizes of desired width/height. - crop_info_list: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = [] + crop_info_list: List[ + Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + ] = [] # Other thumbnails. - crop_info_list2: List[Tuple[int, int, int, bool, int, Dict[str, Any]]] = [] + crop_info_list2: List[ + Tuple[int, int, int, bool, Optional[int], ThumbnailInfo] + ] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. 
- if info["thumbnail_method"] != "crop": + if info.method != "crop": continue - t_w = info["thumbnail_width"] - t_h = info["thumbnail_height"] + t_w = info.width + t_h = info.height aspect_quality = abs(d_w * t_h - d_h * t_w) min_quality = 0 if d_w <= t_w and d_h <= t_h else 1 size_quality = abs((d_w - t_w) * (d_h - t_h)) - type_quality = desired_type != info["thumbnail_type"] - length_quality = info["thumbnail_length"] + type_quality = desired_type != info.type + length_quality = info.length if t_w >= d_w or t_h >= d_h: crop_info_list.append( ( @@ -513,7 +505,7 @@ class ThumbnailResource(RestServlet): ) # Pick the most appropriate thumbnail. Some values of `desired_width` and # `desired_height` may result in a tie, in which case we avoid comparing on - # the thumbnail info dictionary and pick the thumbnail that appears earlier + # the thumbnail info and pick the thumbnail that appears earlier # in the list of candidates. if crop_info_list: thumbnail_info = min(crop_info_list, key=lambda t: t[:-1])[-1] @@ -521,20 +513,20 @@ class ThumbnailResource(RestServlet): thumbnail_info = min(crop_info_list2, key=lambda t: t[:-1])[-1] elif desired_method == "scale": # Thumbnails that match equal or larger sizes of desired width/height. - info_list: List[Tuple[int, bool, int, Dict[str, Any]]] = [] + info_list: List[Tuple[int, bool, int, ThumbnailInfo]] = [] # Other thumbnails. - info_list2: List[Tuple[int, bool, int, Dict[str, Any]]] = [] + info_list2: List[Tuple[int, bool, int, ThumbnailInfo]] = [] for info in thumbnail_infos: # Skip thumbnails generated with different methods. - if info["thumbnail_method"] != "scale": + if info.method != "scale": continue - t_w = info["thumbnail_width"] - t_h = info["thumbnail_height"] + t_w = info.width + t_h = info.height size_quality = abs((d_w - t_w) * (d_h - t_h)) - type_quality = desired_type != info["thumbnail_type"] - length_quality = info["thumbnail_length"] + type_quality = desired_type != info.type + length_quality = info.length if t_w >= d_w or t_h >= d_h: info_list.append((size_quality, type_quality, length_quality, info)) else: @@ -543,7 +535,7 @@ class ThumbnailResource(RestServlet): ) # Pick the most appropriate thumbnail. Some values of `desired_width` and # `desired_height` may result in a tie, in which case we avoid comparing on - # the thumbnail info dictionary and pick the thumbnail that appears earlier + # the thumbnail info and pick the thumbnail that appears earlier # in the list of candidates. if info_list: thumbnail_info = min(info_list, key=lambda t: t[:-1])[-1] @@ -555,13 +547,7 @@ class ThumbnailResource(RestServlet): file_id=file_id, url_cache=url_cache, server_name=server_name, - thumbnail=ThumbnailInfo( - width=thumbnail_info["thumbnail_width"], - height=thumbnail_info["thumbnail_height"], - type=thumbnail_info["thumbnail_type"], - method=thumbnail_info["thumbnail_method"], - length=thumbnail_info["thumbnail_length"], - ), + thumbnail=thumbnail_info, ) # No matching thumbnail was found. 
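The intent of this patch is easiest to see side by side: thumbnail candidates used to be plain dictionaries keyed by database column name, and are now instances of the `ThumbnailInfo` attrs class from `synapse.media._base`, so the selection logic above gets typed attribute access. A minimal sketch with made-up values:

```python
from synapse.media._base import ThumbnailInfo

# Before: an untyped dict straight from the thumbnails table.
legacy_info = {
    "thumbnail_width": 32,
    "thumbnail_height": 32,
    "thumbnail_method": "crop",
    "thumbnail_type": "image/png",
    "thumbnail_length": 256,
}
width = legacy_info["thumbnail_width"]  # stringly-typed, opaque to mypy

# After: the storage layer hands back concrete objects instead.
info = ThumbnailInfo(width=32, height=32, method="crop", type="image/png", length=256)
width = info.width  # attribute access that the type checker can verify
```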
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 8cebeb5189..2e6b176bd2 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -28,6 +28,7 @@ from typing import ( from synapse.api.constants import Direction from synapse.logging.opentracing import trace +from synapse.media._base import ThumbnailInfo from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -435,8 +436,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): desc="store_url_cache", ) - async def get_local_media_thumbnails(self, media_id: str) -> List[Dict[str, Any]]: - return await self.db_pool.simple_select_list( + async def get_local_media_thumbnails(self, media_id: str) -> List[ThumbnailInfo]: + rows = await self.db_pool.simple_select_list( "local_media_repository_thumbnails", {"media_id": media_id}, ( @@ -448,6 +449,16 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): ), desc="get_local_media_thumbnails", ) + return [ + ThumbnailInfo( + width=row["thumbnail_width"], + height=row["thumbnail_height"], + method=row["thumbnail_method"], + type=row["thumbnail_type"], + length=row["thumbnail_length"], + ) + for row in rows + ] @trace async def store_local_thumbnail( @@ -556,8 +567,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_thumbnails( self, origin: str, media_id: str - ) -> List[Dict[str, Any]]: - return await self.db_pool.simple_select_list( + ) -> List[ThumbnailInfo]: + rows = await self.db_pool.simple_select_list( "remote_media_cache_thumbnails", {"media_origin": origin, "media_id": media_id}, ( @@ -566,10 +577,19 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "thumbnail_method", "thumbnail_type", "thumbnail_length", - "filesystem_id", ), desc="get_remote_media_thumbnails", ) + return [ + ThumbnailInfo( + width=row["thumbnail_width"], + height=row["thumbnail_height"], + method=row["thumbnail_method"], + type=row["thumbnail_type"], + length=row["thumbnail_length"], + ) + for row in rows + ] @trace async def get_remote_media_thumbnail( diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index ba00e35a9e..15f5d644e4 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -34,7 +34,7 @@ from synapse.api.errors import Codes from synapse.events import EventBase from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable -from synapse.media._base import FileInfo +from synapse.media._base import FileInfo, ThumbnailInfo from synapse.media.filepath import MediaFilePaths from synapse.media.media_storage import MediaStorage, ReadableFileWrapper from synapse.media.storage_provider import FileStorageProviderBackend @@ -605,6 +605,8 @@ class MediaRepoTests(unittest.HomeserverTestCase): """Test that choosing between thumbnails with the same quality rating succeeds. 
We are not particular about which thumbnail is chosen.""" + + content_type = self.test_image.content_type.decode() media_repo = self.hs.get_media_repository() thumbnail_resouce = ThumbnailResource( self.hs, media_repo, media_repo.media_storage @@ -615,26 +617,24 @@ class MediaRepoTests(unittest.HomeserverTestCase): desired_width=desired_size, desired_height=desired_size, desired_method=method, - desired_type=self.test_image.content_type, # type: ignore[arg-type] + desired_type=content_type, # Provide two identical thumbnails which are guaranteed to have the same # quality rating. thumbnail_infos=[ - { - "thumbnail_width": 32, - "thumbnail_height": 32, - "thumbnail_method": method, - "thumbnail_type": self.test_image.content_type, - "thumbnail_length": 256, - "filesystem_id": f"thumbnail1{self.test_image.extension.decode()}", - }, - { - "thumbnail_width": 32, - "thumbnail_height": 32, - "thumbnail_method": method, - "thumbnail_type": self.test_image.content_type, - "thumbnail_length": 256, - "filesystem_id": f"thumbnail2{self.test_image.extension.decode()}", - }, + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), + ThumbnailInfo( + width=32, + height=32, + method=method, + type=content_type, + length=256, + ), ], file_id=f"image{self.test_image.extension.decode()}", url_cache=False, From 06bbf1029cf2558213646d3b692621bed5178066 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 6 Oct 2023 11:41:57 -0400 Subject: [PATCH 029/142] Convert simple_select_list_paginate_txn to return tuples. (#16433) --- changelog.d/16433.misc | 1 + synapse/api/presence.py | 4 -- synapse/federation/send_queue.py | 2 +- synapse/rest/admin/federation.py | 8 ++- synapse/storage/database.py | 6 +- synapse/storage/databases/main/presence.py | 58 +++++++++++++------ .../storage/databases/main/transactions.py | 27 +++++---- 7 files changed, 67 insertions(+), 39 deletions(-) create mode 100644 changelog.d/16433.misc diff --git a/changelog.d/16433.misc b/changelog.d/16433.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16433.misc @@ -0,0 +1 @@ +Reduce memory allocations. 
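For callers, the practical effect of this patch is that `simple_select_list_paginate_txn` now returns plain tuples in `retcols` order instead of one dict per row, which is where the memory saving comes from; call sites unpack positionally, usually behind a `cast` to a concrete tuple type, as the presence and transactions stores below are updated to do. A rough sketch of such a call site inside a store transaction (table and column names are invented):

```python
from typing import List, Tuple, cast

rows = cast(
    List[Tuple[str, int]],
    self.db_pool.simple_select_list_paginate_txn(
        txn=txn,
        table="example_table",  # illustrative
        orderby="stream_ordering",
        start=0,
        limit=100,
        retcols=("user_id", "stream_ordering"),
    ),
)
for user_id, stream_ordering in rows:
    ...  # positional unpacking; no per-row dict allocation
```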
diff --git a/synapse/api/presence.py b/synapse/api/presence.py index b78f419994..afef6712e1 100644 --- a/synapse/api/presence.py +++ b/synapse/api/presence.py @@ -80,10 +80,6 @@ class UserPresenceState: def as_dict(self) -> JsonDict: return attr.asdict(self) - @staticmethod - def from_dict(d: JsonDict) -> "UserPresenceState": - return UserPresenceState(**d) - def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState": return attr.evolve(self, **kwargs) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 6520795635..525968bcba 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -395,7 +395,7 @@ class PresenceDestinationsRow(BaseFederationRow): @staticmethod def from_data(data: JsonDict) -> "PresenceDestinationsRow": return PresenceDestinationsRow( - state=UserPresenceState.from_dict(data["state"]), destinations=data["dests"] + state=UserPresenceState(**data["state"]), destinations=data["dests"] ) def to_data(self) -> JsonDict: diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index e0ee55bd0e..8a617af599 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -198,7 +198,13 @@ class DestinationMembershipRestServlet(RestServlet): rooms, total = await self._store.get_destination_rooms_paginate( destination, start, limit, direction ) - response = {"rooms": rooms, "total": total} + response = { + "rooms": [ + {"room_id": room_id, "stream_ordering": stream_ordering} + for room_id, stream_ordering in rooms + ], + "total": total, + } if (start + limit) < total: response["next_token"] = str(start + len(rooms)) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index ca894edd5a..7d8af5c610 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2418,7 +2418,7 @@ class DatabasePool: keyvalues: Optional[Dict[str, Any]] = None, exclude_keyvalues: Optional[Dict[str, Any]] = None, order_direction: str = "ASC", - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """ Executes a SELECT query on the named table with start and limit, of row numbers, which may return zero or number of rows from start to limit, @@ -2447,7 +2447,7 @@ class DatabasePool: order_direction: Whether the results should be ordered "ASC" or "DESC". Returns: - The result as a list of dictionaries. + The result as a list of tuples. 
""" if order_direction not in ["ASC", "DESC"]: raise ValueError("order_direction must be one of 'ASC' or 'DESC'.") @@ -2474,7 +2474,7 @@ class DatabasePool: ) txn.execute(sql, arg_list + [limit, start]) - return cls.cursor_to_dict(txn) + return txn.fetchall() async def simple_search_list( self, diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 805c23f89f..519f05fb60 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -20,6 +20,7 @@ from typing import ( Mapping, Optional, Tuple, + Union, cast, ) @@ -385,28 +386,47 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) limit = 100 offset = 0 while True: - rows = await self.db_pool.runInteraction( - "get_presence_for_all_users", - self.db_pool.simple_select_list_paginate_txn, - "presence_stream", - orderby="stream_id", - start=offset, - limit=limit, - exclude_keyvalues=exclude_keyvalues, - retcols=( - "user_id", - "state", - "last_active_ts", - "last_federation_update_ts", - "last_user_sync_ts", - "status_msg", - "currently_active", + rows = cast( + List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + await self.db_pool.runInteraction( + "get_presence_for_all_users", + self.db_pool.simple_select_list_paginate_txn, + "presence_stream", + orderby="stream_id", + start=offset, + limit=limit, + exclude_keyvalues=exclude_keyvalues, + retcols=( + "user_id", + "state", + "last_active_ts", + "last_federation_update_ts", + "last_user_sync_ts", + "status_msg", + "currently_active", + ), + order_direction="ASC", ), - order_direction="ASC", ) - for row in rows: - users_to_state[row["user_id"]] = UserPresenceState(**row) + for ( + user_id, + state, + last_active_ts, + last_federation_update_ts, + last_user_sync_ts, + status_msg, + currently_active, + ) in rows: + users_to_state[user_id] = UserPresenceState( + user_id=user_id, + state=state, + last_active_ts=last_active_ts, + last_federation_update_ts=last_federation_update_ts, + last_user_sync_ts=last_user_sync_ts, + status_msg=status_msg, + currently_active=bool(currently_active), + ) # We've run out of updates to query if len(rows) < limit: diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 8f70eff809..f35757280d 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -526,7 +526,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): start: int, limit: int, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, int]], int]: """Function to retrieve a paginated list of destination's rooms. This will return a json list of rooms and the total number of rooms. @@ -537,12 +537,14 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): limit: number of rows to retrieve direction: sort ascending or descending by room_id Returns: - A tuple of a dict of rooms and a count of total rooms. + A tuple of a list of room tuples and a count of total rooms. + + Each room tuple is room_id, stream_ordering. 
""" def get_destination_rooms_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, int]], int]: if direction == Direction.BACKWARDS: order = "DESC" else: @@ -556,14 +558,17 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): txn.execute(sql, [destination]) count = cast(Tuple[int], txn.fetchone())[0] - rooms = self.db_pool.simple_select_list_paginate_txn( - txn=txn, - table="destination_rooms", - orderby="room_id", - start=start, - limit=limit, - retcols=("room_id", "stream_ordering"), - order_direction=order, + rooms = cast( + List[Tuple[str, int]], + self.db_pool.simple_select_list_paginate_txn( + txn=txn, + table="destination_rooms", + orderby="room_id", + start=start, + limit=limit, + retcols=("room_id", "stream_ordering"), + order_direction=order, + ), ) return rooms, count From 1f10c208068ef8788b6796c54a3604ae51caf951 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 6 Oct 2023 18:31:52 +0100 Subject: [PATCH 030/142] Apply join rate limiter outside the lineariser (#16441) --- changelog.d/16441.misc | 1 + synapse/handlers/room_member.py | 43 ++++++++++++++++++--------------- tests/rest/client/test_rooms.py | 24 ++++++++++++++++++ 3 files changed, 48 insertions(+), 20 deletions(-) create mode 100644 changelog.d/16441.misc diff --git a/changelog.d/16441.misc b/changelog.d/16441.misc new file mode 100644 index 0000000000..32264a62b2 --- /dev/null +++ b/changelog.d/16441.misc @@ -0,0 +1 @@ +Improve rate limiting logic. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 90343c2306..1b50495af1 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -382,8 +382,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): and persist a new event for the new membership change. Args: - requester: - target: + requester: User requesting the membership change, i.e. the sender of the + desired membership event. + target: Use whose membership should change, i.e. the state_key of the + desired membership event. room_id: membership: @@ -415,7 +417,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): Returns: Tuple of event ID and stream ordering position """ - user_id = target.to_string() if content is None: @@ -475,21 +476,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): (EventTypes.Member, user_id), None ) - if event.membership == Membership.JOIN: - newly_joined = True - if prev_member_event_id: - prev_member_event = await self.store.get_event( - prev_member_event_id - ) - newly_joined = prev_member_event.membership != Membership.JOIN - - # Only rate-limit if the user actually joined the room, otherwise we'll end - # up blocking profile updates. - if newly_joined and ratelimit: - await self._join_rate_limiter_local.ratelimit(requester) - await self._join_rate_per_room_limiter.ratelimit( - requester, key=room_id, update=False - ) with opentracing.start_active_span("handle_new_client_event"): result_event = ( await self.event_creation_handler.handle_new_client_event( @@ -618,6 +604,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): Raises: ShadowBanError if a shadow-banned requester attempts to send an invite. """ + if ratelimit: + if action == Membership.JOIN: + # Only rate-limit if the user isn't already joined to the room, otherwise + # we'll end up blocking profile updates. 
+ ( + current_membership, + _, + ) = await self.store.get_local_current_membership_for_user_in_room( + requester.user.to_string(), + room_id, + ) + if current_membership != Membership.JOIN: + await self._join_rate_limiter_local.ratelimit(requester) + await self._join_rate_per_room_limiter.ratelimit( + requester, key=room_id, update=False + ) + elif action == Membership.INVITE: + await self.ratelimit_invite(requester, room_id, target.to_string()) + if action == Membership.INVITE and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. await self.clock.sleep(random.randint(1, 10)) @@ -794,8 +799,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if effective_membership_state == Membership.INVITE: target_id = target.to_string() - if ratelimit: - await self.ratelimit_invite(requester, room_id, target_id) # block any attempts to invite the server notices mxid if target_id == self._server_notices_mxid: diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 7627823d3f..aaa4f3bba0 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -1444,6 +1444,30 @@ class RoomJoinRatelimitTestCase(RoomBase): room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS ) + @unittest.override_config( + {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} + ) + def test_join_attempts_local_ratelimit(self) -> None: + """Tests that unsuccessful joins that end up being denied are rate-limited.""" + # Create 4 rooms + room_ids = [ + self.helper.create_room_as(self.user_id, is_public=True) for _ in range(4) + ] + # Pre-emptively ban the user who will attempt to join. + joiner_user_id = self.register_user("joiner", "secret") + for room_id in room_ids: + self.helper.ban(room_id, self.user_id, joiner_user_id) + + # Now make a new user try to join some of them. + # The user can make 3 requests, each of which should be denied. + for room_id in room_ids[0:3]: + self.helper.join(room_id, joiner_user_id, expect_code=HTTPStatus.FORBIDDEN) + + # The fourth attempt should be rate limited. + self.helper.join( + room_ids[3], joiner_user_id, expect_code=HTTPStatus.TOO_MANY_REQUESTS + ) + @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) From 32fd9bc673ec025af5b49f4ed0961134a6101c38 Mon Sep 17 00:00:00 2001 From: Christoph <47949835+Sir-Photch@users.noreply.github.com> Date: Mon, 9 Oct 2023 02:16:07 -0700 Subject: [PATCH 031/142] Fix possible AttributeError when account-api is called over unix socket (#16404) Fixes #16396 --- changelog.d/16404.bugfix | 1 + synapse/api/auth/internal.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16404.bugfix diff --git a/changelog.d/16404.bugfix b/changelog.d/16404.bugfix new file mode 100644 index 0000000000..3fd5028b33 --- /dev/null +++ b/changelog.d/16404.bugfix @@ -0,0 +1 @@ +Fixes possbile `AttributeError` when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py index a75f6f2cc4..36ee9c8b8f 100644 --- a/synapse/api/auth/internal.py +++ b/synapse/api/auth/internal.py @@ -115,7 +115,7 @@ class InternalAuth(BaseAuth): Once get_user_by_req has set up the opentracing span, this does the actual work. 
""" try: - ip_addr = request.getClientAddress().host + ip_addr = request.get_client_ip_if_available() user_agent = get_request_user_agent(request) access_token = self.get_access_token_from_request(request) From 3d2f5332c00784d4271cfb681be19d575aedaaae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:40:00 +0100 Subject: [PATCH 032/142] Bump types-bleach from 6.0.0.4 to 6.1.0.0 (#16450) Bumps [types-bleach](https://github.com/python/typeshed) from 6.0.0.4 to 6.1.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-bleach dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 13884e6698..ab61b9b042 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3037,13 +3037,13 @@ twisted = "*" [[package]] name = "types-bleach" -version = "6.0.0.4" +version = "6.1.0.0" description = "Typing stubs for bleach" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-bleach-6.0.0.4.tar.gz", hash = "sha256:357b0226f65c4f20ab3b13ca8d78a6b91c78aad256d8ec168d4e90fc3303ebd4"}, - {file = "types_bleach-6.0.0.4-py3-none-any.whl", hash = "sha256:2b8767eb407c286b7f02803678732e522e04db8d56cbc9f1270bee49627eae92"}, + {file = "types-bleach-6.1.0.0.tar.gz", hash = "sha256:3cf0e55d4618890a00af1151f878b2e2a7a96433850b74e12bede7663d774532"}, + {file = "types_bleach-6.1.0.0-py3-none-any.whl", hash = "sha256:f0bc75d0f6475036ac69afebf37c41d116dfba78dae55db80437caf0fcd35c28"}, ] [[package]] From 45738e273955d83c636a6361f7b79d66a5c458f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:40:09 +0100 Subject: [PATCH 033/142] Bump bleach from 6.0.0 to 6.1.0 (#16451) Bumps [bleach](https://github.com/mozilla/bleach) from 6.0.0 to 6.1.0. - [Changelog](https://github.com/mozilla/bleach/blob/main/CHANGES) - [Commits](https://github.com/mozilla/bleach/compare/v6.0.0...v6.1.0) --- updated-dependencies: - dependency-name: bleach dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index ab61b9b042..bcc6504b51 100644 --- a/poetry.lock +++ b/poetry.lock @@ -208,13 +208,13 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "bleach" -version = "6.0.0" +version = "6.1.0" description = "An easy safelist-based HTML-sanitizing tool." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, - {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, + {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, + {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, ] [package.dependencies] @@ -222,7 +222,7 @@ six = ">=1.9.0" webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.2)"] +css = ["tinycss2 (>=1.1.0,<1.3)"] [[package]] name = "canonicaljson" From 6fb0c431067d106e184376c0a19a5a8784f39e25 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:40:31 +0100 Subject: [PATCH 034/142] Bump psycopg2 from 2.9.8 to 2.9.9 (#16452) Bumps [psycopg2](https://github.com/psycopg/psycopg2) from 2.9.8 to 2.9.9. - [Changelog](https://github.com/psycopg/psycopg2/blob/master/NEWS) - [Commits](https://github.com/psycopg/psycopg2/compare/2.9.8...2.9.9) --- updated-dependencies: - dependency-name: psycopg2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/poetry.lock b/poetry.lock index bcc6504b51..fc06d76d90 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1749,22 +1749,22 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.8" +version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "psycopg2-2.9.8-cp310-cp310-win32.whl", hash = "sha256:2f8594f92bbb5d8b59ffec04e2686c416401e2d4297de1193f8e75235937e71d"}, - {file = "psycopg2-2.9.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9ecbf504c4eaff90139d5c9b95d47275f2b2651e14eba56392b4041fbf4c2b3"}, - {file = "psycopg2-2.9.8-cp311-cp311-win32.whl", hash = "sha256:65f81e72136d8b9ac8abf5206938d60f50da424149a43b6073f1546063c0565e"}, - {file = "psycopg2-2.9.8-cp311-cp311-win_amd64.whl", hash = "sha256:f7e62095d749359b7854143843f27edd7dccfcd3e1d833b880562aa5702d92b0"}, - {file = "psycopg2-2.9.8-cp37-cp37m-win32.whl", hash = "sha256:81b21424023a290a40884c7f8b0093ba6465b59bd785c18f757e76945f65594c"}, - {file = "psycopg2-2.9.8-cp37-cp37m-win_amd64.whl", hash = "sha256:67c2f32f3aba79afb15799575e77ee2db6b46b8acf943c21d34d02d4e1041d50"}, - {file = "psycopg2-2.9.8-cp38-cp38-win32.whl", hash = "sha256:287a64ef168ef7fb9f382964705ff664b342bfff47e7242bf0a04ef203269dd5"}, - {file = "psycopg2-2.9.8-cp38-cp38-win_amd64.whl", hash = "sha256:dcde3cad4920e29e74bf4e76c072649764914facb2069e6b7fa1ddbebcd49e9f"}, - {file = "psycopg2-2.9.8-cp39-cp39-win32.whl", hash = "sha256:d4ad050ea50a16731d219c3a85e8f2debf49415a070f0b8331ccc96c81700d9b"}, - {file = "psycopg2-2.9.8-cp39-cp39-win_amd64.whl", hash = "sha256:d39bb3959788b2c9d7bf5ff762e29f436172b241cd7b47529baac77746fd7918"}, - {file = "psycopg2-2.9.8.tar.gz", hash = "sha256:3da6488042a53b50933244085f3f91803f1b7271f970f3e5536efa69314f6a49"}, + {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, + {file = 
"psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, + {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, + {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, + {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, + {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, + {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, + {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, + {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, + {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, + {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, ] [[package]] From 3727b84a511bdcf7e00e0382ab21f28008cab0f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:40:55 +0100 Subject: [PATCH 035/142] Bump netaddr from 0.8.0 to 0.9.0 (#16453) Bumps [netaddr](https://github.com/drkjam/netaddr) from 0.8.0 to 0.9.0. - [Release notes](https://github.com/drkjam/netaddr/releases) - [Changelog](https://github.com/netaddr/netaddr/blob/master/CHANGELOG) - [Commits](https://github.com/drkjam/netaddr/compare/0.8.0...0.9.0) --- updated-dependencies: - dependency-name: netaddr dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index fc06d76d90..a68e301ca4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1557,13 +1557,13 @@ testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4, [[package]] name = "netaddr" -version = "0.8.0" +version = "0.9.0" description = "A network address manipulation library for Python" optional = false python-versions = "*" files = [ - {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, - {file = "netaddr-0.8.0.tar.gz", hash = "sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243"}, + {file = "netaddr-0.9.0-py3-none-any.whl", hash = "sha256:5148b1055679d2a1ec070c521b7db82137887fabd6d7e37f5199b44f775c3bb1"}, + {file = "netaddr-0.9.0.tar.gz", hash = "sha256:7b46fa9b1a2d71fd5de9e4a3784ef339700a53a08c8040f08baf5f1194da0128"}, ] [[package]] From 0a67743d9e3fe5f8661d43d853980a0111806c7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:46:17 +0100 Subject: [PATCH 036/142] Bump ruff from 0.0.290 to 0.0.292 (#16449) * Bump ruff from 0.0.290 to 0.0.292 Bumps [ruff](https://github.com/astral-sh/ruff) from 0.0.290 to 0.0.292. 
- [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/astral-sh/ruff/compare/v0.0.290...v0.0.292) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Fix up lint --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Erik Johnston --- poetry.lock | 38 ++++++++++++++++----------------- pyproject.toml | 2 +- synapse/handlers/device.py | 1 - synapse/handlers/federation.py | 2 -- synapse/handlers/message.py | 2 -- synapse/handlers/room.py | 1 - synapse/handlers/room_member.py | 2 -- 7 files changed, 20 insertions(+), 28 deletions(-) diff --git a/poetry.lock b/poetry.lock index a68e301ca4..a1a2b83764 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2427,28 +2427,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.290" +version = "0.0.292" description = "An extremely fast Python linter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.290-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:0e2b09ac4213b11a3520221083866a5816616f3ae9da123037b8ab275066fbac"}, - {file = "ruff-0.0.290-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4ca6285aa77b3d966be32c9a3cd531655b3d4a0171e1f9bf26d66d0372186767"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35e3550d1d9f2157b0fcc77670f7bb59154f223bff281766e61bdd1dd854e0c5"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d748c8bd97874f5751aed73e8dde379ce32d16338123d07c18b25c9a2796574a"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:982af5ec67cecd099e2ef5e238650407fb40d56304910102d054c109f390bf3c"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bbd37352cea4ee007c48a44c9bc45a21f7ba70a57edfe46842e346651e2b995a"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d9be6351b7889462912e0b8185a260c0219c35dfd920fb490c7f256f1d8313e"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75cdc7fe32dcf33b7cec306707552dda54632ac29402775b9e212a3c16aad5e6"}, - {file = "ruff-0.0.290-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb07f37f7aecdbbc91d759c0c09870ce0fb3eed4025eebedf9c4b98c69abd527"}, - {file = "ruff-0.0.290-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2ab41bc0ba359d3f715fc7b705bdeef19c0461351306b70a4e247f836b9350ed"}, - {file = "ruff-0.0.290-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:150bf8050214cea5b990945b66433bf9a5e0cef395c9bc0f50569e7de7540c86"}, - {file = "ruff-0.0.290-py3-none-musllinux_1_2_i686.whl", hash = "sha256:75386ebc15fe5467248c039f5bf6a0cfe7bfc619ffbb8cd62406cd8811815fca"}, - {file = "ruff-0.0.290-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ac93eadf07bc4ab4c48d8bb4e427bf0f58f3a9c578862eb85d99d704669f5da0"}, - {file = "ruff-0.0.290-py3-none-win32.whl", hash = "sha256:461fbd1fb9ca806d4e3d5c745a30e185f7cf3ca77293cdc17abb2f2a990ad3f7"}, - {file = "ruff-0.0.290-py3-none-win_amd64.whl", hash = "sha256:f1f49f5ec967fd5778813780b12a5650ab0ebcb9ddcca28d642c689b36920796"}, - {file = "ruff-0.0.290-py3-none-win_arm64.whl", hash = 
"sha256:ae5a92dfbdf1f0c689433c223f8dac0782c2b2584bd502dfdbc76475669f1ba1"}, - {file = "ruff-0.0.290.tar.gz", hash = "sha256:949fecbc5467bb11b8db810a7fa53c7e02633856ee6bd1302b2f43adcd71b88d"}, + {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"}, + {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"}, + {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"}, + {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"}, + {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"}, + {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"}, ] [[package]] @@ -3444,4 +3444,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "364c309486e9d93d4da8a1a3784d5ecd7d2a9734cf84dcd4a991f2cd54f0b5b5" +content-hash = "a08543c65f18cc7e9dea648e89c18ab88fc1747aa2e029aa208f777fc3db06dd" diff --git a/pyproject.toml b/pyproject.toml index b22172291a..ce51021f9c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -321,7 +321,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. 
isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.290" +ruff = "0.0.292" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 86ad96d030..50df4f2b06 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -845,7 +845,6 @@ class DeviceHandler(DeviceWorkerHandler): else: assert max_stream_id == stream_id # Avoid moving `room_id` backwards. - pass if self._handle_new_device_update_new_data: continue diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 807a0867cc..9d72794e8b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1497,7 +1497,6 @@ class FederationHandler: # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass else: destinations = {x.split(":", 1)[-1] for x in (sender_user_id, room_id)} @@ -1573,7 +1572,6 @@ class FederationHandler: # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass async def add_display_name_to_third_party_invite( self, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d0d4626ed6..8de4b8e816 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1129,7 +1129,6 @@ class EventCreationHandler: # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass # we know it was persisted, so must have a stream ordering assert ev.internal_metadata.stream_ordering @@ -2034,7 +2033,6 @@ class EventCreationHandler: # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass return True except AuthError: logger.info( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 4cdf0a8502..97c9f01245 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -261,7 +261,6 @@ class RoomCreationHandler: # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass # This is to satisfy mypy and should never happen raise PartialStateConflictError() diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1b50495af1..130eee7e1d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -500,7 +500,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass # we know it was persisted, so should have a stream ordering assert result_event.internal_metadata.stream_ordering @@ -2005,7 +2004,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): # in the meantime and context needs to be recomputed, so let's do so. if i == max_retries - 1: raise e - pass # we know it was persisted, so must have a stream ordering assert result_event.internal_metadata.stream_ordering From a6abee36bc53feb4e5ffc6284af0b059f8f0ab2c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 9 Oct 2023 14:22:54 +0300 Subject: [PATCH 037/142] Don't block CI on sign-off (#16454) As this doesn't work with the private sign off flow. 
--- .github/workflows/tests.yml | 1 - changelog.d/16454.misc | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/16454.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 96750cb6c8..7dbd83908e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -280,7 +280,6 @@ jobs: - check-lockfile - lint-clippy - lint-rustfmt - - check-signoff runs-on: ubuntu-latest steps: - run: "true" diff --git a/changelog.d/16454.misc b/changelog.d/16454.misc new file mode 100644 index 0000000000..1e75dc436f --- /dev/null +++ b/changelog.d/16454.misc @@ -0,0 +1 @@ +Do not block running of CI behind the check for sign-off on PRs. From 8902b3031d1437b6a8779cc4831e3f17d732a6a6 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 9 Oct 2023 14:41:17 +0000 Subject: [PATCH 038/142] Disable statement timeout whilst purging rooms (#16455) * Disable statement timeout whilst purging rooms * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Note the introduction version --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16455.bugfix | 1 + synapse/storage/databases/main/purge_events.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/16455.bugfix diff --git a/changelog.d/16455.bugfix b/changelog.d/16455.bugfix new file mode 100644 index 0000000000..653a25d3b6 --- /dev/null +++ b/changelog.d/16455.bugfix @@ -0,0 +1 @@ +Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index dea0e0458c..1e11bf2706 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -89,6 +89,11 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # furthermore, we might already have the table from a previous (failed) # purge attempt, so let's drop the table first. + if isinstance(self.database_engine, PostgresEngine): + # Disable statement timeouts for this transaction; purging rooms can + # take a while! + txn.execute("SET LOCAL statement_timeout = 0") + txn.execute("DROP TABLE IF EXISTS events_to_purge") txn.execute( From 28fd28e92e9743ae98833fdb2a233aee64568a06 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 10 Oct 2023 10:33:39 +0100 Subject: [PATCH 039/142] Add DB indices to speed up purging rooms (#16457) --- changelog.d/16457.bugfix | 1 + .../storage/databases/main/account_data.py | 7 +++++++ .../storage/databases/main/e2e_room_keys.py | 7 +++++++ .../82/04_add_indices_for_purging_rooms.sql | 20 +++++++++++++++++++ 4 files changed, 35 insertions(+) create mode 100644 changelog.d/16457.bugfix create mode 100644 synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql diff --git a/changelog.d/16457.bugfix b/changelog.d/16457.bugfix new file mode 100644 index 0000000000..b9a95cc510 --- /dev/null +++ b/changelog.d/16457.bugfix @@ -0,0 +1 @@ +Improve the performance of purging rooms, particularly encrypted rooms. 
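Both indices added below target the `room_id` column, because purging a room issues per-table deletes keyed on `room_id`, and without an index each of those deletes degrades to a sequential scan on large tables. A minimal illustrative sketch of that delete pattern follows; the helper name, loop and table list are assumptions for illustration, not the actual purge code:

    # Illustrative only: a room purge deletes rows table-by-table keyed on
    # room_id, so a plain index on that column lets each delete run as an
    # index scan rather than a full table scan (e.g. on e2e_room_keys).
    def delete_room_rows(txn, room_id: str) -> None:
        for table in ("room_account_data", "e2e_room_keys"):
            txn.execute(f"DELETE FROM {table} WHERE room_id = ?", (room_id,))
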
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 16c284807a..39498d52c6 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -103,6 +103,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) "AccountDataAndTagsChangeCache", account_max ) + self.db_pool.updates.register_background_index_update( + update_name="room_account_data_index_room_id", + index_name="room_account_data_room_id", + table="room_account_data", + columns=("room_id",), + ) + self.db_pool.updates.register_background_update_handler( "delete_account_data_for_deactivated_users", self._delete_account_data_for_deactivated_users, diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index bc7c6a6346..aac4cfb054 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -53,6 +53,13 @@ class EndToEndRoomKeyBackgroundStore(SQLBaseStore): ): super().__init__(database, db_conn, hs) + self.db_pool.updates.register_background_index_update( + update_name="e2e_room_keys_index_room_id", + index_name="e2e_room_keys_room_id", + table="e2e_room_keys", + columns=("room_id",), + ) + self.db_pool.updates.register_background_update_handler( "delete_e2e_backup_keys_for_deactivated_users", self._delete_e2e_backup_keys_for_deactivated_users, diff --git a/synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql b/synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql new file mode 100644 index 0000000000..fc948166e6 --- /dev/null +++ b/synapse/storage/schema/main/delta/82/04_add_indices_for_purging_rooms.sql @@ -0,0 +1,20 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8204, 'e2e_room_keys_index_room_id', '{}'); + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8204, 'room_account_data_index_room_id', '{}'); From 25c412b3c57962104d7a9452f03a0fca7e999bc2 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 10 Oct 2023 10:57:54 +0100 Subject: [PATCH 040/142] 1.94.0 --- CHANGES.md | 5 +++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6c30c40858..123ac25460 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,8 @@ +# Synapse 1.94.0 (2023-10-10) + +No significant changes since 1.94.0rc1. + + # Synapse 1.94.0rc1 (2023-10-03) ### Features diff --git a/debian/changelog b/debian/changelog index 78da69ebb0..57479ca8e5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.94.0) stable; urgency=medium + + * New Synapse release 1.94.0. 
+ + -- Synapse Packaging team Tue, 10 Oct 2023 10:57:41 +0100 + matrix-synapse-py3 (1.94.0~rc1) stable; urgency=medium * New Synapse release 1.94.0rc1. diff --git a/pyproject.toml b/pyproject.toml index b22172291a..672dfa8a8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.94.0rc1" +version = "1.94.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 4f87edc6e82055372bbf424c8e27bcdbbd566381 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 10 Oct 2023 13:20:59 +0100 Subject: [PATCH 041/142] Add security advisory note to the changelog --- CHANGES.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 123ac25460..0ee3970e2b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,19 @@ # Synapse 1.94.0 (2023-10-10) No significant changes since 1.94.0rc1. +However, please take note of the security advisory that follows. + +## Security advisory + +The following issue is fixed in 1.94.0 (and RC). + +- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) — Moderate Severity + + A malicious server ACL event can impact performance temporarily or permanently leading to a persistent denial of service. + + Homeservers running on a closed federation (which presumably do not need to use server ACLs) are not affected. + +See the advisory for more details. If you have any questions, email security@matrix.org. # Synapse 1.94.0rc1 (2023-10-03) From 5f12090fd7ee5e062620ef706f36bdd2a2e53896 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 10 Oct 2023 11:38:56 -0400 Subject: [PATCH 042/142] Add CVE number for advisory GHSA-5chr-wjw5-3gq4. --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0ee3970e2b..6f42ebba93 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,7 +7,7 @@ However, please take note of the security advisory that follows. The following issue is fixed in 1.94.0 (and RC). -- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) — Moderate Severity +- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) / [CVE-2023-45129](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-45129) — Moderate Severity A malicious server ACL event can impact performance temporarily or permanently leading to a persistent denial of service. From f1e43018b7d526f3e969796bf882c1848b663449 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 10 Oct 2023 12:16:36 -0400 Subject: [PATCH 043/142] Inline simple_search_list/simple_search_list_txn. (#16434) This only has a single use and is over abstracted. Inline it so that we can improve type hints. --- changelog.d/16434.misc | 1 + synapse/rest/admin/users.py | 13 ++++- synapse/storage/database.py | 62 ---------------------- synapse/storage/databases/main/__init__.py | 46 ++++++++++++---- 4 files changed, 49 insertions(+), 73 deletions(-) create mode 100644 changelog.d/16434.misc diff --git a/changelog.d/16434.misc b/changelog.d/16434.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16434.misc @@ -0,0 +1 @@ +Reduce memory allocations. 
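The "Reduce memory allocations" entry above comes from dropping the per-row dicts: the helper removed in the diff below went through `cursor_to_dict`, which allocates a dict for every row and repeats the column names as keys, whereas the inlined query keeps the driver's row tuples and unpacks them by position at the single call site. A rough sketch of the difference, using hypothetical helper names rather than the real Synapse functions:

    # Roughly what the removed dict-based path did: one dict allocated per
    # row, with the column headers duplicated as keys for every row.
    def rows_as_dicts(txn):
        headers = [column[0] for column in txn.description]
        return [dict(zip(headers, row)) for row in txn.fetchall()]

    # The inlined form returns the raw row tuples and lets the single caller
    # unpack the columns by position instead.
    def rows_as_tuples(txn):
        return txn.fetchall()
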
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 5b743a1d03..cd995e8dbb 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -842,7 +842,18 @@ class SearchUsersRestServlet(RestServlet): logger.info("term: %s ", term) ret = await self.store.search_users(term) - return HTTPStatus.OK, ret + results = [ + { + "name": name, + "password_hash": password_hash, + "is_guest": bool(is_guest), + "admin": bool(admin), + "user_type": user_type, + } + for name, password_hash, is_guest, admin, user_type in ret + ] + + return HTTPStatus.OK, results class UserAdminServlet(RestServlet): diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 7d8af5c610..7714ec2bf9 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2476,68 +2476,6 @@ class DatabasePool: return txn.fetchall() - async def simple_search_list( - self, - table: str, - term: Optional[str], - col: str, - retcols: Collection[str], - desc: str = "simple_search_list", - ) -> Optional[List[Dict[str, Any]]]: - """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. - - Args: - table: the table name - term: term for searching the table matched to a column. - col: column to query term should be matched to - retcols: the names of the columns to return - - Returns: - A list of dictionaries or None. - """ - - return await self.runInteraction( - desc, - self.simple_search_list_txn, - table, - term, - col, - retcols, - db_autocommit=True, - ) - - @classmethod - def simple_search_list_txn( - cls, - txn: LoggingTransaction, - table: str, - term: Optional[str], - col: str, - retcols: Iterable[str], - ) -> Optional[List[Dict[str, Any]]]: - """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. - - Args: - txn: Transaction object - table: the table name - term: term for searching the table matched to a column. - col: column to query term should be matched to - retcols: the names of the columns to return - - Returns: - None if no term is given, otherwise a list of dictionaries. - """ - if term: - sql = "SELECT %s FROM %s WHERE %s LIKE ?" % (", ".join(retcols), table, col) - termvalues = ["%%" + term + "%%"] - txn.execute(sql, termvalues) - else: - return None - - return cls.cursor_to_dict(txn) - def make_in_list_sql_clause( database_engine: BaseDatabaseEngine, column: str, iterable: Collection[Any] diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index dfcbf0a175..840d725114 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig @@ -296,7 +296,11 @@ class DataStore( "get_users_paginate_txn", get_users_paginate_txn ) - async def search_users(self, term: str) -> Optional[List[JsonDict]]: + async def search_users( + self, term: str + ) -> List[ + Tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] + ]: """Function to search users list for one or more users with the matched term. @@ -304,15 +308,37 @@ class DataStore( term: search term Returns: - A list of dictionaries or None. 
+ A list of tuples of name, password_hash, is_guest, admin, user_type or None. """ - return await self.db_pool.simple_search_list( - table="users", - term=term, - col="name", - retcols=["name", "password_hash", "is_guest", "admin", "user_type"], - desc="search_users", - ) + + def search_users( + txn: LoggingTransaction, + ) -> List[ + Tuple[str, Optional[str], Union[int, bool], Union[int, bool], Optional[str]] + ]: + search_term = "%%" + term + "%%" + + sql = """ + SELECT name, password_hash, is_guest, admin, user_type + FROM users + WHERE name LIKE ? + """ + txn.execute(sql, (search_term,)) + + return cast( + List[ + Tuple[ + str, + Optional[str], + Union[int, bool], + Union[int, bool], + Optional[str], + ] + ], + txn.fetchall(), + ) + + return await self.db_pool.runInteraction("search_users", search_users) def check_database_before_upgrade( From d6b7d49a61ca4c6f87d93ff9eb6a9fa6faef443c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 11 Oct 2023 07:50:34 -0400 Subject: [PATCH 044/142] Handle content types with parameters. (#16440) --- changelog.d/16440.bugfix | 1 + synapse/media/_base.py | 4 +++- tests/media/test_base.py | 19 ++++++++++++++++++- 3 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16440.bugfix diff --git a/changelog.d/16440.bugfix b/changelog.d/16440.bugfix new file mode 100644 index 0000000000..6ce0b1e4af --- /dev/null +++ b/changelog.d/16440.bugfix @@ -0,0 +1 @@ +Properly return inline media when content types have parameters. diff --git a/synapse/media/_base.py b/synapse/media/_base.py index 13345acf75..860e5ddca2 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py @@ -148,7 +148,9 @@ def add_file_headers( # A strict subset of content types is allowed to be inlined so that they may # be viewed directly in a browser. Other file types are forced to be downloads. - if media_type.lower() in INLINE_CONTENT_TYPES: + # + # Only the type & subtype are important, parameters can be ignored. + if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES: disposition = "inline" else: disposition = "attachment" diff --git a/tests/media/test_base.py b/tests/media/test_base.py index 119d7ba66f..144948f23c 100644 --- a/tests/media/test_base.py +++ b/tests/media/test_base.py @@ -42,18 +42,35 @@ class GetFileNameFromHeadersTests(unittest.TestCase): class AddFileHeadersTests(unittest.TestCase): TEST_CASES = { + # Safe values use inline. "text/plain": b"inline; filename=file.name", "text/csv": b"inline; filename=file.name", "image/png": b"inline; filename=file.name", + # Unlisted values are set to attachment. "text/html": b"attachment; filename=file.name", "any/thing": b"attachment; filename=file.name", + # Parameters get ignored. + "text/plain; charset=utf-8": b"inline; filename=file.name", + "text/markdown; charset=utf-8; variant=CommonMark": b"attachment; filename=file.name", + # Parsed as lowercase. + "Text/Plain": b"inline; filename=file.name", + # Bad values don't choke. + "": b"attachment; filename=file.name", + ";": b"attachment; filename=file.name", } def test_content_disposition(self) -> None: for media_type, expected in self.TEST_CASES.items(): request = Mock() add_file_headers(request, media_type, 0, "file.name") - request.setHeader.assert_any_call(b"Content-Disposition", expected) + # There should be a single call to set Content-Disposition. 
+ for call in request.setHeader.call_args_list: + args, _ = call + if args[0] == b"Content-Disposition": + break + else: + self.fail(f"No Content-Disposition header found for {media_type}") + self.assertEqual(args[1], expected, media_type) def test_no_filename(self) -> None: request = Mock() From a4904dcb04b31ce8ed0deaa2c5c80657780f6618 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 11 Oct 2023 13:24:56 -0400 Subject: [PATCH 045/142] Convert simple_select_many_batch, simple_select_many_txn to tuples. (#16444) --- changelog.d/16444.misc | 1 + synapse/storage/database.py | 18 ++- synapse/storage/databases/main/deviceinbox.py | 42 +++---- synapse/storage/databases/main/devices.py | 53 +++++---- .../storage/databases/main/end_to_end_keys.py | 19 ++-- .../databases/main/event_federation.py | 107 ++++++++++-------- synapse/storage/databases/main/events.py | 79 +++++++------ .../databases/main/events_bg_updates.py | 62 +++++----- .../storage/databases/main/events_worker.py | 36 +++--- synapse/storage/databases/main/keys.py | 46 ++++---- synapse/storage/databases/main/presence.py | 51 ++++++--- synapse/storage/databases/main/push_rule.py | 97 +++++++++++----- synapse/storage/databases/main/relations.py | 19 ++-- synapse/storage/databases/main/room.py | 17 +-- synapse/storage/databases/main/roommember.py | 78 +++++++------ synapse/storage/databases/main/state.py | 62 ++++++---- synapse/storage/databases/main/stats.py | 37 +++--- .../storage/databases/main/transactions.py | 28 +++-- synapse/storage/databases/main/ui_auth.py | 41 ++++--- .../storage/databases/main/user_directory.py | 54 ++++----- .../databases/main/user_erasure_store.py | 19 ++-- synapse/storage/databases/state/store.py | 54 +++++---- tests/storage/test_event_chain.py | 64 ++++++----- 23 files changed, 641 insertions(+), 443 deletions(-) create mode 100644 changelog.d/16444.misc diff --git a/changelog.d/16444.misc b/changelog.d/16444.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16444.misc @@ -0,0 +1 @@ +Reduce memory allocations. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 7714ec2bf9..81f661160c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1874,9 +1874,9 @@ class DatabasePool: keyvalues: Optional[Dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows. Filters rows by whether the value of `column` is in `iterable`. @@ -1888,10 +1888,13 @@ class DatabasePool: keyvalues: dict of column names and values to select the rows with desc: description of the transaction, for logging and metrics batch_size: the number of rows for each select query + + Returns: + The results as a list of tuples. """ keyvalues = keyvalues or {} - results: List[Dict[str, Any]] = [] + results: List[Tuple[Any, ...]] = [] for chunk in batch_iter(iterable, batch_size): rows = await self.runInteraction( @@ -1918,9 +1921,9 @@ class DatabasePool: iterable: Collection[Any], keyvalues: Dict[str, Any], retcols: Iterable[str], - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows. Filters rows by whether the value of `column` is in `iterable`. 
@@ -1931,6 +1934,9 @@ class DatabasePool: iterable: list keyvalues: dict of column names and values to select the rows with retcols: list of strings giving the names of the columns to return + + Returns: + The results as a list of tuples. """ if not iterable: return [] @@ -1949,7 +1955,7 @@ class DatabasePool: ) txn.execute(sql, values) - return cls.cursor_to_dict(txn) + return txn.fetchall() async def simple_update( self, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 744e98c6d0..1cf649d371 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -344,18 +344,19 @@ class DeviceInboxWorkerStore(SQLBaseStore): # Note that this is more efficient than just dropping `device_id` from the query, # since device_inbox has an index on `(user_id, device_id, stream_id)` if not device_ids_to_query: - user_device_dicts = self.db_pool.simple_select_many_txn( - txn, - table="devices", - column="user_id", - iterable=user_ids_to_query, - keyvalues={"hidden": False}, - retcols=("device_id",), + user_device_dicts = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="devices", + column="user_id", + iterable=user_ids_to_query, + keyvalues={"hidden": False}, + retcols=("device_id",), + ), ) - device_ids_to_query.update( - {row["device_id"] for row in user_device_dicts} - ) + device_ids_to_query.update({row[0] for row in user_device_dicts}) if not device_ids_to_query: # We've ended up with no devices to query. @@ -845,20 +846,21 @@ class DeviceInboxWorkerStore(SQLBaseStore): # We exclude hidden devices (such as cross-signing keys) here as they are # not expected to receive to-device messages. - rows = self.db_pool.simple_select_many_txn( - txn, - table="devices", - keyvalues={"user_id": user_id, "hidden": False}, - column="device_id", - iterable=devices, - retcols=("device_id",), + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="devices", + keyvalues={"user_id": user_id, "hidden": False}, + column="device_id", + iterable=devices, + retcols=("device_id",), + ), ) - for row in rows: + for (device_id,) in rows: # Only insert into the local inbox if the device exists on # this server - device_id = row["device_id"] - with start_active_span("serialise_to_device_message"): msg = messages_by_device[device_id] set_tag(SynapseTags.TO_DEVICE_TYPE, msg["type"]) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 9f3804a504..fc23d18eba 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1052,16 +1052,19 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_device_list_last_stream_id_for_remotes( self, user_ids: Iterable[str] ) -> Mapping[str, Optional[str]]: - rows = await self.db_pool.simple_select_many_batch( - table="device_lists_remote_extremeties", - column="user_id", - iterable=user_ids, - retcols=("user_id", "stream_id"), - desc="get_device_list_last_stream_id_for_remotes", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_many_batch( + table="device_lists_remote_extremeties", + column="user_id", + iterable=user_ids, + retcols=("user_id", "stream_id"), + desc="get_device_list_last_stream_id_for_remotes", + ), ) results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids} - results.update({row["user_id"]: row["stream_id"] for row in rows}) + 
results.update(rows) return results @@ -1077,22 +1080,30 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): The IDs of users whose device lists need resync. """ if user_ids: - rows = await self.db_pool.simple_select_many_batch( - table="device_lists_remote_resync", - column="user_id", - iterable=user_ids, - retcols=("user_id",), - desc="get_user_ids_requiring_device_list_resync_with_iterable", - ) - else: - rows = await self.db_pool.simple_select_list( - table="device_lists_remote_resync", - keyvalues=None, - retcols=("user_id",), - desc="get_user_ids_requiring_device_list_resync", + row_tuples = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="device_lists_remote_resync", + column="user_id", + iterable=user_ids, + retcols=("user_id",), + desc="get_user_ids_requiring_device_list_resync_with_iterable", + ), ) - return {row["user_id"] for row in rows} + return {row[0] for row in row_tuples} + else: + rows = cast( + List[Dict[str, str]], + await self.db_pool.simple_select_list( + table="device_lists_remote_resync", + keyvalues=None, + retcols=("user_id",), + desc="get_user_ids_requiring_device_list_resync", + ), + ) + + return {row["user_id"] for row in rows} async def mark_remote_users_device_caches_as_stale( self, user_ids: StrCollection diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 749ae54e20..f13d776b0d 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -493,15 +493,18 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker A map from (algorithm, key_id) to json string for key """ - rows = await self.db_pool.simple_select_many_batch( - table="e2e_one_time_keys_json", - column="key_id", - iterable=key_ids, - retcols=("algorithm", "key_id", "key_json"), - keyvalues={"user_id": user_id, "device_id": device_id}, - desc="add_e2e_one_time_keys_check", + rows = cast( + List[Tuple[str, str, str]], + await self.db_pool.simple_select_many_batch( + table="e2e_one_time_keys_json", + column="key_id", + iterable=key_ids, + retcols=("algorithm", "key_id", "key_json"), + keyvalues={"user_id": user_id, "device_id": device_id}, + desc="add_e2e_one_time_keys_check", + ), ) - result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} + result = {(algorithm, key_id): key_json for algorithm, key_id, key_json in rows} log_kv({"message": "Fetched one time keys for user", "one_time_keys": result}) return result diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index afffa54985..4f80ce75cc 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1049,15 +1049,18 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas Args: event_ids: The event IDs to calculate the max depth of. 
""" - rows = await self.db_pool.simple_select_many_batch( - table="events", - column="event_id", - iterable=event_ids, - retcols=( - "event_id", - "depth", + rows = cast( + List[Tuple[str, int]], + await self.db_pool.simple_select_many_batch( + table="events", + column="event_id", + iterable=event_ids, + retcols=( + "event_id", + "depth", + ), + desc="get_max_depth_of", ), - desc="get_max_depth_of", ) if not rows: @@ -1065,10 +1068,10 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas else: max_depth_event_id = "" current_max_depth = 0 - for row in rows: - if row["depth"] > current_max_depth: - max_depth_event_id = row["event_id"] - current_max_depth = row["depth"] + for event_id, depth in rows: + if depth > current_max_depth: + max_depth_event_id = event_id + current_max_depth = depth return max_depth_event_id, current_max_depth @@ -1078,15 +1081,18 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas Args: event_ids: The event IDs to calculate the max depth of. """ - rows = await self.db_pool.simple_select_many_batch( - table="events", - column="event_id", - iterable=event_ids, - retcols=( - "event_id", - "depth", + rows = cast( + List[Tuple[str, int]], + await self.db_pool.simple_select_many_batch( + table="events", + column="event_id", + iterable=event_ids, + retcols=( + "event_id", + "depth", + ), + desc="get_min_depth_of", ), - desc="get_min_depth_of", ) if not rows: @@ -1094,10 +1100,10 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas else: min_depth_event_id = "" current_min_depth = MAX_DEPTH - for row in rows: - if row["depth"] < current_min_depth: - min_depth_event_id = row["event_id"] - current_min_depth = row["depth"] + for event_id, depth in rows: + if depth < current_min_depth: + min_depth_event_id = event_id + current_min_depth = depth return min_depth_event_id, current_min_depth @@ -1553,19 +1559,18 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas A filtered down list of `event_ids` that have previous failed pull attempts. """ - rows = await self.db_pool.simple_select_many_batch( - table="event_failed_pull_attempts", - column="event_id", - iterable=event_ids, - keyvalues={}, - retcols=("event_id",), - desc="get_event_ids_with_failed_pull_attempts", + rows = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="event_failed_pull_attempts", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id",), + desc="get_event_ids_with_failed_pull_attempts", + ), ) - event_ids_with_failed_pull_attempts: Set[str] = { - row["event_id"] for row in rows - } - - return event_ids_with_failed_pull_attempts + return {row[0] for row in rows} @trace async def get_event_ids_to_not_pull_from_backoff( @@ -1585,32 +1590,34 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas A dictionary of event_ids that should not be attempted to be pulled and the next timestamp at which we may try pulling them again. 
""" - event_failed_pull_attempts = await self.db_pool.simple_select_many_batch( - table="event_failed_pull_attempts", - column="event_id", - iterable=event_ids, - keyvalues={}, - retcols=( - "event_id", - "last_attempt_ts", - "num_attempts", + event_failed_pull_attempts = cast( + List[Tuple[str, int, int]], + await self.db_pool.simple_select_many_batch( + table="event_failed_pull_attempts", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=( + "event_id", + "last_attempt_ts", + "num_attempts", + ), + desc="get_event_ids_to_not_pull_from_backoff", ), - desc="get_event_ids_to_not_pull_from_backoff", ) current_time = self._clock.time_msec() event_ids_with_backoff = {} - for event_failed_pull_attempt in event_failed_pull_attempts: - event_id = event_failed_pull_attempt["event_id"] + for event_id, last_attempt_ts, num_attempts in event_failed_pull_attempts: # Exponential back-off (up to the upper bound) so we don't try to # pull the same event over and over. ex. 2hr, 4hr, 8hr, 16hr, etc. backoff_end_time = ( - event_failed_pull_attempt["last_attempt_ts"] + last_attempt_ts + ( 2 ** min( - event_failed_pull_attempt["num_attempts"], + num_attempts, BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS, ) ) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index d4dcdb898c..ef6766b5e0 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -27,6 +27,7 @@ from typing import ( Optional, Set, Tuple, + Union, cast, ) @@ -501,16 +502,19 @@ class PersistEventsStore: # We ignore legacy rooms that we aren't filling the chain cover index # for. - rows = self.db_pool.simple_select_many_txn( - txn, - table="rooms", - column="room_id", - iterable={event.room_id for event in events if event.is_state()}, - keyvalues={}, - retcols=("room_id", "has_auth_chain_index"), + rows = cast( + List[Tuple[str, Optional[Union[int, bool]]]], + self.db_pool.simple_select_many_txn( + txn, + table="rooms", + column="room_id", + iterable={event.room_id for event in events if event.is_state()}, + keyvalues={}, + retcols=("room_id", "has_auth_chain_index"), + ), ) rooms_using_chain_index = { - row["room_id"] for row in rows if row["has_auth_chain_index"] + room_id for room_id, has_auth_chain_index in rows if has_auth_chain_index } state_events = { @@ -571,19 +575,18 @@ class PersistEventsStore: # We check if there are any events that need to be handled in the rooms # we're looking at. These should just be out of band memberships, where # we didn't have the auth chain when we first persisted. - rows = db_pool.simple_select_many_txn( - txn, - table="event_auth_chain_to_calculate", - keyvalues={}, - column="room_id", - iterable=set(event_to_room_id.values()), - retcols=("event_id", "type", "state_key"), + auth_chain_to_calc_rows = cast( + List[Tuple[str, str, str]], + db_pool.simple_select_many_txn( + txn, + table="event_auth_chain_to_calculate", + keyvalues={}, + column="room_id", + iterable=set(event_to_room_id.values()), + retcols=("event_id", "type", "state_key"), + ), ) - for row in rows: - event_id = row["event_id"] - event_type = row["type"] - state_key = row["state_key"] - + for event_id, event_type, state_key in auth_chain_to_calc_rows: # (We could pull out the auth events for all rows at once using # simple_select_many, but this case happens rarely and almost always # with a single row.) 
@@ -753,23 +756,31 @@ class PersistEventsStore: # Step 1, fetch all existing links from all the chains we've seen # referenced. chain_links = _LinkMap() - rows = db_pool.simple_select_many_txn( - txn, - table="event_auth_chain_links", - column="origin_chain_id", - iterable={chain_id for chain_id, _ in chain_map.values()}, - keyvalues={}, - retcols=( - "origin_chain_id", - "origin_sequence_number", - "target_chain_id", - "target_sequence_number", + auth_chain_rows = cast( + List[Tuple[int, int, int, int]], + db_pool.simple_select_many_txn( + txn, + table="event_auth_chain_links", + column="origin_chain_id", + iterable={chain_id for chain_id, _ in chain_map.values()}, + keyvalues={}, + retcols=( + "origin_chain_id", + "origin_sequence_number", + "target_chain_id", + "target_sequence_number", + ), ), ) - for row in rows: + for ( + origin_chain_id, + origin_sequence_number, + target_chain_id, + target_sequence_number, + ) in auth_chain_rows: chain_links.add_link( - (row["origin_chain_id"], row["origin_sequence_number"]), - (row["target_chain_id"], row["target_sequence_number"]), + (origin_chain_id, origin_sequence_number), + (target_chain_id, target_sequence_number), new=False, ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index daef3685b0..c5fce1c82b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -369,18 +369,20 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] for chunk in chunks: - ev_rows = self.db_pool.simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, + ev_rows = cast( + List[Tuple[str, str]], + self.db_pool.simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ), ) - for row in ev_rows: - event_id = row["event_id"] - event_json = db_to_json(row["json"]) + for event_id, json in ev_rows: + event_json = db_to_json(json) try: origin_server_ts = event_json["origin_server_ts"] except (KeyError, AttributeError): @@ -563,15 +565,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): if deleted: # We now need to invalidate the caches of these rooms - rows = self.db_pool.simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",), + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",), + ), ) - room_ids = {row["room_id"] for row in rows} + room_ids = {row[0] for row in rows} for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, (room_id,) # type: ignore[attr-defined] @@ -1038,18 +1043,21 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): count = len(rows) # We also need to fetch the auth events for them. 
- auth_events = self.db_pool.simple_select_many_txn( - txn, - table="event_auth", - column="event_id", - iterable=event_to_room_id, - keyvalues={}, - retcols=("event_id", "auth_id"), + auth_events = cast( + List[Tuple[str, str]], + self.db_pool.simple_select_many_txn( + txn, + table="event_auth", + column="event_id", + iterable=event_to_room_id, + keyvalues={}, + retcols=("event_id", "auth_id"), + ), ) event_to_auth_chain: Dict[str, List[str]] = {} - for row in auth_events: - event_to_auth_chain.setdefault(row["event_id"], []).append(row["auth_id"]) + for event_id, auth_id in auth_events: + event_to_auth_chain.setdefault(event_id, []).append(auth_id) # Calculate and persist the chain cover index for this set of events. # diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b788d70fc5..8af638d60f 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1584,16 +1584,19 @@ class EventsWorkerStore(SQLBaseStore): """Given a list of event ids, check if we have already processed and stored them as non outliers. """ - rows = await self.db_pool.simple_select_many_batch( - table="events", - retcols=("event_id",), - column="event_id", - iterable=list(event_ids), - keyvalues={"outlier": False}, - desc="have_events_in_timeline", + rows = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="events", + retcols=("event_id",), + column="event_id", + iterable=list(event_ids), + keyvalues={"outlier": False}, + desc="have_events_in_timeline", + ), ) - return {r["event_id"] for r in rows} + return {r[0] for r in rows} @trace @tag_args @@ -2336,15 +2339,18 @@ class EventsWorkerStore(SQLBaseStore): a dict mapping from event id to partial-stateness. We return True for any of the events which are unknown (or are outliers). """ - result = await self.db_pool.simple_select_many_batch( - table="partial_state_events", - column="event_id", - iterable=event_ids, - retcols=["event_id"], - desc="get_partial_state_events", + result = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="partial_state_events", + column="event_id", + iterable=event_ids, + retcols=["event_id"], + desc="get_partial_state_events", + ), ) # convert the result to a dict, to make @cachedList work - partial = {r["event_id"] for r in result} + partial = {r[0] for r in result} return {e_id: e_id in partial for e_id in event_ids} @cached() diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 889c578b9c..ea797864b9 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -16,7 +16,7 @@ import itertools import json import logging -from typing import Dict, Iterable, Mapping, Optional, Tuple +from typing import Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -205,35 +205,39 @@ class KeyStore(CacheInvalidationWorkerStore): If we have multiple entries for a given key ID, returns the most recent. 
""" - rows = await self.db_pool.simple_select_many_batch( - table="server_keys_json", - column="key_id", - iterable=key_ids, - keyvalues={"server_name": server_name}, - retcols=( - "key_id", - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "key_json", + rows = cast( + List[Tuple[str, str, int, int, Union[bytes, memoryview]]], + await self.db_pool.simple_select_many_batch( + table="server_keys_json", + column="key_id", + iterable=key_ids, + keyvalues={"server_name": server_name}, + retcols=( + "key_id", + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + desc="get_server_keys_json_for_remote", ), - desc="get_server_keys_json_for_remote", ) if not rows: return {} - # We sort the rows so that the most recently added entry is picked up. - rows.sort(key=lambda r: r["ts_added_ms"]) + # We sort the rows by ts_added_ms so that the most recently added entry + # will stomp over older entries in the dictionary. + rows.sort(key=lambda r: r[2]) return { - row["key_id"]: FetchKeyResultForRemote( + key_id: FetchKeyResultForRemote( # Cast to bytes since postgresql returns a memoryview. - key_json=bytes(row["key_json"]), - valid_until_ts=row["ts_valid_until_ms"], - added_ts=row["ts_added_ms"], + key_json=bytes(key_json), + valid_until_ts=ts_valid_until_ms, + added_ts=ts_added_ms, ) - for row in rows + for key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json in rows } async def get_all_server_keys_json_for_remote( @@ -260,6 +264,8 @@ class KeyStore(CacheInvalidationWorkerStore): if not rows: return {} + # We sort the rows by ts_added_ms so that the most recently added entry + # will stomp over older entries in the dictionary. rows.sort(key=lambda r: r["ts_added_ms"]) return { diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 519f05fb60..3b444d2d07 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -261,27 +261,40 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) async def get_presence_for_users( self, user_ids: Iterable[str] ) -> Mapping[str, UserPresenceState]: - rows = await self.db_pool.simple_select_many_batch( - table="presence_stream", - column="user_id", - iterable=user_ids, - keyvalues={}, - retcols=( - "user_id", - "state", - "last_active_ts", - "last_federation_update_ts", - "last_user_sync_ts", - "status_msg", - "currently_active", + # TODO All these columns are nullable, but we don't expect that: + # https://github.com/matrix-org/synapse/issues/16467 + rows = cast( + List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], + await self.db_pool.simple_select_many_batch( + table="presence_stream", + column="user_id", + iterable=user_ids, + keyvalues={}, + retcols=( + "user_id", + "state", + "last_active_ts", + "last_federation_update_ts", + "last_user_sync_ts", + "status_msg", + "currently_active", + ), + desc="get_presence_for_users", ), - desc="get_presence_for_users", ) - for row in rows: - row["currently_active"] = bool(row["currently_active"]) - - return {row["user_id"]: UserPresenceState(**row) for row in rows} + return { + user_id: UserPresenceState( + user_id=user_id, + state=state, + last_active_ts=last_active_ts, + last_federation_update_ts=last_federation_update_ts, + last_user_sync_ts=last_user_sync_ts, + status_msg=status_msg, + currently_active=bool(currently_active), + ) + for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, 
currently_active in rows + } async def should_user_receive_full_presence_with_token( self, @@ -386,6 +399,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore) limit = 100 offset = 0 while True: + # TODO All these columns are nullable, but we don't expect that: + # https://github.com/matrix-org/synapse/issues/16467 rows = cast( List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]], await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 923166974c..f5356e7f80 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -62,20 +62,34 @@ logger = logging.getLogger(__name__) def _load_rules( - rawrules: List[JsonDict], + rawrules: List[Tuple[str, int, str, str]], enabled_map: Dict[str, bool], experimental_config: ExperimentalConfig, ) -> FilteredPushRules: """Take the DB rows returned from the DB and convert them into a full `FilteredPushRules` object. + + Args: + rawrules: List of tuples of: + * rule ID + * Priority lass + * Conditions (as serialized JSON) + * Actions (as serialized JSON) + enabled_map: A dictionary of rule ID to a boolean of whether the rule is + enabled. This might not include all rule IDs from rawrules. + experimental_config: The `experimental_features` section of the Synapse + config. (Used to check if various features are enabled.) + + Returns: + A new FilteredPushRules object. """ ruleslist = [ PushRule.from_db( - rule_id=rawrule["rule_id"], - priority_class=rawrule["priority_class"], - conditions=rawrule["conditions"], - actions=rawrule["actions"], + rule_id=rawrule[0], + priority_class=rawrule[1], + conditions=rawrule[2], + actions=rawrule[3], ) for rawrule in rawrules ] @@ -183,7 +197,19 @@ class PushRulesWorkerStore( enabled_map = await self.get_push_rules_enabled_for_user(user_id) - return _load_rules(rows, enabled_map, self.hs.config.experimental) + return _load_rules( + [ + ( + row["rule_id"], + row["priority_class"], + row["conditions"], + row["actions"], + ) + for row in rows + ], + enabled_map, + self.hs.config.experimental, + ) async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: results = await self.db_pool.simple_select_list( @@ -221,21 +247,36 @@ class PushRulesWorkerStore( if not user_ids: return {} - raw_rules: Dict[str, List[JsonDict]] = {user_id: [] for user_id in user_ids} + raw_rules: Dict[str, List[Tuple[str, int, str, str]]] = { + user_id: [] for user_id in user_ids + } - rows = await self.db_pool.simple_select_many_batch( - table="push_rules", - column="user_name", - iterable=user_ids, - retcols=("*",), - desc="bulk_get_push_rules", - batch_size=1000, + rows = cast( + List[Tuple[str, str, int, int, str, str]], + await self.db_pool.simple_select_many_batch( + table="push_rules", + column="user_name", + iterable=user_ids, + retcols=( + "user_name", + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ), + desc="bulk_get_push_rules", + batch_size=1000, + ), ) - rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) + # Sort by highest priority_class, then highest priority. 
+ rows.sort(key=lambda row: (-int(row[2]), -int(row[3]))) - for row in rows: - raw_rules.setdefault(row["user_name"], []).append(row) + for user_name, rule_id, priority_class, _, conditions, actions in rows: + raw_rules.setdefault(user_name, []).append( + (rule_id, priority_class, conditions, actions) + ) enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) @@ -256,17 +297,19 @@ class PushRulesWorkerStore( results: Dict[str, Dict[str, bool]] = {user_id: {} for user_id in user_ids} - rows = await self.db_pool.simple_select_many_batch( - table="push_rules_enable", - column="user_name", - iterable=user_ids, - retcols=("user_name", "rule_id", "enabled"), - desc="bulk_get_push_rules_enabled", - batch_size=1000, + rows = cast( + List[Tuple[str, str, Optional[int]]], + await self.db_pool.simple_select_many_batch( + table="push_rules_enable", + column="user_name", + iterable=user_ids, + retcols=("user_name", "rule_id", "enabled"), + desc="bulk_get_push_rules_enabled", + batch_size=1000, + ), ) - for row in rows: - enabled = bool(row["enabled"]) - results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled + for user_name, rule_id, enabled in rows: + results.setdefault(user_name, {})[rule_id] = bool(enabled) return results async def get_all_push_rule_updates( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 9246b418f5..7f40e2c446 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -349,16 +349,19 @@ class RelationsWorkerStore(SQLBaseStore): def get_all_relation_ids_for_event_with_types_txn( txn: LoggingTransaction, ) -> List[str]: - rows = self.db_pool.simple_select_many_txn( - txn=txn, - table="event_relations", - column="relation_type", - iterable=relation_types, - keyvalues={"relates_to_id": event_id}, - retcols=["event_id"], + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn=txn, + table="event_relations", + column="relation_type", + iterable=relation_types, + keyvalues={"relates_to_id": event_id}, + retcols=["event_id"], + ), ) - return [row["event_id"] for row in rows] + return [row[0] for row in rows] return await self.db_pool.runInteraction( desc="get_all_relation_ids_for_event_with_types", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 1d4d99932b..9d24d2c347 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1296,14 +1296,17 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): complete. 
""" - rows: List[Dict[str, str]] = await self.db_pool.simple_select_many_batch( - table="partial_state_rooms", - column="room_id", - iterable=room_ids, - retcols=("room_id",), - desc="is_partial_state_room_batched", + rows = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="partial_state_rooms", + column="room_id", + iterable=room_ids, + retcols=("room_id",), + desc="is_partial_state_room_batched", + ), ) - partial_state_rooms = {row_dict["room_id"] for row_dict in rows} + partial_state_rooms = {row[0] for row in rows} return {room_id: room_id in partial_state_rooms for room_id in room_ids} async def get_join_event_id_and_device_lists_stream_id_for_partial_state( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index bbe08368db..3a87eba430 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -27,6 +27,7 @@ from typing import ( Set, Tuple, Union, + cast, ) import attr @@ -683,25 +684,28 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): Map from user_id to set of rooms that is currently in. """ - rows = await self.db_pool.simple_select_many_batch( - table="current_state_events", - column="state_key", - iterable=user_ids, - retcols=( - "state_key", - "room_id", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_many_batch( + table="current_state_events", + column="state_key", + iterable=user_ids, + retcols=( + "state_key", + "room_id", + ), + keyvalues={ + "type": EventTypes.Member, + "membership": Membership.JOIN, + }, + desc="get_rooms_for_users", ), - keyvalues={ - "type": EventTypes.Member, - "membership": Membership.JOIN, - }, - desc="get_rooms_for_users", ) user_rooms: Dict[str, Set[str]] = {user_id: set() for user_id in user_ids} - for row in rows: - user_rooms[row["state_key"]].add(row["room_id"]) + for state_key, room_id in rows: + user_rooms[state_key].add(room_id) return {key: frozenset(rooms) for key, rooms in user_rooms.items()} @@ -892,17 +896,20 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): Map from event ID to `user_id`, or None if event is not a join. """ - rows = await self.db_pool.simple_select_many_batch( - table="room_memberships", - column="event_id", - iterable=event_ids, - retcols=("user_id", "event_id"), - keyvalues={"membership": Membership.JOIN}, - batch_size=1000, - desc="_get_user_ids_from_membership_event_ids", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_many_batch( + table="room_memberships", + column="event_id", + iterable=event_ids, + retcols=("event_id", "user_id"), + keyvalues={"membership": Membership.JOIN}, + batch_size=1000, + desc="_get_user_ids_from_membership_event_ids", + ), ) - return {row["event_id"]: row["user_id"] for row in rows} + return dict(rows) @cached(max_entries=10000) async def is_host_joined(self, room_id: str, host: str) -> bool: @@ -1202,21 +1209,22 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): membership event, otherwise the value is None. 
""" - rows = await self.db_pool.simple_select_many_batch( - table="room_memberships", - column="event_id", - iterable=member_event_ids, - retcols=("user_id", "membership", "event_id"), - keyvalues={}, - batch_size=500, - desc="get_membership_from_event_ids", + rows = cast( + List[Tuple[str, str, str]], + await self.db_pool.simple_select_many_batch( + table="room_memberships", + column="event_id", + iterable=member_event_ids, + retcols=("user_id", "membership", "event_id"), + keyvalues={}, + batch_size=500, + desc="get_membership_from_event_ids", + ), ) return { - row["event_id"]: EventIdMembership( - membership=row["membership"], user_id=row["user_id"] - ) - for row in rows + event_id: EventIdMembership(membership=membership, user_id=user_id) + for user_id, membership, event_id in rows } async def is_local_host_in_room_ignoring_users( diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5eaaff5b68..598025dd91 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -20,10 +20,12 @@ from typing import ( Collection, Dict, Iterable, + List, Mapping, Optional, Set, Tuple, + cast, ) import attr @@ -388,16 +390,19 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): Raises: RuntimeError if the state is unknown at any of the given events """ - rows = await self.db_pool.simple_select_many_batch( - table="event_to_state_groups", - column="event_id", - iterable=event_ids, - keyvalues={}, - retcols=("event_id", "state_group"), - desc="_get_state_group_for_events", + rows = cast( + List[Tuple[str, int]], + await self.db_pool.simple_select_many_batch( + table="event_to_state_groups", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("event_id", "state_group"), + desc="_get_state_group_for_events", + ), ) - res = {row["event_id"]: row["state_group"] for row in rows} + res = dict(rows) for e in event_ids: if e not in res: raise RuntimeError("No state group for unknown or outlier event %s" % e) @@ -415,16 +420,19 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): The subset of state groups that are referenced. """ - rows = await self.db_pool.simple_select_many_batch( - table="event_to_state_groups", - column="state_group", - iterable=state_groups, - keyvalues={}, - retcols=("DISTINCT state_group",), - desc="get_referenced_state_groups", + rows = cast( + List[Tuple[int]], + await self.db_pool.simple_select_many_batch( + table="event_to_state_groups", + column="state_group", + iterable=state_groups, + keyvalues={}, + retcols=("DISTINCT state_group",), + desc="get_referenced_state_groups", + ), ) - return {row["state_group"] for row in rows} + return {row[0] for row in rows} async def update_state_for_partial_state_event( self, @@ -624,16 +632,22 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): # potentially stale, since there may have been a period where the # server didn't share a room with the remote user and therefore may # have missed any device updates. 
- rows = self.db_pool.simple_select_many_txn( - txn, - table="current_state_events", - column="room_id", - iterable=to_delete, - keyvalues={"type": EventTypes.Member, "membership": Membership.JOIN}, - retcols=("state_key",), + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="current_state_events", + column="room_id", + iterable=to_delete, + keyvalues={ + "type": EventTypes.Member, + "membership": Membership.JOIN, + }, + retcols=("state_key",), + ), ) - potentially_left_users = {row["state_key"] for row in rows} + potentially_left_users = {row[0] for row in rows} # Now lets actually delete the rooms from the DB. self.db_pool.simple_delete_many_txn( diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 9d403919e4..5b2d0ba870 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -506,25 +506,28 @@ class StatsStore(StateDeltasStore): ) -> Tuple[List[str], Dict[str, int], int, List[str], int]: pos = self.get_room_max_stream_ordering() # type: ignore[attr-defined] - rows = self.db_pool.simple_select_many_txn( - txn, - table="current_state_events", - column="type", - iterable=[ - EventTypes.Create, - EventTypes.JoinRules, - EventTypes.RoomHistoryVisibility, - EventTypes.RoomEncryption, - EventTypes.Name, - EventTypes.Topic, - EventTypes.RoomAvatar, - EventTypes.CanonicalAlias, - ], - keyvalues={"room_id": room_id, "state_key": ""}, - retcols=["event_id"], + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="current_state_events", + column="type", + iterable=[ + EventTypes.Create, + EventTypes.JoinRules, + EventTypes.RoomHistoryVisibility, + EventTypes.RoomEncryption, + EventTypes.Name, + EventTypes.Topic, + EventTypes.RoomAvatar, + EventTypes.CanonicalAlias, + ], + keyvalues={"room_id": room_id, "state_key": ""}, + retcols=["event_id"], + ), ) - event_ids = cast(List[str], [row["event_id"] for row in rows]) + event_ids = [row[0] for row in rows] txn.execute( """ diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index f35757280d..c4a6475060 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -211,18 +211,28 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): async def get_destination_retry_timings_batch( self, destinations: StrCollection ) -> Mapping[str, Optional[DestinationRetryTimings]]: - rows = await self.db_pool.simple_select_many_batch( - table="destinations", - iterable=destinations, - column="destination", - retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"), - desc="get_destination_retry_timings_batch", + rows = cast( + List[Tuple[str, Optional[int], Optional[int], Optional[int]]], + await self.db_pool.simple_select_many_batch( + table="destinations", + iterable=destinations, + column="destination", + retcols=( + "destination", + "failure_ts", + "retry_last_ts", + "retry_interval", + ), + desc="get_destination_retry_timings_batch", + ), ) return { - row.pop("destination"): DestinationRetryTimings(**row) - for row in rows - if row["retry_last_ts"] and row["failure_ts"] and row["retry_interval"] + destination: DestinationRetryTimings( + failure_ts, retry_last_ts, retry_interval + ) + for destination, failure_ts, retry_last_ts, retry_interval in rows + if retry_last_ts and failure_ts and retry_interval } async def set_destination_retry_timings( diff --git 
a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index f38bedbbcd..919c66f553 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -337,13 +337,16 @@ class UIAuthWorkerStore(SQLBaseStore): # If a registration token was used, decrement the pending counter # before deleting the session. - rows = self.db_pool.simple_select_many_txn( - txn, - table="ui_auth_sessions_credentials", - column="session_id", - iterable=session_ids, - keyvalues={"stage_type": LoginType.REGISTRATION_TOKEN}, - retcols=["result"], + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_many_txn( + txn, + table="ui_auth_sessions_credentials", + column="session_id", + iterable=session_ids, + keyvalues={"stage_type": LoginType.REGISTRATION_TOKEN}, + retcols=["result"], + ), ) # Get the tokens used and how much pending needs to be decremented by. @@ -353,23 +356,25 @@ class UIAuthWorkerStore(SQLBaseStore): # registration token stage for that session will be True. # If a token was used to authenticate, but registration was # never completed, the result will be the token used. - token = db_to_json(r["result"]) + token = db_to_json(r[0]) if isinstance(token, str): token_counts[token] = token_counts.get(token, 0) + 1 # Update the `pending` counters. if len(token_counts) > 0: - token_rows = self.db_pool.simple_select_many_txn( - txn, - table="registration_tokens", - column="token", - iterable=list(token_counts.keys()), - keyvalues={}, - retcols=["token", "pending"], + token_rows = cast( + List[Tuple[str, int]], + self.db_pool.simple_select_many_txn( + txn, + table="registration_tokens", + column="token", + iterable=list(token_counts.keys()), + keyvalues={}, + retcols=["token", "pending"], + ), ) - for token_row in token_rows: - token = token_row["token"] - new_pending = token_row["pending"] - token_counts[token] + for token, pending in token_rows: + new_pending = pending - token_counts[token] self.db_pool.simple_update_one_txn( txn, table="registration_tokens", diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f0dc31fee6..23eb92c514 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -410,25 +410,24 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): ) # Next fetch their profiles. Note that not all users have profiles. 
- profile_rows = self.db_pool.simple_select_many_txn( - txn, - table="profiles", - column="full_user_id", - iterable=list(users_to_insert), - retcols=( - "full_user_id", - "displayname", - "avatar_url", + profile_rows = cast( + List[Tuple[str, Optional[str], Optional[str]]], + self.db_pool.simple_select_many_txn( + txn, + table="profiles", + column="full_user_id", + iterable=list(users_to_insert), + retcols=( + "full_user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, ), - keyvalues={}, ) profiles = { - row["full_user_id"]: _UserDirProfile( - row["full_user_id"], - row["displayname"], - row["avatar_url"], - ) - for row in profile_rows + full_user_id: _UserDirProfile(full_user_id, displayname, avatar_url) + for full_user_id, displayname, avatar_url in profile_rows } profiles_to_insert = [ @@ -517,18 +516,21 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): and not self.get_if_app_services_interested_in_user(user) # type: ignore[attr-defined] ] - rows = self.db_pool.simple_select_many_txn( - txn, - table="users", - column="name", - iterable=users, - keyvalues={ - "deactivated": 0, - }, - retcols=("name", "user_type"), + rows = cast( + List[Tuple[str, Optional[str]]], + self.db_pool.simple_select_many_txn( + txn, + table="users", + column="name", + iterable=users, + keyvalues={ + "deactivated": 0, + }, + retcols=("name", "user_type"), + ), ) - return [row["name"] for row in rows if row["user_type"] != UserTypes.SUPPORT] + return [name for name, user_type in rows if user_type != UserTypes.SUPPORT] async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool: """Check if the room is either world_readable or publically joinable""" diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index 06fcbe5e54..8bd58c6e3d 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable, Mapping +from typing import Iterable, List, Mapping, Tuple, cast from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore @@ -50,14 +50,17 @@ class UserErasureWorkerStore(CacheInvalidationWorkerStore): Returns: for each user, whether the user has requested erasure. """ - rows = await self.db_pool.simple_select_many_batch( - table="erased_users", - column="user_id", - iterable=user_ids, - retcols=("user_id",), - desc="are_users_erased", + rows = cast( + List[Tuple[str]], + await self.db_pool.simple_select_many_batch( + table="erased_users", + column="user_id", + iterable=user_ids, + retcols=("user_id",), + desc="are_users_erased", + ), ) - erased_users = {row["user_id"] for row in rows} + erased_users = {row[0] for row in rows} return {u: u in erased_users for u in user_ids} diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 6984d11352..09d2a8c5b3 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -13,7 +13,17 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + cast, +) import attr @@ -730,19 +740,22 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): "[purge] found %i state groups to delete", len(state_groups_to_delete) ) - rows = self.db_pool.simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=state_groups_to_delete, - keyvalues={}, - retcols=("state_group",), + rows = cast( + List[Tuple[int]], + self.db_pool.simple_select_many_txn( + txn, + table="state_group_edges", + column="prev_state_group", + iterable=state_groups_to_delete, + keyvalues={}, + retcols=("state_group",), + ), ) remaining_state_groups = { - row["state_group"] - for row in rows - if row["state_group"] not in state_groups_to_delete + state_group + for state_group, in rows + if state_group not in state_groups_to_delete } logger.info( @@ -799,16 +812,19 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): A mapping from state group to previous state group. """ - rows = await self.db_pool.simple_select_many_batch( - table="state_group_edges", - column="prev_state_group", - iterable=state_groups, - keyvalues={}, - retcols=("prev_state_group", "state_group"), - desc="get_previous_state_groups", + rows = cast( + List[Tuple[int, int]], + await self.db_pool.simple_select_many_batch( + table="state_group_edges", + column="prev_state_group", + iterable=state_groups, + keyvalues={}, + retcols=("state_group", "prev_state_group"), + desc="get_previous_state_groups", + ), ) - return {row["state_group"]: row["prev_state_group"] for row in rows} + return dict(rows) async def purge_room_state( self, room_id: str, state_groups_to_delete: Collection[int] diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index b55dd07f14..2f6499966c 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Set, Tuple +from typing import Dict, List, Set, Tuple, cast from twisted.test.proto_helpers import MemoryReactor from twisted.trial import unittest @@ -421,41 +421,53 @@ class EventChainStoreTestCase(HomeserverTestCase): self, events: List[EventBase] ) -> Tuple[Dict[str, Tuple[int, int]], _LinkMap]: # Fetch the map from event ID -> (chain ID, sequence number) - rows = self.get_success( - self.store.db_pool.simple_select_many_batch( - table="event_auth_chains", - column="event_id", - iterable=[e.event_id for e in events], - retcols=("event_id", "chain_id", "sequence_number"), - keyvalues={}, - ) + rows = cast( + List[Tuple[str, int, int]], + self.get_success( + self.store.db_pool.simple_select_many_batch( + table="event_auth_chains", + column="event_id", + iterable=[e.event_id for e in events], + retcols=("event_id", "chain_id", "sequence_number"), + keyvalues={}, + ) + ), ) chain_map = { - row["event_id"]: (row["chain_id"], row["sequence_number"]) for row in rows + event_id: (chain_id, sequence_number) + for event_id, chain_id, sequence_number in rows } # Fetch all the links and pass them to the _LinkMap. 
- rows = self.get_success( - self.store.db_pool.simple_select_many_batch( - table="event_auth_chain_links", - column="origin_chain_id", - iterable=[chain_id for chain_id, _ in chain_map.values()], - retcols=( - "origin_chain_id", - "origin_sequence_number", - "target_chain_id", - "target_sequence_number", - ), - keyvalues={}, - ) + auth_chain_rows = cast( + List[Tuple[int, int, int, int]], + self.get_success( + self.store.db_pool.simple_select_many_batch( + table="event_auth_chain_links", + column="origin_chain_id", + iterable=[chain_id for chain_id, _ in chain_map.values()], + retcols=( + "origin_chain_id", + "origin_sequence_number", + "target_chain_id", + "target_sequence_number", + ), + keyvalues={}, + ) + ), ) link_map = _LinkMap() - for row in rows: + for ( + origin_chain_id, + origin_sequence_number, + target_chain_id, + target_sequence_number, + ) in auth_chain_rows: added = link_map.add_link( - (row["origin_chain_id"], row["origin_sequence_number"]), - (row["target_chain_id"], row["target_sequence_number"]), + (origin_chain_id, origin_sequence_number), + (target_chain_id, target_sequence_number), ) # We shouldn't have persisted any redundant links From cc865fffc0e556005a6ab596717a77230ba82ee7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 11 Oct 2023 20:08:11 -0400 Subject: [PATCH 046/142] Convert user_get_threepids response to attrs. (#16468) This improves type annotations by not having a dictionary of Any values. --- changelog.d/16468.misc | 1 + synapse/handlers/account_validity.py | 4 ++-- synapse/handlers/admin.py | 4 +++- synapse/handlers/deactivate_account.py | 4 ++-- synapse/module_api/__init__.py | 2 +- synapse/rest/admin/users.py | 3 +-- synapse/rest/client/account.py | 4 +++- .../storage/databases/main/registration.py | 19 ++++++++++++++----- tests/module_api/test_api.py | 8 ++++---- 9 files changed, 31 insertions(+), 18 deletions(-) create mode 100644 changelog.d/16468.misc diff --git a/changelog.d/16468.misc b/changelog.d/16468.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16468.misc @@ -0,0 +1 @@ +Improve type hints. 
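The rationale stated in the commit message above is what the diffs that follow implement: `user_get_threepids` returns a small frozen attrs class instead of `Dict[str, Any]`, callers switch to attribute access, and the few places that still need a plain dict (the admin handler, the module API, the `/account/3pid` response) convert back with `attr.asdict`. A short, self-contained illustration of that pattern, using the `ThreepidResult` fields introduced later in this patch and example values borrowed from the tests:

    import attr

    @attr.s(frozen=True, slots=True, auto_attribs=True)
    class ThreepidResult:
        medium: str
        address: str
        validated_at: int
        added_at: int

    t = ThreepidResult(
        medium="email", address="bob@bobinator.bob", validated_at=0, added_at=0
    )

    # Typed attribute access replaces dict lookups like threepid["medium"].
    assert t.medium == "email" and t.address == "bob@bobinator.bob"

    # Where a JSON-shaped dict is still needed, round-trip with attr.asdict.
    assert attr.asdict(t) == {
        "medium": "email",
        "address": "bob@bobinator.bob",
        "validated_at": 0,
        "added_at": 0,
    }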
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index f1a7a05df6..6c2a49a3b9 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -212,8 +212,8 @@ class AccountValidityHandler: addresses = [] for threepid in threepids: - if threepid["medium"] == "email": - addresses.append(threepid["address"]) + if threepid.medium == "email": + addresses.append(threepid.address) return addresses diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 97fd1fd427..2c2baeac67 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -16,6 +16,8 @@ import abc import logging from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set +import attr + from synapse.api.constants import Direction, Membership from synapse.events import EventBase from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo @@ -93,7 +95,7 @@ class AdminHandler: ] user_info_dict["displayname"] = profile.display_name user_info_dict["avatar_url"] = profile.avatar_url - user_info_dict["threepids"] = threepids + user_info_dict["threepids"] = [attr.asdict(t) for t in threepids] user_info_dict["external_ids"] = external_ids user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 67adeae6a7..6a8f8f2fd1 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -117,9 +117,9 @@ class DeactivateAccountHandler: # Remove any local threepid associations for this account. local_threepids = await self.store.user_get_threepids(user_id) - for threepid in local_threepids: + for local_threepid in local_threepids: await self._auth_handler.delete_local_threepid( - user_id, threepid["medium"], threepid["address"] + user_id, local_threepid.medium, local_threepid.address ) # delete any devices belonging to the user, which will also diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 65e2aca456..0786d20635 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -678,7 +678,7 @@ class ModuleApi: "msisdn" for phone numbers, and an "address" key which value is the threepid's address. """ - return await self._store.user_get_threepids(user_id) + return [attr.asdict(t) for t in await self._store.user_get_threepids(user_id)] def check_user_exists(self, user_id: str) -> "defer.Deferred[Optional[str]]": """Check if user exists. 
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index cd995e8dbb..7fe16130e7 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -329,9 +329,8 @@ class UserRestServletV2(RestServlet): if threepids is not None: # get changed threepids (added and removed) - # convert List[Dict[str, Any]] into Set[Tuple[str, str]] cur_threepids = { - (threepid["medium"], threepid["address"]) + (threepid.medium, threepid.address) for threepid in await self.store.user_get_threepids(user_id) } add_threepids = new_threepids - cur_threepids diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index e74a87af4d..641390cb30 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -24,6 +24,8 @@ if TYPE_CHECKING or HAS_PYDANTIC_V2: from pydantic.v1 import StrictBool, StrictStr, constr else: from pydantic import StrictBool, StrictStr, constr + +import attr from typing_extensions import Literal from twisted.web.server import Request @@ -595,7 +597,7 @@ class ThreepidRestServlet(RestServlet): threepids = await self.datastore.user_get_threepids(requester.user.to_string()) - return 200, {"threepids": threepids} + return 200, {"threepids": [attr.asdict(t) for t in threepids]} # NOTE(dmr): I have chosen not to use Pydantic to parse this request's body, because # the endpoint is deprecated. (If you really want to, you could do this by reusing diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 64a2c31a5d..9e8643ae4d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -143,6 +143,14 @@ class LoginTokenLookupResult: """The session ID advertised by the SSO Identity Provider.""" +@attr.s(frozen=True, slots=True, auto_attribs=True) +class ThreepidResult: + medium: str + address: str + validated_at: int + added_at: int + + class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__( self, @@ -988,13 +996,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): {"user_id": user_id, "validated_at": validated_at, "added_at": added_at}, ) - async def user_get_threepids(self, user_id: str) -> List[Dict[str, Any]]: - return await self.db_pool.simple_select_list( + async def user_get_threepids(self, user_id: str) -> List[ThreepidResult]: + results = await self.db_pool.simple_select_list( "user_threepids", - {"user_id": user_id}, - ["medium", "address", "validated_at", "added_at"], - "user_get_threepids", + keyvalues={"user_id": user_id}, + retcols=["medium", "address", "validated_at", "added_at"], + desc="user_get_threepids", ) + return [ThreepidResult(**r) for r in results] async def user_delete_threepid( self, user_id: str, medium: str, address: str diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 172fc3a736..1dabf52156 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -94,12 +94,12 @@ class ModuleApiTestCase(BaseModuleApiTestCase): self.assertEqual(len(emails), 1) email = emails[0] - self.assertEqual(email["medium"], "email") - self.assertEqual(email["address"], "bob@bobinator.bob") + self.assertEqual(email.medium, "email") + self.assertEqual(email.address, "bob@bobinator.bob") # Should these be 0? 
- self.assertEqual(email["validated_at"], 0) - self.assertEqual(email["added_at"], 0) + self.assertEqual(email.validated_at, 0) + self.assertEqual(email.added_at, 0) # Check that the displayname was assigned displayname = self.get_success( From f710d5480bc965a4697d42d293f1f46ee9905603 Mon Sep 17 00:00:00 2001 From: kegsay Date: Thu, 12 Oct 2023 11:33:14 +0100 Subject: [PATCH 047/142] Update complement.sh to match new public API shape (#16466) * Update complement.sh to match new public API shape Sister PR to https://github.com/matrix-org/complement/pull/666 Context: https://github.com/matrix-org/complement/issues/654#issuecomment-1746613495 * Changelog * Pedantry * Run complement plz --- .github/workflows/tests.yml | 1 + changelog.d/16466.misc | 1 + scripts-dev/complement.sh | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16466.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7dbd83908e..fcbd40b746 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -56,6 +56,7 @@ jobs: - 'pyproject.toml' - 'poetry.lock' - 'docker/**' + - 'scripts-dev/complement.sh' linting: - 'synapse/**' diff --git a/changelog.d/16466.misc b/changelog.d/16466.misc new file mode 100644 index 0000000000..471056bb0f --- /dev/null +++ b/changelog.d/16466.misc @@ -0,0 +1 @@ +Update complement.sh to match new public API shape. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 8416b55674..3e0cddb527 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -214,7 +214,7 @@ fi extra_test_args=() -test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins" +test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902" # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) @@ -277,4 +277,4 @@ export PASS_SYNAPSE_LOG_TESTING=1 echo "Images built; running complement" cd "$COMPLEMENT_DIR" -go test -v -tags $test_tags -count=1 "${extra_test_args[@]}" "$@" ./tests/... +go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages From 4cc729d48006964438e28c5489d7abfb9e3380df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 12 Oct 2023 08:56:10 -0400 Subject: [PATCH 048/142] Revert "Drop unused tables & unneeded access token ID for events. (#16268)" (#16465) This reverts commit cabd57746004fe2dacc11aa8d373854a3d25e306. There are additional usages of these tables which need to be removed first. --- changelog.d/16268.misc | 1 - synapse/handlers/message.py | 8 +++++-- synapse/storage/schema/__init__.py | 4 ++-- .../main/delta/82/03_drop_old_tables.sql | 24 ------------------- 4 files changed, 8 insertions(+), 29 deletions(-) delete mode 100644 changelog.d/16268.misc delete mode 100644 synapse/storage/schema/main/delta/82/03_drop_old_tables.sql diff --git a/changelog.d/16268.misc b/changelog.d/16268.misc deleted file mode 100644 index 26059b108e..0000000000 --- a/changelog.d/16268.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up unused tables. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8de4b8e816..41a35ce510 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -693,9 +693,13 @@ class EventCreationHandler: if require_consent and not is_exempt: await self.assert_accepted_privacy_policy(requester) - # Save the the device ID and the transaction ID in the event internal metadata. 
- # This is useful to determine if we should echo the transaction_id in events. + # Save the access token ID, the device ID and the transaction ID in the event + # internal metadata. This is useful to determine if we should echo the + # transaction_id in events. # See `synapse.events.utils.EventClientSerializer.serialize_event` + if requester.access_token_id is not None: + builder.internal_metadata.token_id = requester.access_token_id + if requester.device_id is not None: builder.internal_metadata.device_id = requester.device_id diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index de89de7d74..5b50bd66bc 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -125,8 +125,8 @@ Changes in SCHEMA_VERSION = 82 SCHEMA_COMPAT_VERSION = ( - # The event_txn_id table and tables from MSC2716 no longer exist. - 82 + # The `event_txn_id_device_id` must be written to for new events. + 80 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql b/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql deleted file mode 100644 index 149020bbd7..0000000000 --- a/synapse/storage/schema/main/delta/82/03_drop_old_tables.sql +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright 2023 The Matrix.org Foundation C.I.C - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- Drop the old event transaction ID table, the event_txn_id_device_id table --- should be used instead. -DROP TABLE IF EXISTS event_txn_id; - --- Drop tables related to MSC2716 since the implementation is being removed -DROP TABLE insertion_events; -DROP TABLE insertion_event_edges; -DROP TABLE insertion_event_extremities; -DROP TABLE batch_events; From 166ffc0f23419bc99d9597fe95deaae3bbee7caf Mon Sep 17 00:00:00 2001 From: Laurence Gill Date: Thu, 12 Oct 2023 16:18:32 +0100 Subject: [PATCH 049/142] Fix typo in useful_sql_for_admins.md (#16477) --- changelog.d/16477.doc | 1 + docs/usage/administration/useful_sql_for_admins.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16477.doc diff --git a/changelog.d/16477.doc b/changelog.d/16477.doc new file mode 100644 index 0000000000..ef66e5d305 --- /dev/null +++ b/changelog.d/16477.doc @@ -0,0 +1 @@ +Fix a typo in the sql for [useful SQL for admins document](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html). 
diff --git a/docs/usage/administration/useful_sql_for_admins.md b/docs/usage/administration/useful_sql_for_admins.md index f3b97f9576..9f2cc9b957 100644 --- a/docs/usage/administration/useful_sql_for_admins.md +++ b/docs/usage/administration/useful_sql_for_admins.md @@ -193,7 +193,7 @@ SELECT rss.room_id, rss.name, rss.canonical_alias, rss.topic, rss.encryption, rsc.joined_members, rsc.local_users_in_room, rss.join_rules FROM room_stats_state rss LEFT JOIN room_stats_current rsc USING (room_id) - WHERE room_id IN ( WHERE room_id IN ( + WHERE room_id IN ( '!OGEhHVWSdvArJzumhm:matrix.org', '!YTvKGNlinIzlkMTVRl:matrix.org' ); From 109882230c309d48df143ad370192f0541b636f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Sat, 14 Oct 2023 17:57:27 +0100 Subject: [PATCH 050/142] Clean up logging on event persister endpoints (#16488) --- changelog.d/16488.misc | 1 + synapse/replication/http/federation.py | 6 +++++- synapse/replication/http/send_events.py | 13 ++++++++----- 3 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16488.misc diff --git a/changelog.d/16488.misc b/changelog.d/16488.misc new file mode 100644 index 0000000000..9e70e45b22 --- /dev/null +++ b/changelog.d/16488.misc @@ -0,0 +1 @@ +Clean up logging on event persister endpoints. diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 53ad327030..e728297dce 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -138,7 +138,11 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts.append((event, context)) - logger.info("Got %d events from federation", len(event_and_contexts)) + logger.info( + "Got batch of %i events to persist to room %s", + len(event_and_contexts), + room_id, + ) max_stream_id = await self.federation_event_handler.persist_events_and_notify( room_id, event_and_contexts, backfilled diff --git a/synapse/replication/http/send_events.py b/synapse/replication/http/send_events.py index 4f82c9f96d..8eea256063 100644 --- a/synapse/replication/http/send_events.py +++ b/synapse/replication/http/send_events.py @@ -118,6 +118,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint): with Measure(self.clock, "repl_send_events_parse"): events_and_context = [] events = payload["events"] + rooms = set() for event_payload in events: event_dict = event_payload["event"] @@ -144,11 +145,13 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint): UserID.from_string(u) for u in event_payload["extra_users"] ] - logger.info( - "Got batch of events to send, last ID of batch is: %s, sending into room: %s", - event.event_id, - event.room_id, - ) + # all the rooms *should* be the same, but we'll log separately to be + # sure. + rooms.add(event.room_id) + + logger.info( + "Got batch of %i events to persist to rooms %s", len(events), rooms + ) last_event = ( await self.event_creation_handler.persist_and_notify_client_events( From a832212d4f8df6a315397a4059215910090b78c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:03:40 +0100 Subject: [PATCH 051/142] Bump pyo3-log from 0.8.3 to 0.8.4 (#16495) Bumps [pyo3-log](https://github.com/vorner/pyo3-log) from 0.8.3 to 0.8.4. 
- [Changelog](https://github.com/vorner/pyo3-log/blob/main/CHANGELOG.md) - [Commits](https://github.com/vorner/pyo3-log/compare/v0.8.3...v0.8.4) --- updated-dependencies: - dependency-name: pyo3-log dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2b44b5448..c4821d8254 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,9 +229,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605" +checksum = "c09c2b349b6538d8a73d436ca606dab6ce0aaab4dad9e6b7bdd57a4f556c3bc3" dependencies = [ "arc-swap", "log", From b220f8224eabcf3ca325dc8a3d3becd98093e423 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:04:23 +0100 Subject: [PATCH 052/142] Bump packaging from 23.1 to 23.2 (#16497) Bumps [packaging](https://github.com/pypa/packaging) from 23.1 to 23.2. - [Release notes](https://github.com/pypa/packaging/releases) - [Changelog](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pypa/packaging/compare/23.1...23.2) --- updated-dependencies: - dependency-name: packaging dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index a1a2b83764..94ed0389bb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -767,6 +767,17 @@ files = [ {file = "ijson-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a3a6a2fbbe7550ffe52d151cf76065e6b89cfb3e9d0463e49a7e322a25d0426"}, {file = "ijson-3.2.3-cp311-cp311-win32.whl", hash = "sha256:6a4db2f7fb9acfb855c9ae1aae602e4648dd1f88804a0d5cfb78c3639bcf156c"}, {file = "ijson-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:ccd6be56335cbb845f3d3021b1766299c056c70c4c9165fb2fbe2d62258bae3f"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:055b71bbc37af5c3c5861afe789e15211d2d3d06ac51ee5a647adf4def19c0ea"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c075a547de32f265a5dd139ab2035900fef6653951628862e5cdce0d101af557"}, + {file = "ijson-3.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:457f8a5fc559478ac6b06b6d37ebacb4811f8c5156e997f0d87d708b0d8ab2ae"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9788f0c915351f41f0e69ec2618b81ebfcf9f13d9d67c6d404c7f5afda3e4afb"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa234ab7a6a33ed51494d9d2197fb96296f9217ecae57f5551a55589091e7853"}, + {file = "ijson-3.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd0dc5da4f9dc6d12ab6e8e0c57d8b41d3c8f9ceed31a99dae7b2baf9ea769a"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c6beb80df19713e39e68dc5c337b5c76d36ccf69c30b79034634e5e4c14d6904"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:a2973ce57afb142d96f35a14e9cfec08308ef178a2c76b8b5e1e98f3960438bf"}, + {file = "ijson-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:105c314fd624e81ed20f925271ec506523b8dd236589ab6c0208b8707d652a0e"}, + {file = "ijson-3.2.3-cp312-cp312-win32.whl", hash = "sha256:ac44781de5e901ce8339352bb5594fcb3b94ced315a34dbe840b4cff3450e23b"}, + {file = "ijson-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:0567e8c833825b119e74e10a7c29761dc65fcd155f5d4cb10f9d3b8916ef9912"}, {file = "ijson-3.2.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:eeb286639649fb6bed37997a5e30eefcacddac79476d24128348ec890b2a0ccb"}, {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:396338a655fb9af4ac59dd09c189885b51fa0eefc84d35408662031023c110d1"}, {file = "ijson-3.2.3-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e0243d166d11a2a47c17c7e885debf3b19ed136be2af1f5d1c34212850236ac"}, @@ -1581,13 +1592,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "23.1" +version = "23.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] [[package]] From aaca9773e3797eb14b28d403cc74cd7bf571d908 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:05:20 +0100 Subject: [PATCH 053/142] Bump types-jsonschema from 4.17.0.10 to 4.19.0.3 (#16499) Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.10 to 4.19.0.3. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 94ed0389bb..0a9676244c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3070,15 +3070,18 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.10" +version = "4.19.0.3" description = "Typing stubs for jsonschema" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.17.0.10.tar.gz", hash = "sha256:8e979db34d69bc9f9b3d6e8b89bdbc60b3a41cfce4e1fb87bf191d205c7f5098"}, - {file = "types_jsonschema-4.17.0.10-py3-none-any.whl", hash = "sha256:3aa2a89afbd9eaa6ce0c15618b36f02692a621433889ce73014656f7d8caf971"}, + {file = "types-jsonschema-4.19.0.3.tar.gz", hash = "sha256:e0fc0f5d51fd0988bf193be42174a5376b0096820ff79505d9c1b66de23f0581"}, + {file = "types_jsonschema-4.19.0.3-py3-none-any.whl", hash = "sha256:5cedbb661e5ca88d95b94b79902423e3f97a389c245e5fe0ab384122f27d56b9"}, ] +[package.dependencies] +referencing = "*" + [[package]] name = "types-netaddr" version = "0.9.0.1" From 9be4db29f24a9f8598210acd7b623ad314172c64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:05:28 +0100 Subject: [PATCH 054/142] Bump jsonschema from 4.19.0 to 4.19.1 (#16500) Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.19.0 to 4.19.1. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.19.0...v4.19.1) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0a9676244c..965f1dcce6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -998,13 +998,13 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jsonschema" -version = "4.19.0" +version = "4.19.1" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.19.0-py3-none-any.whl", hash = "sha256:043dc26a3845ff09d20e4420d6012a9c91c9aa8999fa184e7efcfeccb41e32cb"}, - {file = "jsonschema-4.19.0.tar.gz", hash = "sha256:6e1e7569ac13be8139b2dd2c21a55d350066ee3f80df06c608b398cdc6f30e8f"}, + {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, + {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, ] [package.dependencies] From eee6474bce4e387a05428de6f8291933ea6b72f7 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Mon, 16 Oct 2023 12:06:27 +0200 Subject: [PATCH 055/142] Remove useless async job to delete device messages on sync (#16491) --- changelog.d/16491.misc | 1 + synapse/handlers/sync.py | 22 ------------------- synapse/storage/databases/main/deviceinbox.py | 5 +++-- 3 files changed, 4 insertions(+), 24 deletions(-) create mode 100644 changelog.d/16491.misc diff --git a/changelog.d/16491.misc b/changelog.d/16491.misc new file mode 100644 index 0000000000..70b5771373 --- /dev/null +++ b/changelog.d/16491.misc @@ -0,0 +1 @@ +Remove useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 744e080309..60b4d95cd7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -40,7 +40,6 @@ from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase -from synapse.handlers.device import DELETE_DEVICE_MSGS_TASK_NAME from synapse.handlers.relations import BundledAggregations from synapse.logging import issue9533_logger from synapse.logging.context import current_context @@ -363,36 +362,15 @@ class SyncHandler: # (since we now know that the device has received them) if since_token is not None: since_stream_id = since_token.to_device_key - # Fast path: delete a limited number of to-device messages up front. - # We do this to avoid the overhead of scheduling a task for every - # sync. - device_deletion_limit = 100 deleted = await self.store.delete_messages_for_device( sync_config.user.to_string(), sync_config.device_id, since_stream_id, - limit=device_deletion_limit, ) logger.debug( "Deleted %d to-device messages up to %d", deleted, since_stream_id ) - # If we hit the limit, schedule a background task to delete the rest. 
- if deleted >= device_deletion_limit: - await self._task_scheduler.schedule_task( - DELETE_DEVICE_MSGS_TASK_NAME, - resource_id=sync_config.device_id, - params={ - "user_id": sync_config.user.to_string(), - "device_id": sync_config.device_id, - "up_to_stream_id": since_stream_id, - }, - ) - logger.debug( - "Deletion of to-device messages up to %d scheduled", - since_stream_id, - ) - if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 1cf649d371..1faa6f04b2 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): user_id: str, device_id: Optional[str], up_to_stream_id: int, - limit: int, + limit: Optional[int] = None, ) -> int: """ Args: @@ -481,11 +481,12 @@ class DeviceInboxWorkerStore(SQLBaseStore): ROW_ID_NAME = self.database_engine.row_id_name def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: + limit_statement = "" if limit is None else f"LIMIT {limit}" sql = f""" DELETE FROM device_inbox WHERE {ROW_ID_NAME} IN ( SELECT {ROW_ID_NAME} FROM device_inbox WHERE user_id = ? AND device_id = ? AND stream_id <= ? - LIMIT {limit} + {limit_statement} ) """ txn.execute(sql, (user_id, device_id, up_to_stream_id)) From 37d9edcef2cc05f591c5131206447f744e496415 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:23:33 +0000 Subject: [PATCH 056/142] Bump sentry-sdk from 1.31.0 to 1.32.0 (#16496) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.31.0 to 1.32.0. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.31.0...1.32.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 965f1dcce6..6b72b605a1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2494,13 +2494,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.31.0" +version = "1.32.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.31.0.tar.gz", hash = "sha256:6de2e88304873484207fed836388e422aeff000609b104c802749fd89d56ba5b"}, - {file = "sentry_sdk-1.31.0-py2.py3-none-any.whl", hash = "sha256:64a7141005fb775b9db298a30de93e3b83e0ddd1232dc6f36eb38aebc1553291"}, + {file = "sentry-sdk-1.32.0.tar.gz", hash = "sha256:935e8fbd7787a3702457393b74b13d89a5afb67185bc0af85c00cb27cbd42e7c"}, + {file = "sentry_sdk-1.32.0-py2.py3-none-any.whl", hash = "sha256:eeb0b3550536f3bbc05bb1c7e0feb3a78d74acb43b607159a606ed2ec0a33a4d"}, ] [package.dependencies] From 71547246719a449fc04757a6d5712360dad88903 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:31:05 +0000 Subject: [PATCH 057/142] Bump serde from 1.0.188 to 1.0.189 (#16494) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.188 to 1.0.189. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.188...v1.0.189) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c4821d8254..5acf47cea8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", From 4fe73f8f2f42e9e0ab86003489161ac7ed3d9f51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:17:30 +0100 Subject: [PATCH 058/142] Bump pillow from 10.0.1 to 10.1.0 (#16498) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 110 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6b72b605a1..d447411b90 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1639,65 +1639,65 @@ files = [ [[package]] name = "pillow" -version = "10.0.1" +version = "10.1.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.8" files = [ - {file = 
"Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"}, - {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"}, - {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"}, - {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"}, - {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"}, - {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"}, - {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"}, - {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"}, - {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"}, - {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"}, - {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"}, - {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"}, - {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"}, - {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"}, - {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash 
= "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"}, - {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"}, - {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"}, - {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"}, - {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"}, - {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"}, - {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"}, - {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"}, - {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"}, - {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"}, - {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"}, - {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"}, - {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"}, - {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"}, - {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"}, - {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"}, - {file = 
"Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"}, - {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"}, - {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"}, - {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, + {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, + {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, + {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, + {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, + {file = 
"Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, + {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, + {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, ] [package.extras] From e3e0ae4ab1f48974ca66a4c4e6be8019aaa38fd1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 16 Oct 2023 07:35:22 -0400 Subject: [PATCH 059/142] Convert state delta processing from a dict to attrs. (#16469) For improved type checking & memory usage. --- changelog.d/16469.misc | 1 + synapse/handlers/presence.py | 32 +++++----- synapse/handlers/room_member.py | 21 +++--- synapse/handlers/stats.py | 64 +++++++++---------- synapse/handlers/user_directory.py | 34 +++++----- synapse/storage/controllers/state.py | 14 +--- .../storage/databases/main/state_deltas.py | 52 +++++++++------ tests/handlers/test_typing.py | 2 +- 8 files changed, 111 insertions(+), 109 deletions(-) create mode 100644 changelog.d/16469.misc diff --git a/changelog.d/16469.misc b/changelog.d/16469.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16469.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 7c7cda3e95..dfc0b9db07 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -110,6 +110,7 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream from synapse.storage.databases.main import DataStore +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.streams import EventSource from synapse.types import ( JsonDict, @@ -1499,9 +1500,9 @@ class PresenceHandler(BasePresenceHandler): # We may get multiple deltas for different rooms, but we want to # handle them on a room by room basis, so we batch them up by # room. - deltas_by_room: Dict[str, List[JsonDict]] = {} + deltas_by_room: Dict[str, List[StateDelta]] = {} for delta in deltas: - deltas_by_room.setdefault(delta["room_id"], []).append(delta) + deltas_by_room.setdefault(delta.room_id, []).append(delta) for room_id, deltas_for_room in deltas_by_room.items(): await self._handle_state_delta(room_id, deltas_for_room) @@ -1513,7 +1514,7 @@ class PresenceHandler(BasePresenceHandler): max_pos ) - async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None: + async def _handle_state_delta(self, room_id: str, deltas: List[StateDelta]) -> None: """Process current state deltas for the room to find new joins that need to be handled. """ @@ -1524,31 +1525,30 @@ class PresenceHandler(BasePresenceHandler): newly_joined_users = set() for delta in deltas: - assert room_id == delta["room_id"] + assert room_id == delta.room_id - typ = delta["type"] - state_key = delta["state_key"] - event_id = delta["event_id"] - prev_event_id = delta["prev_event_id"] - - logger.debug("Handling: %r %r, %s", typ, state_key, event_id) + logger.debug( + "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id + ) # Drop any event that isn't a membership join - if typ != EventTypes.Member: + if delta.event_type != EventTypes.Member: continue - if event_id is None: + if delta.event_id is None: # state has been deleted, so this is not a join. We only care about # joins. continue - event = await self.store.get_event(event_id, allow_none=True) + event = await self.store.get_event(delta.event_id, allow_none=True) if not event or event.content.get("membership") != Membership.JOIN: # We only care about joins continue - if prev_event_id: - prev_event = await self.store.get_event(prev_event_id, allow_none=True) + if delta.prev_event_id: + prev_event = await self.store.get_event( + delta.prev_event_id, allow_none=True + ) if ( prev_event and prev_event.content.get("membership") == Membership.JOIN @@ -1556,7 +1556,7 @@ class PresenceHandler(BasePresenceHandler): # Ignore changes to join events. continue - newly_joined_users.add(state_key) + newly_joined_users.add(delta.state_key) if not newly_joined_users: # If nobody has joined then there's nothing to do. 
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 130eee7e1d..918eb203e2 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -16,7 +16,7 @@ import abc import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple from synapse import types from synapse.api.constants import ( @@ -44,6 +44,7 @@ from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging import opentracing from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import ( JsonDict, Requester, @@ -2146,24 +2147,18 @@ class RoomForgetterHandler(StateDeltasHandler): await self._store.update_room_forgetter_stream_pos(max_pos) - async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: + async def _handle_deltas(self, deltas: List[StateDelta]) -> None: """Called with the state deltas to process""" for delta in deltas: - typ = delta["type"] - state_key = delta["state_key"] - room_id = delta["room_id"] - event_id = delta["event_id"] - prev_event_id = delta["prev_event_id"] - - if typ != EventTypes.Member: + if delta.event_type != EventTypes.Member: continue - if not self._hs.is_mine_id(state_key): + if not self._hs.is_mine_id(delta.state_key): continue change = await self._get_key_change( - prev_event_id, - event_id, + delta.prev_event_id, + delta.event_id, key_name="membership", public_value=Membership.JOIN, ) @@ -2172,7 +2167,7 @@ class RoomForgetterHandler(StateDeltasHandler): if is_leave: try: await self._room_member_handler.forget( - UserID.from_string(state_key), room_id + UserID.from_string(delta.state_key), delta.room_id ) except SynapseError as e: if e.code == 400: diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 3dde19fc81..817b41aa37 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -27,6 +27,7 @@ from typing import ( from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import JsonDict if TYPE_CHECKING: @@ -142,7 +143,7 @@ class StatsHandler: self.pos = max_pos async def _handle_deltas( - self, deltas: Iterable[JsonDict] + self, deltas: Iterable[StateDelta] ) -> Tuple[Dict[str, CounterType[str]], Dict[str, CounterType[str]]]: """Called with the state deltas to process @@ -157,51 +158,50 @@ class StatsHandler: room_to_state_updates: Dict[str, Dict[str, Any]] = {} for delta in deltas: - typ = delta["type"] - state_key = delta["state_key"] - room_id = delta["room_id"] - event_id = delta["event_id"] - stream_id = delta["stream_id"] - prev_event_id = delta["prev_event_id"] + logger.debug( + "Handling: %r, %r %r, %s", + delta.room_id, + delta.event_type, + delta.state_key, + delta.event_id, + ) - logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id) - - token = await self.store.get_earliest_token_for_stats("room", room_id) + token = await self.store.get_earliest_token_for_stats("room", delta.room_id) # If the earliest token to begin from is larger than our current # stream ID, skip processing 
this delta. - if token is not None and token >= stream_id: + if token is not None and token >= delta.stream_id: logger.debug( "Ignoring: %s as earlier than this room's initial ingestion event", - event_id, + delta.event_id, ) continue - if event_id is None and prev_event_id is None: + if delta.event_id is None and delta.prev_event_id is None: logger.error( "event ID is None and so is the previous event ID. stream_id: %s", - stream_id, + delta.stream_id, ) continue event_content: JsonDict = {} - if event_id is not None: - event = await self.store.get_event(event_id, allow_none=True) + if delta.event_id is not None: + event = await self.store.get_event(delta.event_id, allow_none=True) if event: event_content = event.content or {} # All the values in this dict are deltas (RELATIVE changes) - room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter()) + room_stats_delta = room_to_stats_deltas.setdefault(delta.room_id, Counter()) - room_state = room_to_state_updates.setdefault(room_id, {}) + room_state = room_to_state_updates.setdefault(delta.room_id, {}) - if prev_event_id is None: + if delta.prev_event_id is None: # this state event doesn't overwrite another, # so it is a new effective/current state event room_stats_delta["current_state_events"] += 1 - if typ == EventTypes.Member: + if delta.event_type == EventTypes.Member: # we could use StateDeltasHandler._get_key_change here but it's # a bit inefficient given we're not testing for a specific # result; might as well just grab the prev_membership and @@ -210,9 +210,9 @@ class StatsHandler: # in the absence of a previous event because we do not want to # reduce the leave count when a new-to-the-room user joins. prev_membership = None - if prev_event_id is not None: + if delta.prev_event_id is not None: prev_event = await self.store.get_event( - prev_event_id, allow_none=True + delta.prev_event_id, allow_none=True ) if prev_event: prev_event_content = prev_event.content @@ -256,7 +256,7 @@ class StatsHandler: else: raise ValueError("%r is not a valid membership" % (membership,)) - user_id = state_key + user_id = delta.state_key if self.is_mine_id(user_id): # this accounts for transitions like leave → ban and so on. 
has_changed_joinedness = (prev_membership == Membership.JOIN) != ( @@ -272,30 +272,30 @@ class StatsHandler: room_stats_delta["local_users_in_room"] += membership_delta - elif typ == EventTypes.Create: + elif delta.event_type == EventTypes.Create: room_state["is_federatable"] = ( event_content.get(EventContentFields.FEDERATE, True) is True ) room_type = event_content.get(EventContentFields.ROOM_TYPE) if isinstance(room_type, str): room_state["room_type"] = room_type - elif typ == EventTypes.JoinRules: + elif delta.event_type == EventTypes.JoinRules: room_state["join_rules"] = event_content.get("join_rule") - elif typ == EventTypes.RoomHistoryVisibility: + elif delta.event_type == EventTypes.RoomHistoryVisibility: room_state["history_visibility"] = event_content.get( "history_visibility" ) - elif typ == EventTypes.RoomEncryption: + elif delta.event_type == EventTypes.RoomEncryption: room_state["encryption"] = event_content.get("algorithm") - elif typ == EventTypes.Name: + elif delta.event_type == EventTypes.Name: room_state["name"] = event_content.get("name") - elif typ == EventTypes.Topic: + elif delta.event_type == EventTypes.Topic: room_state["topic"] = event_content.get("topic") - elif typ == EventTypes.RoomAvatar: + elif delta.event_type == EventTypes.RoomAvatar: room_state["avatar"] = event_content.get("url") - elif typ == EventTypes.CanonicalAlias: + elif delta.event_type == EventTypes.CanonicalAlias: room_state["canonical_alias"] = event_content.get("alias") - elif typ == EventTypes.GuestAccess: + elif delta.event_type == EventTypes.GuestAccess: room_state["guest_access"] = event_content.get( EventContentFields.GUEST_ACCESS ) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index a0f5568000..75717ba4f9 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -14,7 +14,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, List, Optional, Set, Tuple from twisted.internet.interfaces import IDelayedCall @@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Memb from synapse.api.errors import Codes, SynapseError from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo from synapse.types import UserID @@ -247,32 +248,31 @@ class UserDirectoryHandler(StateDeltasHandler): await self.store.update_user_directory_stream_pos(max_pos) - async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: + async def _handle_deltas(self, deltas: List[StateDelta]) -> None: """Called with the state deltas to process""" for delta in deltas: - typ = delta["type"] - state_key = delta["state_key"] - room_id = delta["room_id"] - event_id: Optional[str] = delta["event_id"] - prev_event_id: Optional[str] = delta["prev_event_id"] - - logger.debug("Handling: %r %r, %s", typ, state_key, event_id) + logger.debug( + "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id + ) # For join rule and visibility changes we need to check if the room # may have become public or not and add/remove the users in said room - if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules): + if delta.event_type 
in ( + EventTypes.RoomHistoryVisibility, + EventTypes.JoinRules, + ): await self._handle_room_publicity_change( - room_id, prev_event_id, event_id, typ + delta.room_id, delta.prev_event_id, delta.event_id, delta.event_type ) - elif typ == EventTypes.Member: + elif delta.event_type == EventTypes.Member: await self._handle_room_membership_event( - room_id, - prev_event_id, - event_id, - state_key, + delta.room_id, + delta.prev_event_id, + delta.event_id, + delta.state_key, ) else: - logger.debug("Ignoring irrelevant type: %r", typ) + logger.debug("Ignoring irrelevant type: %r", delta.event_type) async def _handle_room_publicity_change( self, diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 46957723a1..9f7959c45d 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -16,7 +16,6 @@ from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, - Any, Callable, Collection, Dict, @@ -32,6 +31,7 @@ from typing import ( from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase from synapse.logging.opentracing import tag_args, trace +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.roommember import ProfileInfo from synapse.storage.util.partial_state_events_tracker import ( PartialCurrentStateTracker, @@ -531,19 +531,9 @@ class StateStorageController: @tag_args async def get_current_state_deltas( self, prev_stream_id: int, max_stream_id: int - ) -> Tuple[int, List[Dict[str, Any]]]: + ) -> Tuple[int, List[StateDelta]]: """Fetch a list of room state changes since the given stream id - Each entry in the result contains the following fields: - - stream_id (int) - - room_id (str) - - type (str): event type - - state_key (str): - - event_id (str|None): new event_id for this state key. None if the - state has been deleted. - - prev_event_id (str|None): previous event_id for this state key. None - if it's new state. - Args: prev_stream_id: point to get changes since (exclusive) max_stream_id: the point that we know has been correctly persisted diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 445213e12a..3151186e0c 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -13,7 +13,9 @@ # limitations under the License. import logging -from typing import Any, Dict, List, Tuple +from typing import List, Optional, Tuple + +import attr from synapse.storage._base import SQLBaseStore from synapse.storage.database import LoggingTransaction @@ -22,6 +24,20 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class StateDelta: + stream_id: int + room_id: str + event_type: str + state_key: str + + event_id: Optional[str] + """new event_id for this state key. None if the state has been deleted.""" + + prev_event_id: Optional[str] + """previous event_id for this state key. None if it's new state.""" + + class StateDeltasStore(SQLBaseStore): # This class must be mixed in with a child class which provides the following # attribute. TODO: can we get static analysis to enforce this? 
@@ -29,31 +45,21 @@ class StateDeltasStore(SQLBaseStore): async def get_partial_current_state_deltas( self, prev_stream_id: int, max_stream_id: int - ) -> Tuple[int, List[Dict[str, Any]]]: + ) -> Tuple[int, List[StateDelta]]: """Fetch a list of room state changes since the given stream id - Each entry in the result contains the following fields: - - stream_id (int) - - room_id (str) - - type (str): event type - - state_key (str): - - event_id (str|None): new event_id for this state key. None if the - state has been deleted. - - prev_event_id (str|None): previous event_id for this state key. None - if it's new state. - This may be the partial state if we're lazy joining the room. Args: prev_stream_id: point to get changes since (exclusive) max_stream_id: the point that we know has been correctly persisted - - ie, an upper limit to return changes from. + - ie, an upper limit to return changes from. Returns: A tuple consisting of: - - the stream id which these results go up to - - list of current_state_delta_stream rows. If it is empty, we are - up to date. + - the stream id which these results go up to + - list of current_state_delta_stream rows. If it is empty, we are + up to date. """ prev_stream_id = int(prev_stream_id) @@ -72,7 +78,7 @@ class StateDeltasStore(SQLBaseStore): def get_current_state_deltas_txn( txn: LoggingTransaction, - ) -> Tuple[int, List[Dict[str, Any]]]: + ) -> Tuple[int, List[StateDelta]]: # First we calculate the max stream id that will give us less than # N results. # We arbitrarily limit to 100 stream_id entries to ensure we don't @@ -112,7 +118,17 @@ class StateDeltasStore(SQLBaseStore): ORDER BY stream_id ASC """ txn.execute(sql, (prev_stream_id, clipped_stream_id)) - return clipped_stream_id, self.db_pool.cursor_to_dict(txn) + return clipped_stream_id, [ + StateDelta( + stream_id=row[0], + room_id=row[1], + event_type=row[2], + state_key=row[3], + event_id=row[4], + prev_event_id=row[5], + ) + for row in txn.fetchall() + ] return await self.db_pool.runInteraction( "get_current_state_deltas", get_current_state_deltas_txn diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 3060bc9744..d7025c6f2c 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -174,7 +174,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): return_value=1 ) - self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) # type: ignore[method-assign] + self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, [])) # type: ignore[method-assign] self.datastore.get_to_device_stream_token = Mock( # type: ignore[method-assign] return_value=0 From 7291c68eea5479d07739bef6af9b9d086095b4d4 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 16 Oct 2023 14:22:50 +0000 Subject: [PATCH 060/142] Update the release script to remind releaser to check for special release notes. (#16461) * Add reminder to check special release notes board in release script * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Update release.py * Bah, black --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/16461.misc | 1 + scripts-dev/release.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/16461.misc diff --git a/changelog.d/16461.misc b/changelog.d/16461.misc new file mode 100644 index 0000000000..96d040b742 --- /dev/null +++ b/changelog.d/16461.misc @@ -0,0 +1 @@ +Update the release script to remind releaser to check for special release notes. 
\ No newline at end of file diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 74f41a40ec..7508ae5096 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -684,6 +684,10 @@ def full(gh_token: str) -> None: click.echo("1. If this is a security release, read the security wiki page.") click.echo("2. Check for any release blockers before proceeding.") click.echo(" https://github.com/matrix-org/synapse/labels/X-Release-Blocker") + click.echo( + "3. Check for any other special release notes, including announcements to add to the changelog or special deployment instructions." + ) + click.echo(" See the 'Synapse Maintainer Report'.") click.confirm("Ready?", abort=True) From 77dfc1f93967f4157ba961c3b5201206c1bbf797 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Oct 2023 07:32:40 -0400 Subject: [PATCH 061/142] Fix a bug where servers could be marked as up when they were failing (#16506) After this change a server will only be reported as back online if they were previously having requests fail. --- changelog.d/16506.bugfix | 1 + synapse/util/retryutils.py | 28 +++++++------ tests/util/test_retryutils.py | 75 +++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 12 deletions(-) create mode 100644 changelog.d/16506.bugfix diff --git a/changelog.d/16506.bugfix b/changelog.d/16506.bugfix new file mode 100644 index 0000000000..a2c7e82b9e --- /dev/null +++ b/changelog.d/16506.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.59.0 where servers would be incorrectly marked as available when a request resulted in an error. diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 0e1f907667..547202c96b 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -170,10 +170,10 @@ class RetryDestinationLimiter: database in milliseconds, or zero if the last request was successful. backoff_on_404: Back off if we get a 404 - backoff_on_failure: set to False if we should not increase the retry interval on a failure. - + notifier: A notifier used to mark servers as up. + replication_client A replication client used to mark servers as up. backoff_on_all_error_codes: Whether we should back off on any error code. """ @@ -237,6 +237,9 @@ class RetryDestinationLimiter: else: valid_err_code = False + # Whether previous requests to the destination had been failing. + previously_failing = bool(self.failure_ts) + if success: # We connected successfully. if not self.retry_interval: @@ -282,6 +285,9 @@ class RetryDestinationLimiter: if self.failure_ts is None: self.failure_ts = retry_last_ts + # Whether the current request to the destination had been failing. + currently_failing = bool(self.failure_ts) + async def store_retry_timings() -> None: try: await self.store.set_destination_retry_timings( @@ -291,17 +297,15 @@ class RetryDestinationLimiter: self.retry_interval, ) - if self.notifier: - # Inform the relevant places that the remote server is back up. - self.notifier.notify_remote_server_up(self.destination) + # If the server was previously failing, but is no longer. + if previously_failing and not currently_failing: + if self.notifier: + # Inform the relevant places that the remote server is back up. + self.notifier.notify_remote_server_up(self.destination) - if self.replication_client: - # If we're on a worker we try and inform master about this. 
The - # replication client doesn't hook into the notifier to avoid - # infinite loops where we send a `REMOTE_SERVER_UP` command to - # master, which then echoes it back to us which in turn pokes - # the notifier. - self.replication_client.send_remote_server_up(self.destination) + if self.replication_client: + # Inform other workers that the remote server is up. + self.replication_client.send_remote_server_up(self.destination) except Exception: logger.exception("Failed to store destination_retry_timings") diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 4bcd17a6fc..ad88b24566 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -11,6 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from unittest import mock + +from synapse.notifier import Notifier +from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from tests.unittest import HomeserverTestCase @@ -109,6 +113,77 @@ class RetryLimiterTestCase(HomeserverTestCase): new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) self.assertIsNone(new_timings) + def test_notifier_replication(self) -> None: + """Ensure the notifier/replication client is called only when expected.""" + store = self.hs.get_datastores().main + + notifier = mock.Mock(spec=Notifier) + replication_client = mock.Mock(spec=ReplicationCommandHandler) + + limiter = self.get_success( + get_retry_limiter( + "test_dest", + self.clock, + store, + notifier=notifier, + replication_client=replication_client, + ) + ) + + # The server is already up, nothing should occur. + self.pump(1) + with limiter: + pass + self.pump() + + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + self.assertIsNone(new_timings) + notifier.notify_remote_server_up.assert_not_called() + replication_client.send_remote_server_up.assert_not_called() + + # Attempt again, but return an error. This will cause new retry timings, but + # should not trigger server up notifications. + self.pump(1) + try: + with limiter: + raise AssertionError("argh") + except AssertionError: + pass + self.pump() + + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + # The exact retry timings are tested separately. + self.assertIsNotNone(new_timings) + notifier.notify_remote_server_up.assert_not_called() + replication_client.send_remote_server_up.assert_not_called() + + # A second failing request should be treated as the above. + self.pump(1) + try: + with limiter: + raise AssertionError("argh") + except AssertionError: + pass + self.pump() + + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + # The exact retry timings are tested separately. + self.assertIsNotNone(new_timings) + notifier.notify_remote_server_up.assert_not_called() + replication_client.send_remote_server_up.assert_not_called() + + # A final successful attempt should generate a server up notification. + self.pump(1) + with limiter: + pass + self.pump() + + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + # The exact retry timings are tested separately. 
+ self.assertIsNone(new_timings) + notifier.notify_remote_server_up.assert_called_once_with("test_dest") + replication_client.send_remote_server_up.assert_called_once_with("test_dest") + def test_max_retry_interval(self) -> None: """Test that `destination_max_retry_interval` setting works as expected""" store = self.hs.get_datastores().main From 6ad1f9eac2c5ffc496597acbc5728482441c64c7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Oct 2023 08:47:42 -0400 Subject: [PATCH 062/142] Convert DeviceLastConnectionInfo to attrs. (#16507) To improve type safety & memory usage. --- changelog.d/16507.misc | 1 + synapse/handlers/device.py | 23 +--- synapse/storage/databases/main/client_ips.py | 46 ++++--- tests/storage/test_client_ips.py | 137 ++++++++++--------- 4 files changed, 104 insertions(+), 103 deletions(-) create mode 100644 changelog.d/16507.misc diff --git a/changelog.d/16507.misc b/changelog.d/16507.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16507.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 50df4f2b06..544bc7c13d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -14,17 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Iterable, - List, - Mapping, - Optional, - Set, - Tuple, -) +from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple from synapse.api import errors from synapse.api.constants import EduTypes, EventTypes @@ -41,6 +31,7 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, ) +from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo from synapse.types import ( JsonDict, JsonMapping, @@ -1008,14 +999,14 @@ class DeviceHandler(DeviceWorkerHandler): def _update_device_from_client_ips( - device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]] + device: JsonDict, client_ips: Mapping[Tuple[str, str], DeviceLastConnectionInfo] ) -> None: - ip = client_ips.get((device["user_id"], device["device_id"]), {}) + ip = client_ips.get((device["user_id"], device["device_id"])) device.update( { - "last_seen_user_agent": ip.get("user_agent"), - "last_seen_ts": ip.get("last_seen"), - "last_seen_ip": ip.get("ip"), + "last_seen_user_agent": ip.user_agent if ip else None, + "last_seen_ts": ip.last_seen if ip else None, + "last_seen_ip": ip.ip if ip else None, } ) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 7da47c3dd7..8be1511859 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -15,6 +15,7 @@ import logging from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union, cast +import attr from typing_extensions import TypedDict from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -42,7 +43,8 @@ logger = logging.getLogger(__name__) LAST_SEEN_GRANULARITY = 120 * 1000 -class DeviceLastConnectionInfo(TypedDict): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class DeviceLastConnectionInfo: """Metadata for the last connection seen for a user and device combination""" # These types must match the columns in the `devices` table @@ -499,24 +501,29 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, 
MonthlyActiveUsersWorke device_id: If None fetches all devices for the user Returns: - A dictionary mapping a tuple of (user_id, device_id) to dicts, with - keys giving the column names from the devices table. + A dictionary mapping a tuple of (user_id, device_id) to DeviceLastConnectionInfo. """ keyvalues = {"user_id": user_id} if device_id is not None: keyvalues["device_id"] = device_id - res = cast( - List[DeviceLastConnectionInfo], - await self.db_pool.simple_select_list( - table="devices", - keyvalues=keyvalues, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), - ), + res = await self.db_pool.simple_select_list( + table="devices", + keyvalues=keyvalues, + retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), ) - return {(d["user_id"], d["device_id"]): d for d in res} + return { + (d["user_id"], d["device_id"]): DeviceLastConnectionInfo( + user_id=d["user_id"], + device_id=d["device_id"], + ip=d["ip"], + user_agent=d["user_agent"], + last_seen=d["last_seen"], + ) + for d in res + } async def _get_user_ip_and_agents_from_database( self, user: UserID, since_ts: int = 0 @@ -683,8 +690,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke device_id: If None fetches all devices for the user Returns: - A dictionary mapping a tuple of (user_id, device_id) to dicts, with - keys giving the column names from the devices table. + A dictionary mapping a tuple of (user_id, device_id) to DeviceLastConnectionInfo. """ ret = await self._get_last_client_ip_by_device_from_database(user_id, device_id) @@ -705,13 +711,13 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke continue if not device_id or did == device_id: - ret[(user_id, did)] = { - "user_id": user_id, - "ip": ip, - "user_agent": user_agent, - "device_id": did, - "last_seen": last_seen, - } + ret[(user_id, did)] = DeviceLastConnectionInfo( + user_id=user_id, + ip=ip, + user_agent=user_agent, + device_id=did, + last_seen=last_seen, + ) return ret async def get_user_ip_and_agents( diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 6b9692c486..0c054a598f 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -24,7 +24,10 @@ import synapse.rest.admin from synapse.http.site import XForwardedForRequest from synapse.rest.client import login from synapse.server import HomeServer -from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY +from synapse.storage.databases.main.client_ips import ( + LAST_SEEN_GRANULARITY, + DeviceLastConnectionInfo, +) from synapse.types import UserID from synapse.util import Clock @@ -65,15 +68,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertLessEqual( - { - "user_id": user_id, - "device_id": device_id, - "ip": "ip", - "user_agent": "user_agent", - "last_seen": 12345678000, - }.items(), - r.items(), + self.assertEqual( + DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip="ip", + user_agent="user_agent", + last_seen=12345678000, + ), + r, ) def test_insert_new_client_ip_none_device_id(self) -> None: @@ -201,13 +204,13 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( result, { - (user_id, device_id): { - "user_id": user_id, - "device_id": device_id, - "ip": "ip", - "user_agent": "user_agent", - "last_seen": 12345678000, - }, + (user_id, device_id): DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip="ip", + 
user_agent="user_agent", + last_seen=12345678000, + ), }, ) @@ -292,20 +295,20 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( result, { - (user_id, device_id_1): { - "user_id": user_id, - "device_id": device_id_1, - "ip": "ip_1", - "user_agent": "user_agent_1", - "last_seen": 12345678000, - }, - (user_id, device_id_2): { - "user_id": user_id, - "device_id": device_id_2, - "ip": "ip_2", - "user_agent": "user_agent_3", - "last_seen": 12345688000 + LAST_SEEN_GRANULARITY, - }, + (user_id, device_id_1): DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id_1, + ip="ip_1", + user_agent="user_agent_1", + last_seen=12345678000, + ), + (user_id, device_id_2): DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id_2, + ip="ip_2", + user_agent="user_agent_3", + last_seen=12345688000 + LAST_SEEN_GRANULARITY, + ), }, ) @@ -526,15 +529,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertLessEqual( - { - "user_id": user_id, - "device_id": device_id, - "ip": None, - "user_agent": None, - "last_seen": None, - }.items(), - r.items(), + self.assertEqual( + DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip=None, + user_agent=None, + last_seen=None, + ), + r, ) # Register the background update to run again. @@ -561,15 +564,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result[(user_id, device_id)] - self.assertLessEqual( - { - "user_id": user_id, - "device_id": device_id, - "ip": "ip", - "user_agent": "user_agent", - "last_seen": 0, - }.items(), - r.items(), + self.assertEqual( + DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip="ip", + user_agent="user_agent", + last_seen=0, + ), + r, ) def test_old_user_ips_pruned(self) -> None: @@ -640,15 +643,15 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) r = result2[(user_id, device_id)] - self.assertLessEqual( - { - "user_id": user_id, - "device_id": device_id, - "ip": "ip", - "user_agent": "user_agent", - "last_seen": 0, - }.items(), - r.items(), + self.assertEqual( + DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip="ip", + user_agent="user_agent", + last_seen=0, + ), + r, ) def test_invalid_user_agents_are_ignored(self) -> None: @@ -777,13 +780,13 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase): self.store.get_last_client_ip_by_device(self.user_id, device_id) ) r = result[(self.user_id, device_id)] - self.assertLessEqual( - { - "user_id": self.user_id, - "device_id": device_id, - "ip": expected_ip, - "user_agent": "Mozzila pizza", - "last_seen": 123456100, - }.items(), - r.items(), + self.assertEqual( + DeviceLastConnectionInfo( + user_id=self.user_id, + device_id=device_id, + ip=expected_ip, + user_agent="Mozzila pizza", + last_seen=123456100, + ), + r, ) From 6e6d611f555245a8302396936dd4dae1f28e3b2a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Oct 2023 11:54:45 -0400 Subject: [PATCH 063/142] 1.95.0rc1 --- CHANGES.md | 48 ++++++++++++++++++++++++++++++++++++++++ changelog.d/16162.misc | 1 - changelog.d/16403.bugfix | 1 - changelog.d/16404.bugfix | 1 - changelog.d/16419.misc | 1 - changelog.d/16420.doc | 1 - changelog.d/16421.misc | 1 - changelog.d/16426.misc | 1 - changelog.d/16427.misc | 1 - changelog.d/16428.misc | 1 - changelog.d/16429.misc | 1 - changelog.d/16431.misc | 1 - changelog.d/16433.misc | 1 - changelog.d/16434.misc | 1 - changelog.d/16435.misc | 1 - changelog.d/16438.misc | 1 - changelog.d/16440.bugfix | 
1 - changelog.d/16441.misc | 1 - changelog.d/16444.misc | 1 - changelog.d/16454.misc | 1 - changelog.d/16455.bugfix | 1 - changelog.d/16457.bugfix | 1 - changelog.d/16461.misc | 1 - changelog.d/16466.misc | 1 - changelog.d/16468.misc | 1 - changelog.d/16469.misc | 1 - changelog.d/16477.doc | 1 - changelog.d/16488.misc | 1 - changelog.d/16491.misc | 1 - changelog.d/16506.bugfix | 1 - changelog.d/16507.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 33 files changed, 55 insertions(+), 31 deletions(-) delete mode 100644 changelog.d/16162.misc delete mode 100644 changelog.d/16403.bugfix delete mode 100644 changelog.d/16404.bugfix delete mode 100644 changelog.d/16419.misc delete mode 100644 changelog.d/16420.doc delete mode 100644 changelog.d/16421.misc delete mode 100644 changelog.d/16426.misc delete mode 100644 changelog.d/16427.misc delete mode 100644 changelog.d/16428.misc delete mode 100644 changelog.d/16429.misc delete mode 100644 changelog.d/16431.misc delete mode 100644 changelog.d/16433.misc delete mode 100644 changelog.d/16434.misc delete mode 100644 changelog.d/16435.misc delete mode 100644 changelog.d/16438.misc delete mode 100644 changelog.d/16440.bugfix delete mode 100644 changelog.d/16441.misc delete mode 100644 changelog.d/16444.misc delete mode 100644 changelog.d/16454.misc delete mode 100644 changelog.d/16455.bugfix delete mode 100644 changelog.d/16457.bugfix delete mode 100644 changelog.d/16461.misc delete mode 100644 changelog.d/16466.misc delete mode 100644 changelog.d/16468.misc delete mode 100644 changelog.d/16469.misc delete mode 100644 changelog.d/16477.doc delete mode 100644 changelog.d/16488.misc delete mode 100644 changelog.d/16491.misc delete mode 100644 changelog.d/16506.bugfix delete mode 100644 changelog.d/16507.misc diff --git a/CHANGES.md b/CHANGES.md index 6f42ebba93..1943f7e2ea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,51 @@ +# Synapse 1.95.0rc1 (2023-10-17) + +### Bugfixes + +- Remove legacy unspecced `knock_state_events` field returned in some responses. ([\#16403](https://github.com/matrix-org/synapse/issues/16403)) +- Fixes possbile `AttributeError` when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404)) +- Properly return inline media when content types have parameters. ([\#16440](https://github.com/matrix-org/synapse/issues/16440)) +- Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. ([\#16455](https://github.com/matrix-org/synapse/issues/16455)) +- Improve the performance of purging rooms, particularly encrypted rooms. ([\#16457](https://github.com/matrix-org/synapse/issues/16457)) +- Fix a bug introduced in Synapse 1.59.0 where servers would be incorrectly marked as available when a request resulted in an error. ([\#16506](https://github.com/matrix-org/synapse/issues/16506)) + +### Improved Documentation + +- Document internal background update mechanism. ([\#16420](https://github.com/matrix-org/synapse/issues/16420)) +- Fix a typo in the sql for [useful SQL for admins document](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html). ([\#16477](https://github.com/matrix-org/synapse/issues/16477)) + +### Internal Changes + +- Bump pyo3 from 0.17.1 to 0.19.2. ([\#16162](https://github.com/matrix-org/synapse/issues/16162)) +- Update registration of media repository URLs. 
([\#16419](https://github.com/matrix-org/synapse/issues/16419)) +- Improve type hints. ([\#16421](https://github.com/matrix-org/synapse/issues/16421), [\#16468](https://github.com/matrix-org/synapse/issues/16468), [\#16469](https://github.com/matrix-org/synapse/issues/16469), [\#16507](https://github.com/matrix-org/synapse/issues/16507)) +- Refactor some code to simplify and better type receipts stream adjacent code. ([\#16426](https://github.com/matrix-org/synapse/issues/16426)) +- Factor out `MultiWriter` token from `RoomStreamToken`. ([\#16427](https://github.com/matrix-org/synapse/issues/16427)) +- Improve code comments. ([\#16428](https://github.com/matrix-org/synapse/issues/16428)) +- Reduce memory allocations. ([\#16429](https://github.com/matrix-org/synapse/issues/16429), [\#16431](https://github.com/matrix-org/synapse/issues/16431), [\#16433](https://github.com/matrix-org/synapse/issues/16433), [\#16434](https://github.com/matrix-org/synapse/issues/16434), [\#16438](https://github.com/matrix-org/synapse/issues/16438), [\#16444](https://github.com/matrix-org/synapse/issues/16444)) +- Remove unused method. ([\#16435](https://github.com/matrix-org/synapse/issues/16435)) +- Improve rate limiting logic. ([\#16441](https://github.com/matrix-org/synapse/issues/16441)) +- Do not block running of CI behind the check for sign-off on PRs. ([\#16454](https://github.com/matrix-org/synapse/issues/16454)) +- Update the release script to remind releaser to check for special release notes. ([\#16461](https://github.com/matrix-org/synapse/issues/16461)) +- Update complement.sh to match new public API shape. ([\#16466](https://github.com/matrix-org/synapse/issues/16466)) +- Clean up logging on event persister endpoints. ([\#16488](https://github.com/matrix-org/synapse/issues/16488)) +- Remove useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. ([\#16491](https://github.com/matrix-org/synapse/issues/16491)) + +### Updates to locked dependencies + +* Bump bleach from 6.0.0 to 6.1.0. ([\#16451](https://github.com/matrix-org/synapse/issues/16451)) +* Bump jsonschema from 4.19.0 to 4.19.1. ([\#16500](https://github.com/matrix-org/synapse/issues/16500)) +* Bump netaddr from 0.8.0 to 0.9.0. ([\#16453](https://github.com/matrix-org/synapse/issues/16453)) +* Bump packaging from 23.1 to 23.2. ([\#16497](https://github.com/matrix-org/synapse/issues/16497)) +* Bump pillow from 10.0.1 to 10.1.0. ([\#16498](https://github.com/matrix-org/synapse/issues/16498)) +* Bump psycopg2 from 2.9.8 to 2.9.9. ([\#16452](https://github.com/matrix-org/synapse/issues/16452)) +* Bump pyo3-log from 0.8.3 to 0.8.4. ([\#16495](https://github.com/matrix-org/synapse/issues/16495)) +* Bump ruff from 0.0.290 to 0.0.292. ([\#16449](https://github.com/matrix-org/synapse/issues/16449)) +* Bump sentry-sdk from 1.31.0 to 1.32.0. ([\#16496](https://github.com/matrix-org/synapse/issues/16496)) +* Bump serde from 1.0.188 to 1.0.189. ([\#16494](https://github.com/matrix-org/synapse/issues/16494)) +* Bump types-bleach from 6.0.0.4 to 6.1.0.0. ([\#16450](https://github.com/matrix-org/synapse/issues/16450)) +* Bump types-jsonschema from 4.17.0.10 to 4.19.0.3. ([\#16499](https://github.com/matrix-org/synapse/issues/16499)) + # Synapse 1.94.0 (2023-10-10) No significant changes since 1.94.0rc1. 
diff --git a/changelog.d/16162.misc b/changelog.d/16162.misc deleted file mode 100644 index b6c77229c1..0000000000 --- a/changelog.d/16162.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pyo3 from 0.17.1 to 0.19.2. diff --git a/changelog.d/16403.bugfix b/changelog.d/16403.bugfix deleted file mode 100644 index 453c975a63..0000000000 --- a/changelog.d/16403.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove legacy unspecced `knock_state_events` field returned in some responses. diff --git a/changelog.d/16404.bugfix b/changelog.d/16404.bugfix deleted file mode 100644 index 3fd5028b33..0000000000 --- a/changelog.d/16404.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes possbile `AttributeError` when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. diff --git a/changelog.d/16419.misc b/changelog.d/16419.misc deleted file mode 100644 index 591f371d00..0000000000 --- a/changelog.d/16419.misc +++ /dev/null @@ -1 +0,0 @@ -Update registration of media repository URLs. diff --git a/changelog.d/16420.doc b/changelog.d/16420.doc deleted file mode 100644 index 1c0c6b9577..0000000000 --- a/changelog.d/16420.doc +++ /dev/null @@ -1 +0,0 @@ -Document internal background update mechanism. diff --git a/changelog.d/16421.misc b/changelog.d/16421.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16421.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16426.misc b/changelog.d/16426.misc deleted file mode 100644 index 208a007171..0000000000 --- a/changelog.d/16426.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor some code to simplify and better type receipts stream adjacent code. diff --git a/changelog.d/16427.misc b/changelog.d/16427.misc deleted file mode 100644 index 44f0e0595e..0000000000 --- a/changelog.d/16427.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out `MultiWriter` token from `RoomStreamToken`. diff --git a/changelog.d/16428.misc b/changelog.d/16428.misc deleted file mode 100644 index 75c9c3b757..0000000000 --- a/changelog.d/16428.misc +++ /dev/null @@ -1 +0,0 @@ -Improve code comments. diff --git a/changelog.d/16429.misc b/changelog.d/16429.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16429.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16431.misc b/changelog.d/16431.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16431.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16433.misc b/changelog.d/16433.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16433.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16434.misc b/changelog.d/16434.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16434.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16435.misc b/changelog.d/16435.misc deleted file mode 100644 index e541607161..0000000000 --- a/changelog.d/16435.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused method. diff --git a/changelog.d/16438.misc b/changelog.d/16438.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16438.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16440.bugfix b/changelog.d/16440.bugfix deleted file mode 100644 index 6ce0b1e4af..0000000000 --- a/changelog.d/16440.bugfix +++ /dev/null @@ -1 +0,0 @@ -Properly return inline media when content types have parameters. 
diff --git a/changelog.d/16441.misc b/changelog.d/16441.misc deleted file mode 100644 index 32264a62b2..0000000000 --- a/changelog.d/16441.misc +++ /dev/null @@ -1 +0,0 @@ -Improve rate limiting logic. diff --git a/changelog.d/16444.misc b/changelog.d/16444.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16444.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16454.misc b/changelog.d/16454.misc deleted file mode 100644 index 1e75dc436f..0000000000 --- a/changelog.d/16454.misc +++ /dev/null @@ -1 +0,0 @@ -Do not block running of CI behind the check for sign-off on PRs. diff --git a/changelog.d/16455.bugfix b/changelog.d/16455.bugfix deleted file mode 100644 index 653a25d3b6..0000000000 --- a/changelog.d/16455.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. diff --git a/changelog.d/16457.bugfix b/changelog.d/16457.bugfix deleted file mode 100644 index b9a95cc510..0000000000 --- a/changelog.d/16457.bugfix +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of purging rooms, particularly encrypted rooms. diff --git a/changelog.d/16461.misc b/changelog.d/16461.misc deleted file mode 100644 index 96d040b742..0000000000 --- a/changelog.d/16461.misc +++ /dev/null @@ -1 +0,0 @@ -Update the release script to remind releaser to check for special release notes. \ No newline at end of file diff --git a/changelog.d/16466.misc b/changelog.d/16466.misc deleted file mode 100644 index 471056bb0f..0000000000 --- a/changelog.d/16466.misc +++ /dev/null @@ -1 +0,0 @@ -Update complement.sh to match new public API shape. diff --git a/changelog.d/16468.misc b/changelog.d/16468.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16468.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16469.misc b/changelog.d/16469.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16469.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16477.doc b/changelog.d/16477.doc deleted file mode 100644 index ef66e5d305..0000000000 --- a/changelog.d/16477.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the sql for [useful SQL for admins document](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html). diff --git a/changelog.d/16488.misc b/changelog.d/16488.misc deleted file mode 100644 index 9e70e45b22..0000000000 --- a/changelog.d/16488.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up logging on event persister endpoints. diff --git a/changelog.d/16491.misc b/changelog.d/16491.misc deleted file mode 100644 index 70b5771373..0000000000 --- a/changelog.d/16491.misc +++ /dev/null @@ -1 +0,0 @@ -Remove useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. diff --git a/changelog.d/16506.bugfix b/changelog.d/16506.bugfix deleted file mode 100644 index a2c7e82b9e..0000000000 --- a/changelog.d/16506.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.59.0 where servers would be incorrectly marked as available when a request resulted in an error. diff --git a/changelog.d/16507.misc b/changelog.d/16507.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16507.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/debian/changelog b/debian/changelog index 57479ca8e5..979d5facfa 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.95.0~rc1) stable; urgency=medium + + * New synapse release 1.95.0rc1. + + -- Synapse Packaging team Tue, 17 Oct 2023 15:50:17 +0000 + matrix-synapse-py3 (1.94.0) stable; urgency=medium * New Synapse release 1.94.0. diff --git a/pyproject.toml b/pyproject.toml index 0831510890..498b663bae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.94.0" +version = "1.95.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From c81908bcd9535ba02b8e12824d017d512c853655 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Oct 2023 13:07:12 -0400 Subject: [PATCH 064/142] Update the changelog. --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1943f7e2ea..0cabf8a6ec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,11 +3,11 @@ ### Bugfixes - Remove legacy unspecced `knock_state_events` field returned in some responses. ([\#16403](https://github.com/matrix-org/synapse/issues/16403)) -- Fixes possbile `AttributeError` when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404)) +- Fix a bug introduced in Synapse 1.81.0 where an `AttributeError` would be raised when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404)) - Properly return inline media when content types have parameters. ([\#16440](https://github.com/matrix-org/synapse/issues/16440)) - Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. ([\#16455](https://github.com/matrix-org/synapse/issues/16455)) - Improve the performance of purging rooms, particularly encrypted rooms. ([\#16457](https://github.com/matrix-org/synapse/issues/16457)) -- Fix a bug introduced in Synapse 1.59.0 where servers would be incorrectly marked as available when a request resulted in an error. ([\#16506](https://github.com/matrix-org/synapse/issues/16506)) +- Fix a bug introduced in Synapse 1.59.0 where servers could be incorrectly marked as available after an error response was received. 
([\#16506](https://github.com/matrix-org/synapse/issues/16506)) ### Improved Documentation From 68d9559fef16df1fec278100026217457da5a7a3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 17 Oct 2023 14:41:10 -0400 Subject: [PATCH 065/142] Test against Python 3.12 release (#16511) --- .ci/scripts/calculate_jobs.py | 4 ++-- changelog.d/16511.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16511.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 7575683ab4..ab1d214727 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -47,7 +47,7 @@ if not IS_PR: "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11", "3.12.0-rc.2") + for version in ("3.9", "3.10", "3.11", "3.12") ) trial_postgres_tests = [ @@ -62,7 +62,7 @@ trial_postgres_tests = [ if not IS_PR: trial_postgres_tests.append( { - "python-version": "3.11", + "python-version": "3.12", "database": "postgres", "postgres-version": "16", "extras": "all", diff --git a/changelog.d/16511.misc b/changelog.d/16511.misc new file mode 100644 index 0000000000..7b7d9ee5b8 --- /dev/null +++ b/changelog.d/16511.misc @@ -0,0 +1 @@ +Run tests against Python 3.12. From 19033313e6b8d506b700b0f66bffe3bc33a03463 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Oct 2023 11:58:16 +0100 Subject: [PATCH 066/142] Bump urllib3 from 1.26.17 to 1.26.18 (#16516) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index d447411b90..a891280277 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3211,13 +3211,13 @@ files = [ [[package]] name = "urllib3" -version = "1.26.17" +version = "1.26.18" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, - {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, ] [package.extras] From 8841db4d27735456086a3c766548e14a9e38496a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 18 Oct 2023 07:19:53 -0400 Subject: [PATCH 067/142] Run trial/integration tests if .ci is modified. 
(#16512) --- .github/workflows/tests.yml | 6 ++++++ changelog.d/16512.misc | 1 + 2 files changed, 7 insertions(+) create mode 100644 changelog.d/16512.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fcbd40b746..13746608d4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,15 +37,18 @@ jobs: - 'Cargo.toml' - 'Cargo.lock' - '.rustfmt.toml' + - '.github/workflows/tests.yml' trial: - 'synapse/**' - 'tests/**' - 'rust/**' + - '.ci/scripts/calculate_jobs.py' - 'Cargo.toml' - 'Cargo.lock' - 'pyproject.toml' - 'poetry.lock' + - '.github/workflows/tests.yml' integration: - 'synapse/**' @@ -56,7 +59,9 @@ jobs: - 'pyproject.toml' - 'poetry.lock' - 'docker/**' + - '.ci/**' - 'scripts-dev/complement.sh' + - '.github/workflows/tests.yml' linting: - 'synapse/**' @@ -70,6 +75,7 @@ jobs: - 'mypy.ini' - 'pyproject.toml' - 'poetry.lock' + - '.github/workflows/tests.yml' check-sampleconfig: runs-on: ubuntu-latest diff --git a/changelog.d/16512.misc b/changelog.d/16512.misc new file mode 100644 index 0000000000..dcc53510c4 --- /dev/null +++ b/changelog.d/16512.misc @@ -0,0 +1 @@ +Run trial & integration tests in continuous integration when `.ci` directory is modified. From bcff01b40673238dca29c0f22dc4fda05f635030 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Wed, 18 Oct 2023 17:42:01 +0200 Subject: [PATCH 068/142] Improve performance of delete device messages query (#16492) --- changelog.d/16492.misc | 1 + synapse/handlers/device.py | 2 ++ synapse/storage/databases/main/deviceinbox.py | 15 ++++++++------- 3 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16492.misc diff --git a/changelog.d/16492.misc b/changelog.d/16492.misc new file mode 100644 index 0000000000..ecb3356bdd --- /dev/null +++ b/changelog.d/16492.misc @@ -0,0 +1 @@ +Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 544bc7c13d..3ce96ef3cb 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -592,6 +592,8 @@ class DeviceHandler(DeviceWorkerHandler): ) # Delete device messages asynchronously and in batches using the task scheduler + # We specify an upper stream id to avoid deleting non delivered messages + # if an user re-uses a device ID. await self._task_scheduler.schedule_task( DELETE_DEVICE_MSGS_TASK_NAME, resource_id=device_id, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 1faa6f04b2..3e7425d4a6 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -478,18 +478,19 @@ class DeviceInboxWorkerStore(SQLBaseStore): log_kv({"message": "No changes in cache since last check"}) return 0 - ROW_ID_NAME = self.database_engine.row_id_name - def delete_messages_for_device_txn(txn: LoggingTransaction) -> int: limit_statement = "" if limit is None else f"LIMIT {limit}" sql = f""" - DELETE FROM device_inbox WHERE {ROW_ID_NAME} IN ( - SELECT {ROW_ID_NAME} FROM device_inbox - WHERE user_id = ? AND device_id = ? AND stream_id <= ? - {limit_statement} + DELETE FROM device_inbox WHERE user_id = ? AND device_id = ? AND stream_id <= ( + SELECT MAX(stream_id) FROM ( + SELECT stream_id FROM device_inbox + WHERE user_id = ? AND device_id = ? AND stream_id <= ? 
+ ORDER BY stream_id + {limit_statement} + ) AS q1 ) """ - txn.execute(sql, (user_id, device_id, up_to_stream_id)) + txn.execute(sql, (user_id, device_id, user_id, device_id, up_to_stream_id)) return txn.rowcount count = await self.db_pool.runInteraction( From 49c9745b4516dec8728c260f1a6784f2c510110c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 18 Oct 2023 12:26:01 -0400 Subject: [PATCH 069/142] Avoid sending massive replication updates when purging a room. (#16510) --- changelog.d/16510.misc | 1 + synapse/replication/tcp/streams/events.py | 45 +++++++++- synapse/storage/databases/main/cache.py | 8 ++ tests/replication/tcp/streams/test_events.py | 89 ++++++++++++++------ 4 files changed, 114 insertions(+), 29 deletions(-) create mode 100644 changelog.d/16510.misc diff --git a/changelog.d/16510.misc b/changelog.d/16510.misc new file mode 100644 index 0000000000..5556b5d74c --- /dev/null +++ b/changelog.d/16510.misc @@ -0,0 +1 @@ +Improve replication performance when purging rooms. diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index ad9b760713..da6d948e1b 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import heapq +from collections import defaultdict from typing import TYPE_CHECKING, Iterable, Optional, Tuple, Type, TypeVar, cast import attr @@ -51,8 +52,19 @@ data part are: * The state_key of the state which has changed * The event id of the new state +A "state-all" row is sent whenever the "current state" in a room changes, but there are +too many state updates for a particular room in the same update. This replaces any +"state" rows on a per-room basis. The fields in the data part are: + +* The room id for the state changes + """ +# Any room with more than _MAX_STATE_UPDATES_PER_ROOM will send a EventsStreamAllStateRow +# instead of individual EventsStreamEventRow. This is predominantly useful when +# purging large rooms. +_MAX_STATE_UPDATES_PER_ROOM = 150 + @attr.s(slots=True, frozen=True, auto_attribs=True) class EventsStreamRow: @@ -111,9 +123,17 @@ class EventsStreamCurrentStateRow(BaseEventsStreamRow): event_id: Optional[str] +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventsStreamAllStateRow(BaseEventsStreamRow): + TypeId = "state-all" + + room_id: str + + _EventRows: Tuple[Type[BaseEventsStreamRow], ...] = ( EventsStreamEventRow, EventsStreamCurrentStateRow, + EventsStreamAllStateRow, ) TypeToRow = {Row.TypeId: Row for Row in _EventRows} @@ -213,9 +233,28 @@ class EventsStream(Stream): if stream_id <= upper_limit ) + # Separate out rooms that have many state updates, listeners should clear + # all state for those rooms. + state_updates_by_room = defaultdict(list) + for stream_id, room_id, _type, _state_key, _event_id in state_rows: + state_updates_by_room[room_id].append(stream_id) + + state_all_rows = [ + (stream_ids[-1], room_id) + for room_id, stream_ids in state_updates_by_room.items() + if len(stream_ids) >= _MAX_STATE_UPDATES_PER_ROOM + ] + state_all_updates: Iterable[Tuple[int, Tuple]] = ( + (max_stream_id, (EventsStreamAllStateRow.TypeId, (room_id,))) + for (max_stream_id, room_id) in state_all_rows + ) + + # Any remaining state updates are sent individually. 
+ state_all_rooms = {room_id for _, room_id in state_all_rows} state_updates: Iterable[Tuple[int, Tuple]] = ( (stream_id, (EventsStreamCurrentStateRow.TypeId, rest)) for (stream_id, *rest) in state_rows + if rest[0] not in state_all_rooms ) ex_outliers_updates: Iterable[Tuple[int, Tuple]] = ( @@ -224,7 +263,11 @@ class EventsStream(Stream): ) # we need to return a sorted list, so merge them together. - updates = list(heapq.merge(event_updates, state_updates, ex_outliers_updates)) + updates = list( + heapq.merge( + event_updates, state_all_updates, state_updates, ex_outliers_updates + ) + ) return updates, upper_limit, limited @classmethod diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2fbd389c71..4d0470ffd9 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,6 +23,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces from synapse.replication.tcp.streams import BackfillStream, CachesStream from synapse.replication.tcp.streams.events import ( EventsStream, + EventsStreamAllStateRow, EventsStreamCurrentStateRow, EventsStreamEventRow, EventsStreamRow, @@ -264,6 +265,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore): (data.state_key,) ) self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined] + elif row.type == EventsStreamAllStateRow.TypeId: + assert isinstance(data, EventsStreamAllStateRow) + # Similar to the above, but the entire caches are invalidated. This is + # unfortunate for the membership caches, but should recover quickly. + self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] + self.get_rooms_for_user_with_stream_ordering.invalidate_all() # type: ignore[attr-defined] + self.get_rooms_for_user.invalidate_all() # type: ignore[attr-defined] else: raise Exception("Unknown events stream row type %s" % (row.type,)) diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 128fc3e046..b8ab4ee54b 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -14,6 +14,8 @@ from typing import Any, List, Optional +from parameterized import parameterized + from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership @@ -21,6 +23,8 @@ from synapse.events import EventBase from synapse.replication.tcp.commands import RdataCommand from synapse.replication.tcp.streams._base import _STREAM_UPDATE_TARGET_ROW_COUNT from synapse.replication.tcp.streams.events import ( + _MAX_STATE_UPDATES_PER_ROOM, + EventsStreamAllStateRow, EventsStreamCurrentStateRow, EventsStreamEventRow, EventsStreamRow, @@ -106,11 +110,21 @@ class EventsStreamTestCase(BaseStreamTestCase): self.assertEqual([], received_rows) - def test_update_function_huge_state_change(self) -> None: + @parameterized.expand( + [(_STREAM_UPDATE_TARGET_ROW_COUNT, False), (_MAX_STATE_UPDATES_PER_ROOM, True)] + ) + def test_update_function_huge_state_change( + self, num_state_changes: int, collapse_state_changes: bool + ) -> None: """Test replication with many state events Ensures that all events are correctly replicated when there are lots of state change rows to be replicated. + + Args: + num_state_changes: The number of state changes to create. + collapse_state_changes: Whether the state changes are expected to be + collapsed or not. 
""" # we want to generate lots of state changes at a single stream ID. @@ -145,7 +159,7 @@ class EventsStreamTestCase(BaseStreamTestCase): events = [ self._inject_state_event(sender=OTHER_USER) - for _ in range(_STREAM_UPDATE_TARGET_ROW_COUNT) + for _ in range(num_state_changes) ] self.replicate() @@ -202,8 +216,7 @@ class EventsStreamTestCase(BaseStreamTestCase): row for row in self.test_handler.received_rdata_rows if row[0] == "events" ] - # first check the first two rows, which should be state1 - + # first check the first two rows, which should be the state1 event. stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) @@ -217,7 +230,7 @@ class EventsStreamTestCase(BaseStreamTestCase): self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state1.event_id) - # now the last two rows, which should be state2 + # now the last two rows, which should be the state2 event. stream_name, token, row = received_rows.pop(-2) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) @@ -231,34 +244,54 @@ class EventsStreamTestCase(BaseStreamTestCase): self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state2.event_id) - # that should leave us with the rows for the PL event - self.assertEqual(len(received_rows), len(events) + 2) + # Based on the number of + if collapse_state_changes: + # that should leave us with the rows for the PL event, the state changes + # get collapsed into a single row. + self.assertEqual(len(received_rows), 2) - stream_name, token, row = received_rows.pop(0) - self.assertEqual("events", stream_name) - self.assertIsInstance(row, EventsStreamRow) - self.assertEqual(row.type, "ev") - self.assertIsInstance(row.data, EventsStreamEventRow) - self.assertEqual(row.data.event_id, pl_event.event_id) - - # the state rows are unsorted - state_rows: List[EventsStreamCurrentStateRow] = [] - for stream_name, _, row in received_rows: + stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) - self.assertEqual(row.type, "state") - self.assertIsInstance(row.data, EventsStreamCurrentStateRow) - state_rows.append(row.data) + self.assertEqual(row.type, "ev") + self.assertIsInstance(row.data, EventsStreamEventRow) + self.assertEqual(row.data.event_id, pl_event.event_id) - state_rows.sort(key=lambda r: r.state_key) + stream_name, token, row = received_rows.pop(0) + self.assertIsInstance(row, EventsStreamRow) + self.assertEqual(row.type, "state-all") + self.assertIsInstance(row.data, EventsStreamAllStateRow) + self.assertEqual(row.data.room_id, state2.room_id) - sr = state_rows.pop(0) - self.assertEqual(sr.type, EventTypes.PowerLevels) - self.assertEqual(sr.event_id, pl_event.event_id) - for sr in state_rows: - self.assertEqual(sr.type, "test_state_event") - # "None" indicates the state has been deleted - self.assertIsNone(sr.event_id) + else: + # that should leave us with the rows for the PL event + self.assertEqual(len(received_rows), len(events) + 2) + + stream_name, token, row = received_rows.pop(0) + self.assertEqual("events", stream_name) + self.assertIsInstance(row, EventsStreamRow) + self.assertEqual(row.type, "ev") + self.assertIsInstance(row.data, EventsStreamEventRow) + self.assertEqual(row.data.event_id, pl_event.event_id) + + # the state rows are unsorted + state_rows: List[EventsStreamCurrentStateRow] = [] + for stream_name, 
_, row in received_rows: + self.assertEqual("events", stream_name) + self.assertIsInstance(row, EventsStreamRow) + self.assertEqual(row.type, "state") + self.assertIsInstance(row.data, EventsStreamCurrentStateRow) + state_rows.append(row.data) + + state_rows.sort(key=lambda r: r.state_key) + + sr = state_rows.pop(0) + self.assertEqual(sr.type, EventTypes.PowerLevels) + self.assertEqual(sr.event_id, pl_event.event_id) + for sr in state_rows: + self.assertEqual(sr.type, "test_state_event") + # "None" indicates the state has been deleted + self.assertIsNone(sr.event_id) def test_update_function_state_row_limit(self) -> None: """Test replication with many state events over several stream ids.""" From e9069c9f919685606506f04527332e83fbfa44d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Oct 2023 15:04:18 +0100 Subject: [PATCH 070/142] Mark sync as limited if there is a gap in the timeline (#16485) This splits thinsg into two queries, but most of the time we won't have new event backwards extremities so this shouldn't actually add an extra RTT for the majority of cases. Note this removes the check for events with no prev events, but that was part of MSC2716 work that has since been removed. --- changelog.d/16485.bugfix | 1 + synapse/handlers/sync.py | 52 +++++++++++-- synapse/storage/databases/main/events.py | 74 ++++++++++++------- synapse/storage/databases/main/stream.py | 47 ++++++++++++ .../storage/schema/main/delta/82/05gaps.sql | 25 +++++++ 5 files changed, 166 insertions(+), 33 deletions(-) create mode 100644 changelog.d/16485.bugfix create mode 100644 synapse/storage/schema/main/delta/82/05gaps.sql diff --git a/changelog.d/16485.bugfix b/changelog.d/16485.bugfix new file mode 100644 index 0000000000..3cd7e1877f --- /dev/null +++ b/changelog.d/16485.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 60b4d95cd7..f131c0e8e0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -500,12 +500,27 @@ class SyncHandler: async def _load_filtered_recents( self, room_id: str, + sync_result_builder: "SyncResultBuilder", sync_config: SyncConfig, - now_token: StreamToken, + upto_token: StreamToken, since_token: Optional[StreamToken] = None, potential_recents: Optional[List[EventBase]] = None, newly_joined_room: bool = False, ) -> TimelineBatch: + """Create a timeline batch for the room + + Args: + room_id + sync_result_builder + sync_config + upto_token: The token up to which we should fetch (more) events. + If `potential_results` is non-empty then this is *start* of + the the list. + since_token + potential_recents: If non-empty, the events between the since token + and current token to send down to clients. + newly_joined_room + """ with Measure(self.clock, "load_filtered_recents"): timeline_limit = sync_config.filter_collection.timeline_limit() block_all_timeline = ( @@ -521,6 +536,20 @@ class SyncHandler: else: limited = False + # Check if there is a gap, if so we need to mark this as limited and + # recalculate which events to send down. + gap_token = await self.store.get_timeline_gaps( + room_id, + since_token.room_key if since_token else None, + sync_result_builder.now_token.room_key, + ) + if gap_token: + # There's a gap, so we need to ignore the passed in + # `potential_recents`, and reset `upto_token` to match. 
+ potential_recents = None + upto_token = sync_result_builder.now_token + limited = True + log_kv({"limited": limited}) if potential_recents: @@ -559,10 +588,10 @@ class SyncHandler: recents = [] if not limited or block_all_timeline: - prev_batch_token = now_token + prev_batch_token = upto_token if recents: room_key = recents[0].internal_metadata.before - prev_batch_token = now_token.copy_and_replace( + prev_batch_token = upto_token.copy_and_replace( StreamKeyType.ROOM, room_key ) @@ -573,11 +602,15 @@ class SyncHandler: filtering_factor = 2 load_limit = max(timeline_limit * filtering_factor, 10) max_repeat = 5 # Only try a few times per room, otherwise - room_key = now_token.room_key + room_key = upto_token.room_key end_key = room_key since_key = None - if since_token and not newly_joined_room: + if since_token and gap_token: + # If there is a gap then we need to only include events after + # it. + since_key = gap_token + elif since_token and not newly_joined_room: since_key = since_token.room_key while limited and len(recents) < timeline_limit and max_repeat: @@ -647,7 +680,7 @@ class SyncHandler: recents = recents[-timeline_limit:] room_key = recents[0].internal_metadata.before - prev_batch_token = now_token.copy_and_replace(StreamKeyType.ROOM, room_key) + prev_batch_token = upto_token.copy_and_replace(StreamKeyType.ROOM, room_key) # Don't bother to bundle aggregations if the timeline is unlimited, # as clients will have all the necessary information. @@ -662,7 +695,9 @@ class SyncHandler: return TimelineBatch( events=recents, prev_batch=prev_batch_token, - limited=limited or newly_joined_room, + # Also mark as limited if this is a new room or there has been a gap + # (to force client to paginate the gap). + limited=limited or newly_joined_room or gap_token is not None, bundled_aggregations=bundled_aggregations, ) @@ -2397,8 +2432,9 @@ class SyncHandler: batch = await self._load_filtered_recents( room_id, + sync_result_builder, sync_config, - now_token=upto_token, + upto_token=upto_token, since_token=since_token, potential_recents=events, newly_joined_room=newly_joined, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ef6766b5e0..3c1492e3ad 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2267,35 +2267,59 @@ class PersistEventsStore: Forward extremities are handled when we first start persisting the events. """ - # From the events passed in, add all of the prev events as backwards extremities. - # Ignore any events that are already backwards extrems or outliers. - query = ( - "INSERT INTO event_backward_extremities (event_id, room_id)" - " SELECT ?, ? WHERE NOT EXISTS (" - " SELECT 1 FROM event_backward_extremities" - " WHERE event_id = ? AND room_id = ?" - " )" - # 1. Don't add an event as a extremity again if we already persisted it - # as a non-outlier. - # 2. Don't add an outlier as an extremity if it has no prev_events - " AND NOT EXISTS (" - " SELECT 1 FROM events" - " LEFT JOIN event_edges edge" - " ON edge.event_id = events.event_id" - " WHERE events.event_id = ? AND events.room_id = ? 
AND (events.outlier = FALSE OR edge.event_id IS NULL)" - " )" + + room_id = events[0].room_id + + potential_backwards_extremities = { + e_id + for ev in events + for e_id in ev.prev_event_ids() + if not ev.internal_metadata.is_outlier() + } + + if not potential_backwards_extremities: + return + + existing_events_outliers = self.db_pool.simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=potential_backwards_extremities, + keyvalues={"outlier": False}, + retcols=("event_id",), ) - txn.execute_batch( - query, - [ - (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id) - for ev in events - for e_id in ev.prev_event_ids() - if not ev.internal_metadata.is_outlier() - ], + potential_backwards_extremities.difference_update( + e for e, in existing_events_outliers ) + if potential_backwards_extremities: + self.db_pool.simple_upsert_many_txn( + txn, + table="event_backward_extremities", + key_names=("room_id", "event_id"), + key_values=[(room_id, ev) for ev in potential_backwards_extremities], + value_names=(), + value_values=(), + ) + + # Record the stream orderings where we have new gaps. + gap_events = [ + (room_id, self._instance_name, ev.internal_metadata.stream_ordering) + for ev in events + if any( + e_id in potential_backwards_extremities + for e_id in ev.prev_event_ids() + ) + ] + + self.db_pool.simple_insert_many_txn( + txn, + table="timeline_gaps", + keys=("room_id", "instance_name", "stream_ordering"), + values=gap_events, + ) + # Delete all these events that we've already fetched and now know that their # prev events are the new backwards extremeties. query = ( diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index ea06e4eee0..872df6bda1 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1616,3 +1616,50 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): retcol="instance_name", desc="get_name_from_instance_id", ) + + async def get_timeline_gaps( + self, + room_id: str, + from_token: Optional[RoomStreamToken], + to_token: RoomStreamToken, + ) -> Optional[RoomStreamToken]: + """Check if there is a gap, and return a token that marks the position + of the gap in the stream. + """ + + sql = """ + SELECT instance_name, stream_ordering + FROM timeline_gaps + WHERE room_id = ? AND ? < stream_ordering AND stream_ordering <= ? + ORDER BY stream_ordering + """ + + rows = await self.db_pool.execute( + "get_timeline_gaps", + None, + sql, + room_id, + from_token.stream if from_token else 0, + to_token.get_max_stream_pos(), + ) + + if not rows: + return None + + positions = [ + PersistedEventPosition(instance_name, stream_ordering) + for instance_name, stream_ordering in rows + ] + if from_token: + positions = [p for p in positions if p.persisted_after(from_token)] + + positions = [p for p in positions if not p.persisted_after(to_token)] + + if positions: + # We return a stream token that ensures the event *at* the position + # of the gap is included (as the gap is *before* the persisted + # event). 
+ last_position = positions[-1] + return RoomStreamToken(stream=last_position.stream - 1) + + return None diff --git a/synapse/storage/schema/main/delta/82/05gaps.sql b/synapse/storage/schema/main/delta/82/05gaps.sql new file mode 100644 index 0000000000..6813b488ca --- /dev/null +++ b/synapse/storage/schema/main/delta/82/05gaps.sql @@ -0,0 +1,25 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Records when we see a "gap in the timeline", due to missing events over +-- federation. We record this so that we can tell clients there is a gap (by +-- marking the timeline section of a sync request as limited). +CREATE TABLE IF NOT EXISTS timeline_gaps ( + room_id TEXT NOT NULL, + instance_name TEXT NOT NULL, + stream_ordering BIGINT NOT NULL +); + +CREATE INDEX timeline_gaps_room_id ON timeline_gaps(room_id, stream_ordering); From 345c61f6329ed219cde4062db925ee6d09f3ec8d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 19 Oct 2023 21:53:05 +0100 Subject: [PATCH 071/142] Build Debian packages for Ubuntu 23.10 Mantic Minotaur (#16524) --- changelog.d/16524.misc | 1 + scripts-dev/build_debian_packages.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/16524.misc diff --git a/changelog.d/16524.misc b/changelog.d/16524.misc new file mode 100644 index 0000000000..3f8ac1bce7 --- /dev/null +++ b/changelog.d/16524.misc @@ -0,0 +1 @@ +Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index b192faba14..de2a134544 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -33,6 +33,7 @@ DISTS = ( "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24) + "ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24) "debian:trixie", # (EOL not specified yet) ) From d2eab22de7543b62d5ac8d334160697ede37fabb Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 20 Oct 2023 11:40:26 -0400 Subject: [PATCH 072/142] Clarify presence router docs. (#16529) --- changelog.d/16529.doc | 1 + docs/modules/presence_router_callbacks.md | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog.d/16529.doc diff --git a/changelog.d/16529.doc b/changelog.d/16529.doc new file mode 100644 index 0000000000..0f8a87f293 --- /dev/null +++ b/changelog.d/16529.doc @@ -0,0 +1 @@ +Improve documentation of presence router. 
diff --git a/docs/modules/presence_router_callbacks.md b/docs/modules/presence_router_callbacks.md index d3da25cef4..b210f0e3cd 100644 --- a/docs/modules/presence_router_callbacks.md +++ b/docs/modules/presence_router_callbacks.md @@ -1,8 +1,16 @@ # Presence router callbacks -Presence router callbacks allow module developers to specify additional users (local or remote) -to receive certain presence updates from local users. Presence router callbacks can be -registered using the module API's `register_presence_router_callbacks` method. +Presence router callbacks allow module developers to define additional users +which receive presence updates from local users. The additional users +can be local or remote. + +For example, it could be used to direct all of `@alice:example.com` (a local user)'s +presence updates to `@bob:matrix.org` (a remote user), even though they don't share a +room. (Note that those presence updates might not make it to `@bob:matrix.org`'s client +unless a similar presence router is running on that homeserver.) + +Presence router callbacks can be registered using the module API's +`register_presence_router_callbacks` method. ## Callbacks From 9171bf3b35a044afd373ff67e0669b587efdf665 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 09:45:12 +0100 Subject: [PATCH 073/142] Bump pygithub from 1.59.1 to 2.1.1 (#16535) Bumps [pygithub](https://github.com/pygithub/pygithub) from 1.59.1 to 2.1.1. - [Release notes](https://github.com/pygithub/pygithub/releases) - [Changelog](https://github.com/PyGithub/PyGithub/blob/main/doc/changes.rst) - [Commits](https://github.com/pygithub/pygithub/compare/v1.59.1...v2.1.1) --- updated-dependencies: - dependency-name: pygithub dependency-type: direct:development update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index a891280277..47fce1743b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1980,20 +1980,23 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" -version = "1.59.1" +version = "2.1.1" description = "Use the full Github API v3" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.59.1-py3-none-any.whl", hash = "sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9"}, - {file = "PyGithub-1.59.1.tar.gz", hash = "sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217"}, + {file = "PyGithub-2.1.1-py3-none-any.whl", hash = "sha256:4b528d5d6f35e991ea5fd3f942f58748f24938805cb7fcf24486546637917337"}, + {file = "PyGithub-2.1.1.tar.gz", hash = "sha256:ecf12c2809c44147bce63b047b3d2e9dac8a41b63e90fcb263c703f64936b97c"}, ] [package.dependencies] -deprecated = "*" +Deprecated = "*" pyjwt = {version = ">=2.4.0", extras = ["crypto"]} pynacl = ">=1.4.0" +python-dateutil = "*" requests = ">=2.14.0" +typing-extensions = ">=4.0.0" +urllib3 = ">=1.26.0" [[package]] name = "pygments" @@ -2137,7 +2140,7 @@ s2repoze = ["paste", "repoze.who", "zope.interface"] name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, From 2d12163cb4a007c34568e6ffc8bc532f48b07cce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 09:46:55 +0100 Subject: [PATCH 074/142] Bump types-pillow from 10.0.0.3 to 10.1.0.0 (#16536) Bumps [types-pillow](https://github.com/python/typeshed) from 10.0.0.3 to 10.1.0.0. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 47fce1743b..2f4e476401 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3109,13 +3109,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.0.0.3" +version = "10.1.0.0" description = "Typing stubs for Pillow" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-Pillow-10.0.0.3.tar.gz", hash = "sha256:ae0c877d363da349bbb82c5463c9e78037290cc07d3714cb0ceaf5d2f7f5c825"}, - {file = "types_Pillow-10.0.0.3-py3-none-any.whl", hash = "sha256:54a49f3c6a3f5e95ebeee396d7773dde22ce2515d594f9c0596c0a983558f0d4"}, + {file = "types-Pillow-10.1.0.0.tar.gz", hash = "sha256:0f5e7cf010ed226800cb5821e87781e5d0e81257d948a9459baa74a8c8b7d822"}, + {file = "types_Pillow-10.1.0.0-py3-none-any.whl", hash = "sha256:f97f596b6a39ddfd26da3eb67421062193e10732d2310f33898d36f9694331b5"}, ] [[package]] From a8026209d2d1a72f3b1dea7c075a1d361bd47d58 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:24:46 +0100 Subject: [PATCH 075/142] Bump gitpython from 3.1.37 to 3.1.40 (#16534) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.37 to 3.1.40. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.37...3.1.40) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2f4e476401..d1236298d0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -600,20 +600,20 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.37" +version = "3.1.40" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.37-py3-none-any.whl", hash = "sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33"}, - {file = "GitPython-3.1.37.tar.gz", hash = "sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"}, + {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, + {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] -test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] [[package]] name = "hiredis" From 786b614fb2793c007f0f531b9bbacba0cb45e7f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:25:02 +0100 Subject: [PATCH 076/142] Bump types-requests from 2.31.0.2 to 2.31.0.10 (#16537) Bumps [types-requests](https://github.com/python/typeshed) from 2.31.0.2 to 2.31.0.10. - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/poetry.lock b/poetry.lock index d1236298d0..82124778a1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3156,17 +3156,17 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.2" +version = "2.31.0.10" description = "Typing stubs for requests" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"}, - {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"}, + {file = "types-requests-2.31.0.10.tar.gz", hash = "sha256:dc5852a76f1eaf60eafa81a2e50aefa3d1f015c34cf0cba130930866b1b22a92"}, + {file = "types_requests-2.31.0.10-py3-none-any.whl", hash = "sha256:b32b9a86beffa876c0c3ac99a4cd3b8b51e973fb8e3bd4e0a6bb32c7efad80fc"}, ] [package.dependencies] -types-urllib3 = "*" +urllib3 = ">=2" [[package]] name = "types-setuptools" @@ -3179,17 +3179,6 @@ files = [ {file = "types_setuptools-68.2.0.0-py3-none-any.whl", hash = "sha256:77edcc843e53f8fc83bb1a840684841f3dc804ec94562623bfa2ea70d5a2ba1b"}, ] -[[package]] -name = "types-urllib3" -version = "1.26.25.8" -description = "Typing stubs for urllib3" -optional = false -python-versions = "*" -files = [ - {file = "types-urllib3-1.26.25.8.tar.gz", hash = "sha256:ecf43c42d8ee439d732a1110b4901e9017a79a38daca26f08e42c8460069392c"}, - {file = "types_urllib3-1.26.25.8-py3-none-any.whl", hash = "sha256:95ea847fbf0bf675f50c8ae19a665baedcf07e6b4641662c4c3c72e7b2edf1a9"}, -] - [[package]] name = "typing-extensions" version = "4.8.0" @@ -3214,19 +3203,20 @@ files = [ [[package]] name = "urllib3" -version = "1.26.18" +version = "2.0.7" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7" files = [ - {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, - {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, ] [package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "webencodings" From f835ab8de573fa0ffef446972474de51f9cf0729 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:25:14 +0100 Subject: [PATCH 077/142] Bump black from 23.9.1 to 23.10.0 (#16538) Bumps [black](https://github.com/psf/black) from 23.9.1 to 23.10.0. - [Release notes](https://github.com/psf/black/releases) - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md) - [Commits](https://github.com/psf/black/compare/23.9.1...23.10.0) --- updated-dependencies: - dependency-name: black dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/poetry.lock b/poetry.lock index 82124778a1..8585d9855d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -162,33 +162,29 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.9.1" +version = "23.10.0" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-23.9.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301"}, - {file = "black-23.9.1-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100"}, - {file = "black-23.9.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71"}, - {file = "black-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7"}, - {file = "black-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe"}, - {file = "black-23.9.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186"}, - {file = "black-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f"}, - {file = "black-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855"}, - {file = "black-23.9.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204"}, - {file = "black-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377"}, - {file = "black-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325"}, - {file = "black-23.9.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393"}, - {file = "black-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9"}, - {file = "black-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f"}, - {file = "black-23.9.1-py3-none-any.whl", hash = "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9"}, - {file = "black-23.9.1.tar.gz", hash = "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d"}, + {file = "black-23.10.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:f8dc7d50d94063cdfd13c82368afd8588bac4ce360e4224ac399e769d6704e98"}, + {file = "black-23.10.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:f20ff03f3fdd2fd4460b4f631663813e57dc277e37fb216463f3b907aa5a9bdd"}, + {file = 
"black-23.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3d9129ce05b0829730323bdcb00f928a448a124af5acf90aa94d9aba6969604"}, + {file = "black-23.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:960c21555be135c4b37b7018d63d6248bdae8514e5c55b71e994ad37407f45b8"}, + {file = "black-23.10.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:30b78ac9b54cf87bcb9910ee3d499d2bc893afd52495066c49d9ee6b21eee06e"}, + {file = "black-23.10.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:0e232f24a337fed7a82c1185ae46c56c4a6167fb0fe37411b43e876892c76699"}, + {file = "black-23.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31946ec6f9c54ed7ba431c38bc81d758970dd734b96b8e8c2b17a367d7908171"}, + {file = "black-23.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:c870bee76ad5f7a5ea7bd01dc646028d05568d33b0b09b7ecfc8ec0da3f3f39c"}, + {file = "black-23.10.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:6901631b937acbee93c75537e74f69463adaf34379a04eef32425b88aca88a23"}, + {file = "black-23.10.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:481167c60cd3e6b1cb8ef2aac0f76165843a374346aeeaa9d86765fe0dd0318b"}, + {file = "black-23.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74892b4b836e5162aa0452393112a574dac85e13902c57dfbaaf388e4eda37c"}, + {file = "black-23.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:47c4510f70ec2e8f9135ba490811c071419c115e46f143e4dce2ac45afdcf4c9"}, + {file = "black-23.10.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:76baba9281e5e5b230c9b7f83a96daf67a95e919c2dfc240d9e6295eab7b9204"}, + {file = "black-23.10.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:a3c2ddb35f71976a4cfeca558848c2f2f89abc86b06e8dd89b5a65c1e6c0f22a"}, + {file = "black-23.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db451a3363b1e765c172c3fd86213a4ce63fb8524c938ebd82919bf2a6e28c6a"}, + {file = "black-23.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:7fb5fc36bb65160df21498d5a3dd330af8b6401be3f25af60c6ebfe23753f747"}, + {file = "black-23.10.0-py3-none-any.whl", hash = "sha256:e223b731a0e025f8ef427dd79d8cd69c167da807f5710add30cdf131f13dd62e"}, + {file = "black-23.10.0.tar.gz", hash = "sha256:31b9f87b277a68d0e99d2905edae08807c007973eaa609da5f0c62def6b7c0bd"}, ] [package.dependencies] From 478a6c65eb049862a6d9a3d1f28da4b058e63757 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 23 Oct 2023 12:28:29 +0100 Subject: [PATCH 078/142] Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0 (#16539) --- changelog.d/16539.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16539.misc diff --git a/changelog.d/16539.misc b/changelog.d/16539.misc new file mode 100644 index 0000000000..cd21bdb26d --- /dev/null +++ b/changelog.d/16539.misc @@ -0,0 +1 @@ +Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. diff --git a/poetry.lock b/poetry.lock index 8585d9855d..67620f8efa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -1337,13 +1337,13 @@ test = ["aiounittest", "tox", "twisted"] [[package]] name = "matrix-synapse-ldap3" -version = "0.2.2" +version = "0.3.0" description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" files = [ - {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"}, - {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, + {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, + {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, ] [package.dependencies] From 12ca87f5eac06450abaf024e5f4906147d5322e3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 23 Oct 2023 07:37:45 -0400 Subject: [PATCH 079/142] Remove the last reference to event_txn_id. (#16521) This table was no longer used, except for a background process which purged old entries in it. --- changelog.d/16521.misc | 1 + synapse/storage/databases/main/events_worker.py | 6 ------ synapse/storage/schema/__init__.py | 5 ++++- 3 files changed, 5 insertions(+), 7 deletions(-) create mode 100644 changelog.d/16521.misc diff --git a/changelog.d/16521.misc b/changelog.d/16521.misc new file mode 100644 index 0000000000..c6a8ddcf9c --- /dev/null +++ b/changelog.d/16521.misc @@ -0,0 +1 @@ +Stop deleting from an unused table. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 8af638d60f..5bf864c1fb 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -2095,12 +2095,6 @@ class EventsWorkerStore(SQLBaseStore): def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None: one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000 - sql = """ - DELETE FROM event_txn_id - WHERE inserted_ts < ? - """ - txn.execute(sql, (one_day_ago,)) - sql = """ DELETE FROM event_txn_id_device_id WHERE inserted_ts < ? diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5b50bd66bc..158b528dce 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 82 # remember to update the list below when updating +SCHEMA_VERSION = 83 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -121,6 +121,9 @@ Changes in SCHEMA_VERSION = 81 Changes in SCHEMA_VERSION = 82 - The insertion_events, insertion_event_extremities, insertion_event_edges, and batch_events tables are no longer purged in preparation for their removal. + +Changes in SCHEMA_VERSION = 83 + - The event_txn_id is no longer used. """ From 3a0aa6fe76c43b09a0e13785894df2a285396c10 Mon Sep 17 00:00:00 2001 From: Denis Kasak Date: Mon, 23 Oct 2023 11:38:51 +0000 Subject: [PATCH 080/142] Force TLS certificate verification in registration script. (#16530) If using the script remotely, there's no particularly convincing reason to disable certificate verification, as this makes the connection interceptible. 
If on the other hand, the script is used locally (the most common use case), you can simply target the HTTP listener and avoid TLS altogether. This is what the script already attempts to do if passed a homeserver configuration YAML file. --- changelog.d/16530.bugfix | 1 + synapse/_scripts/register_new_matrix_user.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16530.bugfix diff --git a/changelog.d/16530.bugfix b/changelog.d/16530.bugfix new file mode 100644 index 0000000000..503ea0af20 --- /dev/null +++ b/changelog.d/16530.bugfix @@ -0,0 +1 @@ +Force TLS certificate verification in user registration script. diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 19ca399d44..9293808640 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -50,7 +50,7 @@ def request_registration( url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),) # Get the nonce - r = requests.get(url, verify=False) + r = requests.get(url) if r.status_code != 200: _print("ERROR! Received %d %s" % (r.status_code, r.reason)) @@ -88,7 +88,7 @@ def request_registration( } _print("Sending registration request...") - r = requests.post(url, json=data, verify=False) + r = requests.post(url, json=data) if r.status_code != 200: _print("ERROR! Received %d %s" % (r.status_code, r.reason)) From 3bcb6a059f4cd1014b6a90550e009860089a13cc Mon Sep 17 00:00:00 2001 From: Marcel Date: Mon, 23 Oct 2023 13:55:36 +0200 Subject: [PATCH 081/142] Mention how to redirect the Jaeger traces to a specific Jaeger instance (#16531) --- changelog.d/16531.doc | 1 + docs/opentracing.md | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/16531.doc diff --git a/changelog.d/16531.doc b/changelog.d/16531.doc new file mode 100644 index 0000000000..0932d1abf1 --- /dev/null +++ b/changelog.d/16531.doc @@ -0,0 +1 @@ +Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. diff --git a/docs/opentracing.md b/docs/opentracing.md index abb94b565f..bf48874160 100644 --- a/docs/opentracing.md +++ b/docs/opentracing.md @@ -51,6 +51,11 @@ docker run -d --name jaeger \ jaegertracing/all-in-one:1 ``` +By default, Synapse will publish traces to Jaeger on localhost. +If Jaeger is hosted elsewhere, point Synapse to the correct host by setting +`opentracing.jaeger_config.local_agent.reporting_host` [in the Synapse configuration](usage/configuration/config_documentation.md#opentracing-1) +or by setting the `JAEGER_AGENT_HOST` environment variable to the desired address. + Latest documentation is probably at https://www.jaegertracing.io/docs/latest/getting-started. From 3bc23cc45cb6a70d53ba4032a9116029bc4f538c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Oct 2023 14:39:25 +0100 Subject: [PATCH 082/142] Fix bug that could cause a `/sync` to tightloop with sqlite after restart (#16540) This could happen if the last rows in the account data stream were inserted into `account_data`. After a restart the max account ID would be calculated without looking at the `account_data` table, and so have an old ID. 
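A minimal illustration of that failure mode (this sketch is not part of the patch; the table names follow the commit message and the stream IDs are invented): if the newest writes landed in `account_data` but the stream position is re-seeded at startup from the other account data tables only, the recovered token is older than rows already in the database.

```
# Hypothetical stream IDs present in each table at the time of the restart.
stream_ids = {
    "room_tags_revisions": [3, 5],
    "account_data": [6, 7],  # the last writes before the restart landed here
}

# Before the fix: the max account data ID is computed without `account_data`.
stale_token = max(stream_ids["room_tags_revisions"])          # 5

# After the fix: every table feeding the stream is consulted.
correct_token = max(max(ids) for ids in stream_ids.values())  # 7

assert stale_token < correct_token  # stale position -> `/sync` can tightloop
```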
--- changelog.d/16540.bugfix | 1 + synapse/storage/databases/main/account_data.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16540.bugfix diff --git a/changelog.d/16540.bugfix b/changelog.d/16540.bugfix new file mode 100644 index 0000000000..34ee9facf9 --- /dev/null +++ b/changelog.d/16540.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 39498d52c6..84ef8136c2 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -94,7 +94,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) hs.get_replication_notifier(), "room_account_data", "stream_id", - extra_tables=[("room_tags_revisions", "stream_id")], + extra_tables=[ + ("account_data", "stream_id"), + ("room_tags_revisions", "stream_id"), + ], is_writer=self._instance_name in hs.config.worker.writers.account_data, ) From 8f35f8148e1a7ce3ac249e2d2052854409f2c0d6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 23 Oct 2023 16:57:30 +0100 Subject: [PATCH 083/142] Fix bug where a new writer advances their token too quickly (#16473) * Fix bug where a new writer advances their token too quickly When starting a new writer (e.g. for persisting events), the `MultiWriterIdGenerator` doesn't have a minimum token for it as there are no rows matching that new writer in the DB. This results in the first stream ID it acquired being announced as persisted *before* it actually finishes persisting, if another writer gets and persists a subsequent stream ID. This is due to the logic of setting the minimum persisted position to the minimum known position across all writers, and the new writer starts off not being considered. * Fix sending out POSITIONs when our token advances without update Broke in #14820 * For replication HTTP requests, only wait for minimal position --- changelog.d/16473.bugfix | 1 + .../synapse_architecture/streams.md | 13 +- synapse/replication/http/_base.py | 2 +- synapse/replication/tcp/streams/_base.py | 129 +++++++++++------ synapse/replication/tcp/streams/events.py | 8 +- synapse/replication/tcp/streams/federation.py | 15 +- .../replication/tcp/streams/partial_state.py | 10 +- synapse/storage/util/id_generators.py | 68 ++++++++- tests/storage/test_id_generators.py | 136 ++++++++++++++++-- 9 files changed, 305 insertions(+), 77 deletions(-) create mode 100644 changelog.d/16473.bugfix diff --git a/changelog.d/16473.bugfix b/changelog.d/16473.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16473.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/docs/development/synapse_architecture/streams.md b/docs/development/synapse_architecture/streams.md index bee0b8a8c0..67d92acfa1 100644 --- a/docs/development/synapse_architecture/streams.md +++ b/docs/development/synapse_architecture/streams.md @@ -51,17 +51,24 @@ will be inserted with that ID.
For any given stream reader (including writers themselves), we may define a per-writer current stream ID: -> The current stream ID _for a writer W_ is the largest stream ID such that +> A current stream ID _for a writer W_ is the largest stream ID such that > all transactions added by W with equal or smaller ID have completed. Similarly, there is a "linear" notion of current stream ID: -> The "linear" current stream ID is the largest stream ID such that +> A "linear" current stream ID is the largest stream ID such that > all facts (added by any writer) with equal or smaller ID have completed. Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs. Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates. +The above definition does not give a unique current stream ID, in fact there can +be a range of current stream IDs. Synapse uses both the minimum and maximum IDs +for different purposes. Most often the maximum is used, as its generally +beneficial for workers to advance their IDs as soon as possible. However, the +minimum is used in situations where e.g. another worker is going to wait until +the stream advances past a position. + **NB.** For both senses of "current", that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID. For single-writer streams, the per-writer current ID and the linear current ID are the same. @@ -114,7 +121,7 @@ Writers need to track: - track their current position (i.e. its own per-writer stream ID). - their facts currently awaiting completion. -At startup, +At startup, - the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and - there are no facts awaiting completion. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 63cf24a14d..7476839db5 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -238,7 +238,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): data[_STREAM_POSITION_KEY] = { "streams": { - stream.NAME: stream.current_token(local_instance_name) + stream.NAME: stream.minimal_local_current_token() for stream in streams }, "instance_name": local_instance_name, diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index c6088a0f99..5c4d228f3d 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -33,6 +33,7 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.util.id_generators import AbstractStreamIdGenerator logger = logging.getLogger(__name__) @@ -107,22 +108,10 @@ class Stream: def __init__( self, local_instance_name: str, - current_token_function: Callable[[str], Token], update_function: UpdateFunction, ): """Instantiate a Stream - `current_token_function` and `update_function` are callbacks which - should be implemented by subclasses. - - `current_token_function` takes an instance name, which is a writer to - the stream, and returns the position in the stream of the writer (as - viewed from the current process). 
On the writer process this is where - the writer has successfully written up to, whereas on other processes - this is the position which we have received updates up to over - replication. (Note that most streams have a single writer and so their - implementations ignore the instance name passed in). - `update_function` is called to get updates for this stream between a pair of stream tokens. See the `UpdateFunction` type definition for more info. @@ -133,12 +122,28 @@ class Stream: update_function: callback go get stream updates, as above """ self.local_instance_name = local_instance_name - self.current_token = current_token_function self.update_function = update_function # The token from which we last asked for updates self.last_token = self.current_token(self.local_instance_name) + def current_token(self, instance_name: str) -> Token: + """This takes an instance name, which is a writer to + the stream, and returns the position in the stream of the writer (as + viewed from the current process). + """ + # We can't make this an abstract class as it makes mypy unhappy. + raise NotImplementedError() + + def minimal_local_current_token(self) -> Token: + """Tries to return a minimal current token for the local instance, + i.e. for writers this would be the last successful write. + + If local instance is not a writer (or has written yet) then falls back + to returning the normal "current token". + """ + raise NotImplementedError() + def discard_updates_and_advance(self) -> None: """Called when the stream should advance but the updates would be discarded, e.g. when there are no currently connected workers. @@ -190,6 +195,25 @@ class Stream: return updates, upto_token, limited +class _StreamFromIdGen(Stream): + """Helper class for simple streams that use a stream ID generator""" + + def __init__( + self, + local_instance_name: str, + update_function: UpdateFunction, + stream_id_gen: "AbstractStreamIdGenerator", + ): + self._stream_id_gen = stream_id_gen + super().__init__(local_instance_name, update_function) + + def current_token(self, instance_name: str) -> Token: + return self._stream_id_gen.get_current_token_for_writer(instance_name) + + def minimal_local_current_token(self) -> Token: + return self._stream_id_gen.get_minimal_local_current_token() + + def current_token_without_instance( current_token: Callable[[], int] ) -> Callable[[str], int]: @@ -242,17 +266,21 @@ class BackfillStream(Stream): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - self._current_token, self.store.get_all_new_backfill_event_rows, ) - def _current_token(self, instance_name: str) -> int: + def current_token(self, instance_name: str) -> Token: # The backfill stream over replication operates on *positive* numbers, # which means we need to negate it. return -self.store._backfill_id_gen.get_current_token_for_writer(instance_name) + def minimal_local_current_token(self) -> Token: + # The backfill stream over replication operates on *positive* numbers, + # which means we need to negate it. 
+ return -self.store._backfill_id_gen.get_minimal_local_current_token() -class PresenceStream(Stream): + +class PresenceStream(_StreamFromIdGen): @attr.s(slots=True, frozen=True, auto_attribs=True) class PresenceStreamRow: user_id: str @@ -283,9 +311,7 @@ class PresenceStream(Stream): update_function = make_http_update_function(hs, self.NAME) super().__init__( - hs.get_instance_name(), - current_token_without_instance(store.get_current_presence_token), - update_function, + hs.get_instance_name(), update_function, store._presence_id_gen ) @@ -305,13 +331,18 @@ class PresenceFederationStream(Stream): ROW_TYPE = PresenceFederationStreamRow def __init__(self, hs: "HomeServer"): - federation_queue = hs.get_presence_handler().get_federation_queue() + self._federation_queue = hs.get_presence_handler().get_federation_queue() super().__init__( hs.get_instance_name(), - federation_queue.get_current_token, - federation_queue.get_replication_rows, + self._federation_queue.get_replication_rows, ) + def current_token(self, instance_name: str) -> Token: + return self._federation_queue.get_current_token(instance_name) + + def minimal_local_current_token(self) -> Token: + return self._federation_queue.get_current_token(self.local_instance_name) + class TypingStream(Stream): @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -341,20 +372,25 @@ class TypingStream(Stream): update_function: Callable[ [str, int, int, int], Awaitable[Tuple[List[Tuple[int, Any]], int, bool]] ] = typing_writer_handler.get_all_typing_updates - current_token_function = typing_writer_handler.get_current_token + self.current_token_function = typing_writer_handler.get_current_token else: # Query the typing writer process update_function = make_http_update_function(hs, self.NAME) - current_token_function = hs.get_typing_handler().get_current_token + self.current_token_function = hs.get_typing_handler().get_current_token super().__init__( hs.get_instance_name(), - current_token_without_instance(current_token_function), update_function, ) + def current_token(self, instance_name: str) -> Token: + return self.current_token_function() -class ReceiptsStream(Stream): + def minimal_local_current_token(self) -> Token: + return self.current_token_function() + + +class ReceiptsStream(_StreamFromIdGen): @attr.s(slots=True, frozen=True, auto_attribs=True) class ReceiptsStreamRow: room_id: str @@ -371,12 +407,12 @@ class ReceiptsStream(Stream): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_max_receipt_stream_id), store.get_all_updated_receipts, + store._receipts_id_gen, ) -class PushRulesStream(Stream): +class PushRulesStream(_StreamFromIdGen): """A user has changed their push rules""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -387,20 +423,16 @@ class PushRulesStream(Stream): ROW_TYPE = PushRulesStreamRow def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - self._current_token, - self.store.get_all_push_rule_updates, + store.get_all_push_rule_updates, + store._push_rules_stream_id_gen, ) - def _current_token(self, instance_name: str) -> int: - push_rules_token = self.store.get_max_push_rules_stream_id() - return push_rules_token - -class PushersStream(Stream): +class PushersStream(_StreamFromIdGen): """A user has added/changed/removed a pusher""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -418,8 +450,8 @@ class PushersStream(Stream): 
super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_pushers_stream_token), store.get_all_updated_pushers_rows, + store._pushers_id_gen, ) @@ -447,15 +479,20 @@ class CachesStream(Stream): ROW_TYPE = CachesStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_cache_stream_token_for_writer, - store.get_all_updated_caches, + self.store.get_all_updated_caches, ) + def current_token(self, instance_name: str) -> Token: + return self.store.get_cache_stream_token_for_writer(instance_name) -class DeviceListsStream(Stream): + def minimal_local_current_token(self) -> Token: + return self.current_token(self.local_instance_name) + + +class DeviceListsStream(_StreamFromIdGen): """Either a user has updated their devices or a remote server needs to be told about a device update. """ @@ -473,8 +510,8 @@ class DeviceListsStream(Stream): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(self.store.get_device_stream_token), self._update_function, + self.store._device_list_id_gen, ) async def _update_function( @@ -525,7 +562,7 @@ class DeviceListsStream(Stream): return updates, upper_limit_token, devices_limited or signatures_limited -class ToDeviceStream(Stream): +class ToDeviceStream(_StreamFromIdGen): """New to_device messages for a client""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -539,12 +576,12 @@ class ToDeviceStream(Stream): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_to_device_stream_token), store.get_all_new_device_messages, + store._device_inbox_id_gen, ) -class AccountDataStream(Stream): +class AccountDataStream(_StreamFromIdGen): """Global or per room account data was changed""" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -560,8 +597,8 @@ class AccountDataStream(Stream): self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(self.store.get_max_account_data_stream_id), self._update_function, + self.store._account_data_id_gen, ) async def _update_function( diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index da6d948e1b..38823113d8 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -19,10 +19,10 @@ from typing import TYPE_CHECKING, Iterable, Optional, Tuple, Type, TypeVar, cast import attr from synapse.replication.tcp.streams._base import ( - Stream, StreamRow, StreamUpdateResult, Token, + _StreamFromIdGen, ) if TYPE_CHECKING: @@ -139,7 +139,7 @@ _EventRows: Tuple[Type[BaseEventsStreamRow], ...] 
= ( TypeToRow = {Row.TypeId: Row for Row in _EventRows} -class EventsStream(Stream): +class EventsStream(_StreamFromIdGen): """We received a new event, or an event went from being an outlier to not""" NAME = "events" @@ -147,9 +147,7 @@ class EventsStream(Stream): def __init__(self, hs: "HomeServer"): self._store = hs.get_datastores().main super().__init__( - hs.get_instance_name(), - self._store._stream_id_gen.get_current_token_for_writer, - self._update_function, + hs.get_instance_name(), self._update_function, self._store._stream_id_gen ) async def _update_function( diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 4046bdec69..7f5af5852c 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -18,6 +18,7 @@ import attr from synapse.replication.tcp.streams._base import ( Stream, + Token, current_token_without_instance, make_http_update_function, ) @@ -47,7 +48,7 @@ class FederationStream(Stream): # will be a real FederationSender, which has stubs for current_token and # get_replication_rows.) federation_sender = hs.get_federation_sender() - current_token = current_token_without_instance( + self.current_token_func = current_token_without_instance( federation_sender.get_current_token ) update_function: Callable[ @@ -57,15 +58,21 @@ class FederationStream(Stream): elif hs.should_send_federation(): # federation sender: Query master process update_function = make_http_update_function(hs, self.NAME) - current_token = self._stub_current_token + self.current_token_func = self._stub_current_token else: # other worker: stub out the update function (we're not interested in # any updates so when we get a POSITION we do nothing) update_function = self._stub_update_function - current_token = self._stub_current_token + self.current_token_func = self._stub_current_token - super().__init__(hs.get_instance_name(), current_token, update_function) + super().__init__(hs.get_instance_name(), update_function) + + def current_token(self, instance_name: str) -> Token: + return self.current_token_func(instance_name) + + def minimal_local_current_token(self) -> Token: + return self.current_token(self.local_instance_name) @staticmethod def _stub_current_token(instance_name: str) -> int: diff --git a/synapse/replication/tcp/streams/partial_state.py b/synapse/replication/tcp/streams/partial_state.py index a8ce5ffd72..ad181d7e93 100644 --- a/synapse/replication/tcp/streams/partial_state.py +++ b/synapse/replication/tcp/streams/partial_state.py @@ -15,7 +15,7 @@ from typing import TYPE_CHECKING import attr -from synapse.replication.tcp.streams import Stream +from synapse.replication.tcp.streams._base import _StreamFromIdGen if TYPE_CHECKING: from synapse.server import HomeServer @@ -27,7 +27,7 @@ class UnPartialStatedRoomStreamRow: room_id: str -class UnPartialStatedRoomStream(Stream): +class UnPartialStatedRoomStream(_StreamFromIdGen): """ Stream to notify about rooms becoming un-partial-stated; that is, when the background sync finishes such that we now have full state for @@ -41,8 +41,8 @@ class UnPartialStatedRoomStream(Stream): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_un_partial_stated_rooms_token, store.get_un_partial_stated_rooms_from_stream, + store._un_partial_stated_rooms_stream_id_gen, ) @@ -56,7 +56,7 @@ class UnPartialStatedEventStreamRow: rejection_status_changed: bool -class UnPartialStatedEventStream(Stream): +class 
UnPartialStatedEventStream(_StreamFromIdGen): """ Stream to notify about events becoming un-partial-stated. """ @@ -68,6 +68,6 @@ class UnPartialStatedEventStream(Stream): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - store.get_un_partial_stated_events_token, store.get_un_partial_stated_events_from_stream, + store._un_partial_stated_events_stream_id_gen, ) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d2c874b9a8..9c3eafb562 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -133,6 +133,15 @@ class AbstractStreamIdGenerator(metaclass=abc.ABCMeta): """ raise NotImplementedError() + @abc.abstractmethod + def get_minimal_local_current_token(self) -> int: + """Tries to return a minimal current token for the local instance, + i.e. for writers this would be the last successful write. + + If local instance is not a writer (or has written yet) then falls back + to returning the normal "current token". + """ + @abc.abstractmethod def get_next(self) -> AsyncContextManager[int]: """ @@ -312,6 +321,9 @@ class StreamIdGenerator(AbstractStreamIdGenerator): def get_current_token_for_writer(self, instance_name: str) -> int: return self.get_current_token() + def get_minimal_local_current_token(self) -> int: + return self.get_current_token() + class MultiWriterIdGenerator(AbstractStreamIdGenerator): """Generates and tracks stream IDs for a stream with multiple writers. @@ -408,6 +420,11 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # The maximum stream ID that we have seen been allocated across any writer. self._max_seen_allocated_stream_id = 1 + # The maximum position of the local instance. This can be higher than + # the corresponding position in `current_positions` table when there are + # no active writes in progress. + self._max_position_of_local_instance = self._max_seen_allocated_stream_id + self._sequence_gen = PostgresSequenceGenerator(sequence_name) # We check that the table and sequence haven't diverged. @@ -427,6 +444,16 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._current_positions.values(), default=1 ) + # For the case where `stream_positions` is not up to date, + # `_persisted_upto_position` may be higher. + self._max_seen_allocated_stream_id = max( + self._max_seen_allocated_stream_id, self._persisted_upto_position + ) + + # Bump our local maximum position now that we've loaded things from the + # DB. + self._max_position_of_local_instance = self._max_seen_allocated_stream_id + if not writers: # If there have been no explicit writers given then any instance can # write to the stream. In which case, let's pre-seed our own @@ -545,6 +572,14 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): if instance == self._instance_name: self._current_positions[instance] = stream_id + if self._writers: + # If we have explicit writers then make sure that each instance has + # a position. 
+ for writer in self._writers: + self._current_positions.setdefault( + writer, self._persisted_upto_position + ) + cur.close() def _load_next_id_txn(self, txn: Cursor) -> int: @@ -688,6 +723,9 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): if new_cur: curr = self._current_positions.get(self._instance_name, 0) self._current_positions[self._instance_name] = max(curr, new_cur) + self._max_position_of_local_instance = max( + curr, new_cur, self._max_position_of_local_instance + ) self._add_persisted_position(next_id) @@ -702,10 +740,26 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): # persisted up to position. This stops Synapse from doing a full table # scan when a new writer announces itself over replication. with self._lock: - return self._return_factor * self._current_positions.get( + if self._instance_name == instance_name: + return self._return_factor * self._max_position_of_local_instance + + pos = self._current_positions.get( instance_name, self._persisted_upto_position ) + # We want to return the maximum "current token" that we can for a + # writer, this helps ensure that streams progress as fast as + # possible. + pos = max(pos, self._persisted_upto_position) + + return self._return_factor * pos + + def get_minimal_local_current_token(self) -> int: + with self._lock: + return self._return_factor * self._current_positions.get( + self._instance_name, self._persisted_upto_position + ) + def get_positions(self) -> Dict[str, int]: """Get a copy of the current positon map. @@ -774,6 +828,18 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator): self._persisted_upto_position = max(min_curr, self._persisted_upto_position) + # Advance our local max position. + self._max_position_of_local_instance = max( + self._max_position_of_local_instance, self._persisted_upto_position + ) + + if not self._unfinished_ids and not self._in_flight_fetches: + # If we don't have anything in flight, it's safe to advance to the + # max seen stream ID. + self._max_position_of_local_instance = max( + self._max_seen_allocated_stream_id, self._max_position_of_local_instance + ) + # We now iterate through the seen positions, discarding those that are # less than the current min positions, and incrementing the min position # if its exactly one greater. diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 9174fb0964..fd53b0644c 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -259,8 +259,9 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): id_gen = self._create_id_generator() - # The table is empty so we expect an empty map for positions - self.assertEqual(id_gen.get_positions(), {}) + # The table is empty so we expect the map for positions to have a dummy + # minimum value. + self.assertEqual(id_gen.get_positions(), {"master": 1}) def test_single_instance(self) -> None: """Test that reads and writes from a single process are handled @@ -349,15 +350,12 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): first_id_gen = self._create_id_generator("first", writers=["first", "second"]) second_id_gen = self._create_id_generator("second", writers=["first", "second"]) - # The first ID gen will notice that it can advance its token to 7 as it - # has no in progress writes... 
self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) - # ... but the second ID gen doesn't know that. self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) # Try allocating a new ID gen and check that we only see position @@ -398,6 +396,56 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): second_id_gen.advance("first", 8) self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9}) + def test_multi_instance_empty_row(self) -> None: + """Test that reads and writes from multiple processes are handled + correctly, when one of the writers starts without any rows. + """ + # Insert some rows for two out of three of the ID gens. + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator( + "first", writers=["first", "second", "third"] + ) + second_id_gen = self._create_id_generator( + "second", writers=["first", "second", "third"] + ) + third_id_gen = self._create_id_generator( + "third", writers=["first", "second", "third"] + ) + + self.assertEqual( + first_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("third"), 7) + + self.assertEqual( + second_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("third"), 7) + + # Try allocating a new ID gen and check that we only see position + # advanced after we leave the context manager. 
+ + async def _get_next_async() -> None: + async with third_id_gen.get_next() as stream_id: + self.assertEqual(stream_id, 8) + + self.assertEqual( + third_id_gen.get_positions(), {"first": 3, "second": 7, "third": 7} + ) + self.assertEqual(third_id_gen.get_persisted_upto_position(), 7) + + self.get_success(_get_next_async()) + + self.assertEqual( + third_id_gen.get_positions(), {"first": 3, "second": 7, "third": 8} + ) + def test_get_next_txn(self) -> None: """Test that the `get_next_txn` function works correctly.""" @@ -600,6 +648,70 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): with self.assertRaises(IncorrectDatabaseSetup): self._create_id_generator("first") + def test_minimal_local_token(self) -> None: + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator("first", writers=["first", "second"]) + second_id_gen = self._create_id_generator("second", writers=["first", "second"]) + + self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(first_id_gen.get_minimal_local_current_token(), 3) + + self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(second_id_gen.get_minimal_local_current_token(), 7) + + def test_current_token_gap(self) -> None: + """Test that getting the current token for a writer returns the maximal + token when there are no writes. + """ + self._insert_rows("first", 3) + self._insert_rows("second", 4) + + first_id_gen = self._create_id_generator( + "first", writers=["first", "second", "third"] + ) + second_id_gen = self._create_id_generator( + "second", writers=["first", "second", "third"] + ) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) + self.assertEqual(second_id_gen.get_current_token(), 7) + + # Check that the first ID gen advancing causes the second ID gen to + # advance (as the second ID gen has nothing in flight). + + async def _get_next_async() -> None: + async with first_id_gen.get_next_mult(2): + pass + + self.get_success(_get_next_async()) + second_id_gen.advance("first", 9) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 9) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 9) + self.assertEqual(second_id_gen.get_current_token(), 7) + + # Check that the first ID gen advancing doesn't advance the second ID + # gen when the second ID gen has stuff in flight. 
+ self.get_success(_get_next_async()) + + ctxmgr = second_id_gen.get_next() + self.get_success(ctxmgr.__aenter__()) + + second_id_gen.advance("first", 11) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 11) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 9) + self.assertEqual(second_id_gen.get_current_token(), 7) + + self.get_success(ctxmgr.__aexit__(None, None, None)) + + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 11) + self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 12) + self.assertEqual(second_id_gen.get_current_token(), 7) + class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """Tests MultiWriterIdGenerator that produce *negative* stream IDs.""" @@ -712,8 +824,8 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(_get_next_async()) - self.assertEqual(id_gen_1.get_positions(), {"first": -1}) - self.assertEqual(id_gen_2.get_positions(), {"first": -1}) + self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -1}) + self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -1}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -1) self.assertEqual(id_gen_2.get_persisted_upto_position(), -1) @@ -822,11 +934,11 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase): second_id_gen = self._create_id_generator("second", writers=["first", "second"]) self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 6}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) - self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 6) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) self.assertEqual(first_id_gen.get_persisted_upto_position(), 7) self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) - self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 7) self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) self.assertEqual(second_id_gen.get_persisted_upto_position(), 7) From 3ab861ab9eaf54a336a5a900eeb8402c3e9ed811 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 23 Oct 2023 14:28:05 -0400 Subject: [PATCH 084/142] Fix type hint errors from Twisted trunk (#16526) --- changelog.d/16526.misc | 1 + synapse/util/file_consumer.py | 16 +++++++++++----- tests/handlers/test_appservice.py | 1 + tests/http/server/_base.py | 2 +- tests/http/test_matrixfederationclient.py | 2 +- tests/unittest.py | 3 ++- 6 files changed, 17 insertions(+), 8 deletions(-) create mode 100644 changelog.d/16526.misc diff --git a/changelog.d/16526.misc b/changelog.d/16526.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16526.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 46771a401b..26b46be5e1 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -13,7 +13,7 @@ # limitations under the License. 
import queue -from typing import BinaryIO, Optional, Union, cast +from typing import Any, BinaryIO, Optional, Union, cast from twisted.internet import threads from twisted.internet.defer import Deferred @@ -58,7 +58,9 @@ class BackgroundFileConsumer: self._bytes_queue: queue.Queue[Optional[bytes]] = queue.Queue() # Deferred that is resolved when finished writing - self._finished_deferred: Optional[Deferred[None]] = None + # + # This is really Deferred[None], but mypy doesn't seem to like that. + self._finished_deferred: Optional[Deferred[Any]] = None # If the _writer thread throws an exception it gets stored here. self._write_exception: Optional[Exception] = None @@ -80,9 +82,13 @@ class BackgroundFileConsumer: self.streaming = streaming self._finished_deferred = run_in_background( threads.deferToThreadPool, - self._reactor, - self._reactor.getThreadPool(), - self._writer, + # mypy seems to get confused with the chaining of ParamSpec from + # run_in_background to deferToThreadPool. + # + # For Twisted trunk, ignore arg-type; for Twisted release ignore unused-ignore. + self._reactor, # type: ignore[arg-type,unused-ignore] + self._reactor.getThreadPool(), # type: ignore[arg-type,unused-ignore] + self._writer, # type: ignore[arg-type,unused-ignore] ) if not streaming: self._producer.resumeProducing() diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 867dbd6001..c888d1ff01 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -156,6 +156,7 @@ class AppServiceHandlerTestCase(unittest.TestCase): result = self.successResultOf( defer.ensureDeferred(self.handler.query_room_alias_exists(room_alias)) ) + assert result is not None self.mock_as_api.query_alias.assert_called_once_with( interested_service, room_alias_str diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 36472e57a8..d524c183f8 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -335,7 +335,7 @@ class Deferred__next__Patch: self._request_number = request_number self._seen_awaits = seen_awaits - self._original_Deferred___next__ = Deferred.__next__ + self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore] # The number of `await`s on `Deferred`s we have seen so far. self.awaits_seen = 0 diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index ab94f3f67a..bf1d287699 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -70,7 +70,7 @@ class FederationClientTests(HomeserverTestCase): """ @defer.inlineCallbacks - def do_request() -> Generator["Deferred[object]", object, object]: + def do_request() -> Generator["Deferred[Any]", object, object]: with LoggingContext("one") as context: fetch_d = defer.ensureDeferred( self.cl.get_json("testserv:8008", "foo/bar") diff --git a/tests/unittest.py b/tests/unittest.py index 99ad02eb06..79c47fc3cc 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -30,6 +30,7 @@ from typing import ( Generic, Iterable, List, + Mapping, NoReturn, Optional, Tuple, @@ -251,7 +252,7 @@ class TestCase(unittest.TestCase): except AssertionError as e: raise (type(e))(f"Assert error for '.{key}':") from e - def assert_dict(self, required: dict, actual: dict) -> None: + def assert_dict(self, required: Mapping, actual: Mapping) -> None: """Does a partial assert of a dict. 
Args: From 5fe76b9434e22bb752c252dd9c66c3c2bfb90dfc Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 23 Oct 2023 19:21:23 +0100 Subject: [PATCH 085/142] Add test case to detect dodgy b64 encoding --- tests/http/test_proxyagent.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 8164b0b78e..b48c2c293a 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -217,6 +217,20 @@ class ProxyParserTests(TestCase): ) +class TestBasicProxyCredentials(TestCase): + def test_long_user_pass_string_encoded_without_newlines(self) -> None: + """Reproduces https://github.com/matrix-org/synapse/pull/16504.""" + creds = BasicProxyCredentials( + b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass@proxy.local:9988" + ) + auth_value = creds.as_proxy_authorization_value() + self.assertNotIn(b"\n", auth_value) + self.assertEqual( + creds.as_proxy_authorization_value(), + b"Basic: bG9vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vbmd1c2VyOnBhc3M=", + ) + + class MatrixFederationAgentTests(TestCase): def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() From 3df70aa80001e05b0bbe69fd3328f11aceaab4aa Mon Sep 17 00:00:00 2001 From: Michael Sasser Date: Mon, 23 Oct 2023 20:50:50 +0200 Subject: [PATCH 086/142] Replace all Prometheus datasource UIDs of the Grafana Dashboard with the variable `${DS_PROMETHEUS}` and remove `__inputs` (#16471) --- changelog.d/16471.bugfix | 1 + contrib/grafana/synapse.json | 619 +++++++++++++++++------------------ 2 files changed, 306 insertions(+), 314 deletions(-) create mode 100644 changelog.d/16471.bugfix diff --git a/changelog.d/16471.bugfix b/changelog.d/16471.bugfix new file mode 100644 index 0000000000..c94cd5b78f --- /dev/null +++ b/changelog.d/16471.bugfix @@ -0,0 +1 @@ +Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. 
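(Context for the proxy-credentials test added in the previous patch; this note is not part of any patch in the series. Python ships two base64 helpers that differ in whether they wrap their output, and that wrapping is exactly what the new test guards against: a newline inside a Proxy-Authorization value corrupts the header. A minimal, standalone sketch of the difference, with made-up variable names, is:

import base64

# A user:pass string long enough that its base64 encoding exceeds 76 characters.
long_creds = b"x" * 80 + b":pass"

# encodebytes() wraps its output every 76 characters and appends a trailing
# newline; neither is legal inside an HTTP header value.
assert b"\n" in base64.encodebytes(long_creds)

# b64encode() emits a single unbroken line, which is what the header needs.
assert b"\n" not in base64.b64encode(long_creds)
)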
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 90f449aa76..188597c8dd 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -1,14 +1,4 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], "__elements": {}, "__requires": [ { @@ -47,7 +37,7 @@ { "builtIn": 1, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "enable": false, "hide": true, @@ -93,7 +83,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -107,7 +97,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -129,7 +119,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -203,7 +193,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -235,7 +225,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -333,7 +323,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -343,7 +333,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -354,7 +344,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -364,7 +354,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", @@ -374,7 +364,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "25%", @@ -382,7 +372,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "legendFormat": "5%", @@ -390,7 +380,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": 
"sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", "legendFormat": "Average", @@ -398,7 +388,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))", "hide": false, @@ -468,7 +458,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -515,7 +505,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -575,7 +565,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -625,7 +615,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -638,7 +628,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "hide": true, @@ -776,7 +766,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -831,7 +821,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -844,7 +834,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -893,7 +883,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -910,7 +900,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -973,7 +963,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", @@ -987,7 +977,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1217,7 +1207,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -1267,7 +1257,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -1280,7 +1270,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "interval": "", @@ 
-1326,7 +1316,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1379,7 +1369,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -1432,7 +1422,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1487,7 +1477,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", @@ -1500,7 +1490,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1", @@ -1546,7 +1536,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1592,7 +1582,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1604,7 +1594,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -1664,7 +1654,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1710,7 +1700,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_client_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -1720,7 +1710,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_matrixfederationclient_requests_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -1857,7 +1847,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -1869,7 +1859,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -1893,7 +1883,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -1967,7 +1957,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -1998,7 +1988,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "editable": true, @@ -2049,7 +2039,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": 
"${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size])) without (job,index)", "format": "time_series", @@ -2099,7 +2089,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2140,7 +2130,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -2187,7 +2177,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2228,7 +2218,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -2278,7 +2268,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2322,7 +2312,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -2370,7 +2360,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 1, "fill": 1, @@ -2414,7 +2404,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum(rate(synapse_storage_events_persisted_events_sep_total{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)", @@ -2614,7 +2604,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "CPU and DB time spent on most expensive state resolution in a room, summed over all workers. This is a very rough proxy for \"how fast is state res\", but it doesn't accurately represent the system load (e.g. 
it completely ignores cheap state resolutions).\n", "fieldConfig": { @@ -2692,7 +2682,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": false, "expr": "sum(rate(synapse_state_res_db_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", @@ -2706,7 +2696,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": false, "expr": "sum(rate(synapse_state_res_cpu_for_biggest_room_seconds_total{instance=\"$instance\"}[1m]))", @@ -2726,7 +2716,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -2738,7 +2728,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -2755,7 +2745,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -2808,7 +2798,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -2877,7 +2867,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -2926,7 +2916,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})", "format": "time_series", @@ -2976,7 +2966,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3029,7 +3019,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3098,7 +3088,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3151,7 +3141,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3220,7 +3210,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3272,7 +3262,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": 
"time_series", @@ -3321,7 +3311,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -3374,7 +3364,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))", "format": "time_series", @@ -3422,7 +3412,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3475,7 +3465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", "format": "time_series", @@ -3486,7 +3476,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -3529,7 +3519,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -3541,7 +3531,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -3557,7 +3547,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3604,7 +3594,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3650,7 +3640,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3697,7 +3687,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_background_process_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -3743,7 +3733,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3788,7 +3778,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_background_process_in_flight_count{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "legendFormat": "{{job}}-{{index}} {{name}}", @@ -3830,7 +3820,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -3842,7 +3832,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -3858,7 +3848,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": 
"${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -3905,7 +3895,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -3915,7 +3905,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_util_metrics_block_count_total{block_name=\"_send_new_transaction\",instance=\"$instance\"}[$bucket_size]) - ignoring (block_name) rate(synapse_federation_client_sent_transactions_total{instance=\"$instance\"}[$bucket_size]))", "legendFormat": "failed txn rate", @@ -3958,7 +3948,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4005,7 +3995,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_received_pdus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4015,7 +4005,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_received_edus_total{instance=~\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4061,7 +4051,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4108,7 +4098,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations_count_total{instance=\"$instance\"}[$bucket_size]))", @@ -4121,7 +4111,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_client_sent_edus_total{instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -4167,7 +4157,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4214,7 +4204,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_federation_client_sent_edus_by_type_total{instance=\"$instance\"}[$bucket_size])", @@ -4509,7 +4499,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "The number of events in the in-memory queues ", "fieldConfig": { @@ -4556,7 +4546,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", @@ -4568,7 +4558,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", @@ -4617,7 +4607,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Number of events queued up on the master process for processing by the federation sender", "fieldConfig": { @@ -4665,7 +4655,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_changed_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": 
"time_series", @@ -4676,7 +4666,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_map_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4688,7 +4678,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_presence_destinations_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4700,7 +4690,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_keyed_edu_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4712,7 +4702,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_edus_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4724,7 +4714,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_federation_send_queue_pos_time_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -4780,7 +4770,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4857,7 +4847,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -4892,7 +4882,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -4981,7 +4971,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -4992,7 +4982,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5003,7 +4993,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5014,7 +5004,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -5025,7 +5015,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", @@ -5034,7 +5024,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, 
sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "interval": "", @@ -5043,7 +5033,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -5116,7 +5106,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -5193,7 +5183,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)", "format": "heatmap", @@ -5229,7 +5219,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -5279,7 +5269,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", @@ -5333,7 +5323,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -5383,7 +5373,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", @@ -5437,7 +5427,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -5477,7 +5467,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_federation_soft_failed_events_total{instance=\"$instance\"}[$bucket_size]))", "interval": "", @@ -5522,7 +5512,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -5903,7 +5893,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -6008,7 +5998,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.9995, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", @@ -6021,7 +6011,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.99, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", @@ -6033,7 +6023,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6044,7 +6034,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, 
sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6054,7 +6044,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "format": "time_series", @@ -6064,7 +6054,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.25, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "legendFormat": "25%", @@ -6072,7 +6062,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.05, sum(rate(synapse_rate_limit_queue_wait_time_seconds_bucket{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", "legendFormat": "5%", @@ -6080,7 +6070,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_rate_limit_queue_wait_time_seconds_sum{index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_rate_limit_queue_wait_time_seconds_count{index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "legendFormat": "Average", @@ -6267,7 +6257,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -6280,7 +6270,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -6359,7 +6349,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", @@ -6373,7 +6363,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0", @@ -6394,7 +6384,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -6441,7 +6431,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10,synapse_pushers{job=~\"$job\",index=~\"$index\", instance=\"$instance\"})", "legendFormat": "{{kind}} {{app_id}}", @@ -6483,7 +6473,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -6495,7 +6485,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -6662,7 +6652,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "exemplar": true, "expr": "sum(rate(synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", @@ -7077,7 +7067,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": 
"${DS_PROMETHEUS}" }, "refId": "A" } @@ -7089,7 +7079,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -7101,7 +7091,7 @@ "panels": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -7179,7 +7169,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", "format": "time_series", @@ -7198,7 +7188,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", "fieldConfig": { @@ -7247,7 +7237,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7259,7 +7249,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7269,7 +7259,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7279,7 +7269,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7327,7 +7317,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7379,7 +7369,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10, rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -7427,7 +7417,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7479,7 +7469,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7527,7 +7517,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7579,7 +7569,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_transaction_time_sum_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -7627,7 +7617,7 @@ "dashLength": 10, "dashes": false, 
"datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -7673,7 +7663,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7683,7 +7673,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.9, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7693,7 +7683,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7703,7 +7693,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, sum(rate(synapse_storage_schedule_time_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le))", "format": "time_series", @@ -7751,7 +7741,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -7763,7 +7753,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -7779,7 +7769,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7830,7 +7820,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])", "format": "time_series", @@ -7877,7 +7867,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -7928,7 +7918,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "(rate(synapse_util_metrics_block_ru_utime_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds_total[$bucket_size])) / rate(synapse_util_metrics_block_count_total[$bucket_size])", "format": "time_series", @@ -8079,7 +8069,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "The time each database transaction takes to execute, on average, broken down by metrics block.", "editable": true, @@ -8131,7 +8121,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8178,7 +8168,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8228,7 +8218,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": 
"${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8275,7 +8265,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8325,7 +8315,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_time_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8374,7 +8364,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -8414,7 +8404,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_metrics_block_count_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "interval": "", @@ -8457,7 +8447,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -8469,7 +8459,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -8485,7 +8475,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 2, "editable": true, @@ -8538,7 +8528,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -8588,7 +8578,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8639,7 +8629,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_util_caches_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -8688,7 +8678,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editable": true, "error": false, @@ -8739,7 +8729,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -8787,7 +8777,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -8839,7 +8829,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "topk(10, rate(synapse_util_caches_cache{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache_hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", "format": "time_series", @@ -8888,7 +8878,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -8935,7 +8925,7 
@@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_cache_evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -8981,7 +8971,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -8993,7 +8983,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9009,7 +8999,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9055,7 +9045,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_util_caches_response_cache_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "interval": "", @@ -9099,7 +9089,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9145,7 +9135,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_util_caches_response_cache_hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", "interval": "", @@ -9154,7 +9144,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "", "interval": "", @@ -9199,7 +9189,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -9211,7 +9201,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9227,7 +9217,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9274,7 +9264,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", "format": "time_series", @@ -9321,7 +9311,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "decimals": 3, "editable": true, @@ -9373,7 +9363,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", "format": "time_series", @@ -9420,7 +9410,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.", "fieldConfig": { @@ -9475,7 +9465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -9522,7 +9512,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9569,7 +9559,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": 
"rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -9614,7 +9604,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9661,7 +9651,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", @@ -9772,7 +9762,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -9784,7 +9774,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -9801,7 +9791,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9848,7 +9838,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", "format": "time_series", @@ -9893,7 +9883,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -9991,7 +9981,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10090,7 +10080,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10288,7 +10278,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10335,7 +10325,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_replication_tcp_protocol_close_reason_total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", @@ -10382,7 +10372,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10429,7 +10419,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -10439,7 +10429,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", "format": "time_series", @@ -10484,7 +10474,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -10496,7 +10486,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -10512,7 +10502,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10559,7 +10549,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() 
group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -10607,7 +10597,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10654,7 +10644,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", @@ -10702,7 +10692,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -10750,7 +10740,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1", "format": "time_series", @@ -10797,7 +10787,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -10809,7 +10799,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -10833,7 +10823,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { @@ -10909,7 +10899,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -10941,7 +10931,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", "fieldConfig": { @@ -10989,7 +10979,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0", "format": "heatmap", @@ -11044,7 +11034,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", "fieldConfig": { @@ -11120,7 +11110,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -11152,7 +11142,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", "fieldConfig": { @@ -11199,7 +11189,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11209,7 +11199,7 
@@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11219,7 +11209,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11229,7 +11219,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11284,7 +11274,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { @@ -11360,7 +11350,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0)", "format": "heatmap", @@ -11392,7 +11382,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", "fieldConfig": { @@ -11439,7 +11429,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11449,7 +11439,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11459,7 +11449,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ -11469,7 +11459,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events_total > 0))", "format": "time_series", @@ 
-11524,7 +11514,7 @@ }, "dataFormat": "tsbuckets", "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", "fieldConfig": { @@ -11600,7 +11590,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "heatmap", @@ -11634,7 +11624,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", "fieldConfig": { @@ -11682,7 +11672,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", @@ -11695,7 +11685,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11706,7 +11696,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11717,7 +11707,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", @@ -11765,7 +11755,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", "fill": 1, @@ -11805,7 +11795,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11814,7 +11804,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11823,7 +11813,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_storage_events_times_pruned_extremities_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "interval": "", @@ -11866,7 +11856,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -11878,7 +11868,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -11895,7 +11885,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -11949,7 +11939,7 @@ 
"targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "max(synapse_admin_mau_max{instance=\"$instance\"})", @@ -11963,7 +11953,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "max(synapse_admin_mau_current{instance=\"$instance\"})", @@ -12012,7 +12002,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12051,7 +12041,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}", "interval": "", @@ -12094,7 +12084,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12106,7 +12096,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12123,7 +12113,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12169,7 +12159,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_notifier_users_woken_by_stream_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12222,7 +12212,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12268,7 +12258,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_get_updates_total{job=~\"$job\",instance=\"$instance\"}[$bucket_size])", @@ -12319,7 +12309,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12331,7 +12321,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12348,7 +12338,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12387,7 +12377,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_appservice_api_sent_events_total{instance=\"$instance\"}[$bucket_size])", @@ -12436,7 +12426,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12475,7 +12465,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_appservice_api_sent_transactions_total{instance=\"$instance\"}[$bucket_size])", @@ -12522,7 +12512,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12534,7 +12524,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12550,7 +12540,7 @@ "dashLength": 10, "dashes": false, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12589,7 +12579,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": 
"rate(synapse_handler_presence_notified_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12598,7 +12588,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_federation_presence_out_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12607,7 +12597,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_presence_updates_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12616,7 +12606,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_federation_presence_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12625,7 +12615,7 @@ }, { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "rate(synapse_handler_presence_bump_active_time_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", @@ -12670,7 +12660,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12709,7 +12699,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_state_transition_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12758,7 +12748,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fill": 1, "fillGradient": 0, @@ -12797,7 +12787,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_handler_presence_notify_reason_total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", @@ -12844,7 +12834,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -12856,7 +12846,7 @@ "collapsed": true, "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "gridPos": { "h": 1, @@ -12869,7 +12859,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -12946,7 +12936,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_external_cache_set{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", @@ -12966,7 +12956,7 @@ "dashes": false, "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fill": 1, @@ -13006,7 +12996,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "sum without (hit) (rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size]))", @@ -13063,7 +13053,7 @@ "dataFormat": "tsbuckets", "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { @@ -13140,7 +13130,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])) by (le)", 
"format": "heatmap", @@ -13172,7 +13162,7 @@ { "datasource": { "type": "prometheus", - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "description": "", "fieldConfig": { @@ -13246,7 +13236,7 @@ "targets": [ { "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", "expr": "rate(synapse_external_cache_get{job=~\"$job\", instance=\"$instance\", index=~\"$index\", hit=\"False\"}[$bucket_size])", @@ -13264,7 +13254,7 @@ { "datasource": { "type": "prometheus", - "uid": "000000001" + "uid": "${DS_PROMETHEUS}" }, "refId": "A" } @@ -13290,7 +13280,8 @@ "hide": 0, "includeAll": false, "multi": false, - "name": "datasource", + "name": "DS_PROMETHEUS", + "label": "Datasource", "options": [], "query": "prometheus", "queryValue": "", @@ -13361,7 +13352,7 @@ { "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, @@ -13387,7 +13378,7 @@ "allValue": "", "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, @@ -13417,7 +13408,7 @@ "allValue": ".*", "current": {}, "datasource": { - "uid": "$datasource" + "uid": "${DS_PROMETHEUS}" }, "definition": "", "hide": 0, From 79f48b2b4fd6a90d919dc64f5a8d393c50cd5652 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 24 Oct 2023 13:01:02 +0100 Subject: [PATCH 087/142] 1.95.0 --- CHANGES.md | 7 +++++++ changelog.d/16524.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 14 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/16524.misc diff --git a/CHANGES.md b/CHANGES.md index 0cabf8a6ec..caecc737f3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.95.0 (2023-10-24) + +### Internal Changes + +- Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). ([\#16524](https://github.com/matrix-org/synapse/issues/16524)) + + # Synapse 1.95.0rc1 (2023-10-17) ### Bugfixes diff --git a/changelog.d/16524.misc b/changelog.d/16524.misc deleted file mode 100644 index 3f8ac1bce7..0000000000 --- a/changelog.d/16524.misc +++ /dev/null @@ -1 +0,0 @@ -Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). diff --git a/debian/changelog b/debian/changelog index 979d5facfa..9bd5490ede 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.95.0) stable; urgency=medium + + * New Synapse release 1.95.0. + + -- Synapse Packaging team Tue, 24 Oct 2023 13:00:46 +0100 + matrix-synapse-py3 (1.95.0~rc1) stable; urgency=medium * New synapse release 1.95.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index 498b663bae..f3764b1a57 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.95.0rc1" +version = "1.95.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From ffbe9b76665fb5dd9513045357247da39339e0b8 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 24 Oct 2023 07:09:59 -0500 Subject: [PATCH 088/142] Remove duplicate call to wake a remote destination when using federation sending worker (#16515) --- changelog.d/16515.misc | 1 + synapse/replication/tcp/client.py | 11 ----------- synapse/replication/tcp/handler.py | 2 -- 3 files changed, 1 insertion(+), 13 deletions(-) create mode 100644 changelog.d/16515.misc diff --git a/changelog.d/16515.misc b/changelog.d/16515.misc new file mode 100644 index 0000000000..d54dd730e1 --- /dev/null +++ b/changelog.d/16515.misc @@ -0,0 +1 @@ +Remove duplicate call to mark remote server 'awake' when using a federation sending worker. diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d5337fe588..384355698d 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -279,14 +279,6 @@ class ReplicationDataHandler: # may be streaming. self.notifier.notify_replication() - def on_remote_server_up(self, server: str) -> None: - """Called when get a new REMOTE_SERVER_UP command.""" - - # Let's wake up the transaction queue for the server in case we have - # pending stuff to send to it. - if self.send_handler: - self.send_handler.wake_destination(server) - async def wait_for_stream_position( self, instance_name: str, @@ -405,9 +397,6 @@ class FederationSenderHandler: self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - def wake_destination(self, server: str) -> None: - self.federation_sender.wake_destination(server) - async def process_replication_rows( self, stream_name: str, token: int, rows: list ) -> None: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index b668bb5da1..1d586fb180 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -657,8 +657,6 @@ class ReplicationCommandHandler: self, conn: IReplicationConnection, cmd: RemoteServerUpCommand ) -> None: """Called when get a new REMOTE_SERVER_UP command.""" - self._replication_data_handler.on_remote_server_up(cmd.data) - self._notifier.notify_remote_server_up(cmd.data) def on_LOCK_RELEASED( From 6ec98810e394588d0ff000b1875c8b70edc8c327 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 24 Oct 2023 13:26:41 +0100 Subject: [PATCH 089/142] Rework alias and public room list rules docs (#16541) --- changelog.d/16541.doc | 1 + .../configuration/config_documentation.md | 168 ++++++++++++++---- 2 files changed, 134 insertions(+), 35 deletions(-) create mode 100644 changelog.d/16541.doc diff --git a/changelog.d/16541.doc b/changelog.d/16541.doc new file mode 100644 index 0000000000..39aeecada6 --- /dev/null +++ b/changelog.d/16541.doc @@ -0,0 +1 @@ +Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) 
config options and improve their descriptions more generally. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 92e00c1380..6cc83c1cd0 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3797,62 +3797,160 @@ enable_room_list_search: false --- ### `alias_creation_rules` -The `alias_creation_rules` option controls who is allowed to create aliases -on this server. +The `alias_creation_rules` option allows server admins to prevent unwanted +alias creation on this server. -The format of this option is a list of rules that contain globs that -match against user_id, room_id and the new alias (fully qualified with -server name). The action in the first rule that matches is taken, -which can currently either be "allow" or "deny". +This setting is an optional list of 0 or more rules. By default, no list is +provided, meaning that all alias creations are permitted. -Missing user_id/room_id/alias fields default to "*". +Otherwise, requests to create aliases are matched against each rule in order. +The first rule that matches decides if the request is allowed or denied. If no +rule matches, the request is denied. In particular, this means that configuring +an empty list of rules will deny every alias creation request. -If no rules match the request is denied. An empty list means no one -can create aliases. +Each rule is a YAML object containing four fields, each of which is an optional string: -Options for the rules include: -* `user_id`: Matches against the creator of the alias. Defaults to "*". -* `alias`: Matches against the alias being created. Defaults to "*". -* `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*" -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +* `user_id`: a glob pattern that matches against the creator of the alias. +* `alias`: a glob pattern that matches against the alias being created. +* `room_id`: a glob pattern that matches against the room ID the alias is being pointed at. +* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. + +Each of the glob patterns is optional, defaulting to `*` ("match anything"). +Note that the patterns match against fully qualified IDs, e.g. against +`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead +of `alice`, `room` and `abcedgghijk`. Example configuration: + ```yaml +# No rule list specified. All alias creations are allowed. +# This is the default behaviour. alias_creation_rules: - - user_id: "bad_user" - alias: "spammy_alias" - room_id: "*" - action: deny ``` + +```yaml +# A list of one rule which allows everything. +# This has the same effect as the previous example. +alias_creation_rules: + - "action": "allow" +``` + +```yaml +# An empty list of rules. All alias creations are denied. +alias_creation_rules: [] +``` + +```yaml +# A list of one rule which denies everything. +# This has the same effect as the previous example. +alias_creation_rules: + - "action": "deny" +``` + +```yaml +# Prevent a specific user from creating aliases. +# Allow other users to create any alias +alias_creation_rules: + - user_id: "@bad_user:example.com" + action: deny + + - action: allow +``` + +```yaml +# Prevent aliases being created which point to a specific room. 
+alias_creation_rules: + - room_id: "!forbiddenRoom:example.com" + action: deny + + - action: allow +``` + --- ### `room_list_publication_rules` -The `room_list_publication_rules` option controls who can publish and -which rooms can be published in the public room list. +The `room_list_publication_rules` option allows server admins to prevent +unwanted entries from being published in the public room list. The format of this option is the same as that for -`alias_creation_rules`. +[`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or more +rules. By default, no list is provided, meaning that all rooms may be +published to the room list. -If the room has one or more aliases associated with it, only one of -the aliases needs to match the alias rule. If there are no aliases -then only rules with `alias: *` match. +Otherwise, requests to publish a room are matched against each rule in order. +The first rule that matches decides if the request is allowed or denied. If no +rule matches, the request is denied. In particular, this means that configuring +an empty list of rules will deny every alias creation request. -If no rules match the request is denied. An empty list means no one -can publish rooms. +Each rule is a YAML object containing four fields, each of which is an optional string: + +* `user_id`: a glob pattern that matches against the user publishing the room. +* `alias`: a glob pattern that matches against one of published room's aliases. + - If the room has no aliases, the alias match fails unless `alias` is unspecified or `*`. + - If the room has exactly one alias, the alias match succeeds if the `alias` pattern matches that alias. + - If the room has two or more aliases, the alias match succeeds if the pattern matches at least one of the aliases. +* `room_id`: a glob pattern that matches against the room ID of the room being published. +* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. + +Each of the glob patterns is optional, defaulting to `*` ("match anything"). +Note that the patterns match against fully qualified IDs, e.g. against +`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead +of `alice`, `room` and `abcedgghijk`. -Options for the rules include: -* `user_id`: Matches against the creator of the alias. Defaults to "*". -* `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*". -* `room_id`: Matches against the room ID being published. Defaults to "*". -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. Example configuration: + ```yaml +# No rule list specified. Anyone may publish any room to the public list. +# This is the default behaviour. room_list_publication_rules: - - user_id: "*" - alias: "*" - room_id: "*" - action: allow +``` + +```yaml +# A list of one rule which allows everything. +# This has the same effect as the previous example. +room_list_publication_rules: + - "action": "allow" +``` + +```yaml +# An empty list of rules. No-one may publish to the room list. +room_list_publication_rules: [] +``` + +```yaml +# A list of one rule which denies everything. +# This has the same effect as the previous example. +room_list_publication_rules: + - "action": "deny" +``` + +```yaml +# Prevent a specific user from publishing rooms. +# Allow other users to publish anything. 
+room_list_publication_rules: + - user_id: "@bad_user:example.com" + action: deny + + - action: allow +``` + +```yaml +# Prevent publication of a specific room. +room_list_publication_rules: + - room_id: "!forbiddenRoom:example.com" + action: deny + + - action: allow +``` + +```yaml +# Prevent publication of rooms with at least one alias containing the word "potato". +room_list_publication_rules: + - alias: "#*potato*:example.com" + action: deny + + - action: allow ``` --- From 2f1065f81bf2f72c4d90f2afc42409f1dced1e89 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 24 Oct 2023 14:33:37 +0100 Subject: [PATCH 090/142] Revert "Add test case to detect dodgy b64 encoding" This reverts commit 5fe76b9434e22bb752c252dd9c66c3c2bfb90dfc. I think I had this accidentally commited on my local develop branch, and so it accidentally got merged into upstream develop. This should re-land with corrections in #16504. --- tests/http/test_proxyagent.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index b48c2c293a..8164b0b78e 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -217,20 +217,6 @@ class ProxyParserTests(TestCase): ) -class TestBasicProxyCredentials(TestCase): - def test_long_user_pass_string_encoded_without_newlines(self) -> None: - """Reproduces https://github.com/matrix-org/synapse/pull/16504.""" - creds = BasicProxyCredentials( - b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass@proxy.local:9988" - ) - auth_value = creds.as_proxy_authorization_value() - self.assertNotIn(b"\n", auth_value) - self.assertEqual( - creds.as_proxy_authorization_value(), - b"Basic: bG9vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vbmd1c2VyOnBhc3M=", - ) - - class MatrixFederationAgentTests(TestCase): def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() From 95076f77c1b370ecef780c7bbf631bd34868c982 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20Bre=C5=BE=C3=A1k?= Date: Tue, 24 Oct 2023 15:45:21 +0200 Subject: [PATCH 091/142] Fix http/s proxy authentication with long username/passwords (#16504) --- changelog.d/16504.bugfix | 1 + synapse/http/connectproxyclient.py | 2 +- tests/http/test_proxyagent.py | 21 +++++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16504.bugfix diff --git a/changelog.d/16504.bugfix b/changelog.d/16504.bugfix new file mode 100644 index 0000000000..60839c474b --- /dev/null +++ b/changelog.d/16504.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index 636efc33e8..59b914b87e 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -59,7 +59,7 @@ class BasicProxyCredentials(ProxyCredentials): a Proxy-Authorization header. 
""" # Encode as base64 and prepend the authorization type - return b"Basic " + base64.encodebytes(self.username_password) + return b"Basic " + base64.b64encode(self.username_password) @attr.s(auto_attribs=True) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 8164b0b78e..41dfd5dc17 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -217,6 +217,27 @@ class ProxyParserTests(TestCase): ) +class TestBasicProxyCredentials(TestCase): + def test_long_user_pass_string_encoded_without_newlines(self) -> None: + """Reproduces https://github.com/matrix-org/synapse/pull/16504.""" + proxy_connection_string = b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass@proxy.local:9988" + _, _, _, creds = parse_proxy(proxy_connection_string) + assert creds is not None # for mypy's benefit + self.assertIsInstance(creds, BasicProxyCredentials) + + auth_value = creds.as_proxy_authorization_value() + self.assertNotIn(b"\n", auth_value) + self.assertEqual( + creds.as_proxy_authorization_value(), + b"Basic bG9vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vb29vbmd1c2VyOnBhc3M=", + ) + basic_auth_payload = creds.as_proxy_authorization_value().split(b" ")[1] + self.assertEqual( + base64.b64decode(basic_auth_payload), + b"looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooonguser:pass", + ) + + class MatrixFederationAgentTests(TestCase): def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() From e182dbb5b9e7e9641d373fe1d72133933db9bfe4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 25 Oct 2023 07:39:45 -0400 Subject: [PATCH 092/142] Fix tests on Twisted trunk. (#16528) Twisted trunk makes a change to the `TLSMemoryBIOFactory` where the underlying protocol is changed from `TLSMemoryBIOProtocol` to `BufferingTLSTransport` to improve performance of TLS code (see https://github.com/twisted/twisted/issues/11989). In order to properly hook this code up in tests we need to pass the test reactor's clock into `TLSMemoryBIOFactory` to avoid the global (trial) reactor being used by default. Twisted does something similar internally for tests: https://github.com/twisted/twisted/blob/157cd8e659705940e895d321339d467e76ae9d0a/src/twisted/web/test/test_agent.py#L871-L874 --- changelog.d/16528.misc | 1 + tests/http/__init__.py | 37 +++++++++++- .../test_matrix_federation_agent.py | 60 ++++++++----------- tests/http/test_proxyagent.py | 44 ++++---------- tests/replication/test_multi_media_repo.py | 52 ++++------------ tests/server.py | 12 ++++ 6 files changed, 95 insertions(+), 111 deletions(-) create mode 100644 changelog.d/16528.misc diff --git a/changelog.d/16528.misc b/changelog.d/16528.misc new file mode 100644 index 0000000000..32954ea675 --- /dev/null +++ b/changelog.d/16528.misc @@ -0,0 +1 @@ +Fix running unit tests on Twisted trunk. 
diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 528cdee34b..d5306e7ee0 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -15,14 +15,20 @@ import os.path import subprocess from typing import List +from incremental import Version from zope.interface import implementer +import twisted from OpenSSL import SSL from OpenSSL.SSL import Connection from twisted.internet.address import IPv4Address -from twisted.internet.interfaces import IOpenSSLServerConnectionCreator +from twisted.internet.interfaces import ( + IOpenSSLServerConnectionCreator, + IProtocolFactory, + IReactorTime, +) from twisted.internet.ssl import Certificate, trustRootFromCertificates -from twisted.protocols.tls import TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401 from twisted.web.iweb import IPolicyForHTTPS # noqa: F401 @@ -153,6 +159,33 @@ class TestServerTLSConnectionFactory: return Connection(ctx, None) +def wrap_server_factory_for_tls( + factory: IProtocolFactory, clock: IReactorTime, sanlist: List[bytes] +) -> TLSMemoryBIOFactory: + """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory + + The resultant factory will create a TLS server which presents a certificate + signed by our test CA, valid for the domains in `sanlist` + + Args: + factory: protocol factory to wrap + sanlist: list of domains the cert should be valid for + + Returns: + interfaces.IProtocolFactory + """ + connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) + # Twisted > 23.8.0 has a different API that accepts a clock. + if twisted.version <= Version("Twisted", 23, 8, 0): + return TLSMemoryBIOFactory( + connection_creator, isClient=False, wrappedFactory=factory + ) + else: + return TLSMemoryBIOFactory( + connection_creator, isClient=False, wrappedFactory=factory, clock=clock # type: ignore[call-arg] + ) + + # A dummy address, useful for tests that use FakeTransport and don't care about where # packets are going to/coming from. 
dummy_address = IPv4Address("TCP", "127.0.0.1", 80) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 9f63fa6fa8..0f623ae50b 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -31,7 +31,7 @@ from twisted.internet.interfaces import ( IProtocolFactory, ) from twisted.internet.protocol import Factory, Protocol -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web._newclient import ResponseNeverReceived from twisted.web.client import Agent from twisted.web.http import HTTPChannel, Request @@ -57,11 +57,7 @@ from synapse.types import ISynapseReactor from synapse.util.caches.ttlcache import TTLCache from tests import unittest -from tests.http import ( - TestServerTLSConnectionFactory, - dummy_address, - get_test_ca_cert_file, -) +from tests.http import dummy_address, get_test_ca_cert_file, wrap_server_factory_for_tls from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.utils import checked_cast, default_config @@ -125,7 +121,18 @@ class MatrixFederationAgentTests(unittest.TestCase): # build the test server server_factory = _get_test_protocol_factory() if ssl: - server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) + server_factory = wrap_server_factory_for_tls( + server_factory, + self.reactor, + tls_sanlist + or [ + b"DNS:testserv", + b"DNS:target-server", + b"DNS:xn--bcher-kva.com", + b"IP:1.2.3.4", + b"IP:::1", + ], + ) server_protocol = server_factory.buildProtocol(dummy_address) assert server_protocol is not None @@ -435,8 +442,16 @@ class MatrixFederationAgentTests(unittest.TestCase): request.finish() # now we make another test server to act as the upstream HTTP server. - server_ssl_protocol = _wrap_server_factory_for_tls( - _get_test_protocol_factory() + server_ssl_protocol = wrap_server_factory_for_tls( + _get_test_protocol_factory(), + self.reactor, + sanlist=[ + b"DNS:testserv", + b"DNS:target-server", + b"DNS:xn--bcher-kva.com", + b"IP:1.2.3.4", + b"IP:::1", + ], ).buildProtocol(dummy_address) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
@@ -1786,33 +1801,6 @@ def _check_logcontext(context: LoggingContextOrSentinel) -> None: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) -def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> TLSMemoryBIOFactory: - """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory - The resultant factory will create a TLS server which presents a certificate - signed by our test CA, valid for the domains in `sanlist` - Args: - factory: protocol factory to wrap - sanlist: list of domains the cert should be valid for - Returns: - interfaces.IProtocolFactory - """ - if sanlist is None: - sanlist = [ - b"DNS:testserv", - b"DNS:target-server", - b"DNS:xn--bcher-kva.com", - b"IP:1.2.3.4", - b"IP:::1", - ] - - connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) - return TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=factory - ) - - def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel Returns: diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 41dfd5dc17..1f117276cf 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -29,18 +29,14 @@ from twisted.internet.endpoints import ( ) from twisted.internet.interfaces import IProtocol, IProtocolFactory from twisted.internet.protocol import Factory, Protocol -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol +from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel from synapse.http.client import BlocklistingReactorWrapper from synapse.http.connectproxyclient import BasicProxyCredentials from synapse.http.proxyagent import ProxyAgent, parse_proxy -from tests.http import ( - TestServerTLSConnectionFactory, - dummy_address, - get_test_https_policy, -) +from tests.http import dummy_address, get_test_https_policy, wrap_server_factory_for_tls from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase from tests.utils import checked_cast @@ -272,7 +268,9 @@ class MatrixFederationAgentTests(TestCase): the server Protocol returned by server_factory """ if ssl: - server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) + server_factory = wrap_server_factory_for_tls( + server_factory, self.reactor, tls_sanlist or [b"DNS:test.com"] + ) server_protocol = server_factory.buildProtocol(dummy_address) assert server_protocol is not None @@ -639,8 +637,8 @@ class MatrixFederationAgentTests(TestCase): request.finish() # now we make another test server to act as the upstream HTTP server. - server_ssl_protocol = _wrap_server_factory_for_tls( - _get_test_protocol_factory() + server_ssl_protocol = wrap_server_factory_for_tls( + _get_test_protocol_factory(), self.reactor, sanlist=[b"DNS:test.com"] ).buildProtocol(dummy_address) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
@@ -806,7 +804,9 @@ class MatrixFederationAgentTests(TestCase): request.finish() # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel - ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory()) + ssl_factory = wrap_server_factory_for_tls( + _get_test_protocol_factory(), self.reactor, sanlist=[b"DNS:test.com"] + ) ssl_protocol = ssl_factory.buildProtocol(dummy_address) assert isinstance(ssl_protocol, TLSMemoryBIOProtocol) http_server = ssl_protocol.wrappedProtocol @@ -870,30 +870,6 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(proxy_ep._wrappedEndpoint._port, 8888) -def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> TLSMemoryBIOFactory: - """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory - - The resultant factory will create a TLS server which presents a certificate - signed by our test CA, valid for the domains in `sanlist` - - Args: - factory: protocol factory to wrap - sanlist: list of domains the cert should be valid for - - Returns: - interfaces.IProtocolFactory - """ - if sanlist is None: - sanlist = [b"DNS:test.com"] - - connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist) - return TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=factory - ) - - def _get_test_protocol_factory() -> IProtocolFactory: """Get a protocol Factory which will build an HTTPChannel diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index b230a6c361..1e9994cc0b 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -15,9 +15,7 @@ import logging import os from typing import Any, Optional, Tuple -from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory -from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.test.proto_helpers import MemoryReactor from twisted.web.http import HTTPChannel from twisted.web.server import Request @@ -27,7 +25,11 @@ from synapse.rest.client import login from synapse.server import HomeServer from synapse.util import Clock -from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file +from tests.http import ( + TestServerTLSConnectionFactory, + get_test_ca_cert_file, + wrap_server_factory_for_tls, +) from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG @@ -94,7 +96,13 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): (host, port, client_factory, _timeout, _bindAddress) = clients.pop() # build the test server - server_tls_protocol = _build_test_server(get_connection_factory()) + server_factory = Factory.forProtocol(HTTPChannel) + # Request.finish expects the factory to have a 'log' method. + server_factory.log = _log_request + + server_tls_protocol = wrap_server_factory_for_tls( + server_factory, self.reactor, sanlist=[b"DNS:example.com"] + ).buildProtocol(None) # now, tell the client protocol factory to build the client protocol (it will be a # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an @@ -114,7 +122,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): ) # fish the test server back out of the server-side TLS protocol. 
- http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment] + http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # give the reactor a pump to get the TLS juices flowing. self.reactor.pump((0.1,)) @@ -240,40 +248,6 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): return sum(len(files) for _, _, files in os.walk(path)) -def get_connection_factory() -> TestServerTLSConnectionFactory: - # this needs to happen once, but not until we are ready to run the first test - global test_server_connection_factory - if test_server_connection_factory is None: - test_server_connection_factory = TestServerTLSConnectionFactory( - sanlist=[b"DNS:example.com"] - ) - return test_server_connection_factory - - -def _build_test_server( - connection_creator: IOpenSSLServerConnectionCreator, -) -> TLSMemoryBIOProtocol: - """Construct a test server - - This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol - - Args: - connection_creator: thing to build SSL connections - - Returns: - TLSMemoryBIOProtocol - """ - server_factory = Factory.forProtocol(HTTPChannel) - # Request.finish expects the factory to have a 'log' method. - server_factory.log = _log_request - - server_tls_factory = TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=server_factory - ) - - return server_tls_factory.buildProtocol(None) - - def _log_request(request: Request) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info("Completed request %s", request) diff --git a/tests/server.py b/tests/server.py index 08633fe640..cfb0fb823b 100644 --- a/tests/server.py +++ b/tests/server.py @@ -43,9 +43,11 @@ from typing import ( from unittest.mock import Mock import attr +from incremental import Version from typing_extensions import ParamSpec from zope.interface import implementer +import twisted from twisted.internet import address, tcp, threads, udp from twisted.internet._resolver import SimpleResolverComplexifier from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed @@ -474,6 +476,16 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): return fail(DNSLookupError("OH NO: unknown %s" % (name,))) return succeed(lookups[name]) + # In order for the TLS protocol tests to work, modify _get_default_clock + # on newer Twisted versions to use the test reactor's clock. + # + # This is *super* dirty since it is never undone and relies on the next + # test to overwrite it. + if twisted.version > Version("Twisted", 23, 8, 0): + from twisted.protocols import tls + + tls._get_default_clock = lambda: self # type: ignore[attr-defined] + self.nameResolver = SimpleResolverComplexifier(FakeResolver()) super().__init__() From ba47fea5286e084ec70d568aa62eb4820b857c47 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 25 Oct 2023 16:16:19 +0100 Subject: [PATCH 093/142] Allow multiple workers to write to receipts stream. 
(#16432) Fixes #16417 --- changelog.d/16432.feature | 1 + synapse/config/workers.py | 4 +- synapse/handlers/appservice.py | 42 +-- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/receipts.py | 19 +- synapse/handlers/sync.py | 7 +- synapse/notifier.py | 45 +++- synapse/replication/tcp/client.py | 3 +- synapse/storage/databases/main/receipts.py | 150 ++++++++--- synapse/storage/databases/main/relations.py | 4 +- .../83/03_instance_name_receipts.sql.sqlite | 17 ++ synapse/streams/events.py | 4 +- synapse/types/__init__.py | 137 +++++++++- tests/handlers/test_appservice.py | 17 +- tests/replication/test_sharded_receipts.py | 243 ++++++++++++++++++ 15 files changed, 605 insertions(+), 90 deletions(-) create mode 100644 changelog.d/16432.feature create mode 100644 synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite create mode 100644 tests/replication/test_sharded_receipts.py diff --git a/changelog.d/16432.feature b/changelog.d/16432.feature new file mode 100644 index 0000000000..9a76e85592 --- /dev/null +++ b/changelog.d/16432.feature @@ -0,0 +1 @@ +Allow multiple workers to write to receipts stream. diff --git a/synapse/config/workers.py b/synapse/config/workers.py index f1766088fc..6d67a8cd5c 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -358,9 +358,9 @@ class WorkerConfig(Config): "Must only specify one instance to handle `account_data` messages." ) - if len(self.writers.receipts) != 1: + if len(self.writers.receipts) == 0: raise ConfigError( - "Must only specify one instance to handle `receipts` messages." + "Must specify at least one instance to handle `receipts` messages." ) if len(self.writers.events) == 0: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index c200a45f3a..873dadc3bd 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -47,6 +47,7 @@ from synapse.types import ( DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, RoomAlias, RoomStreamToken, StreamKeyType, @@ -217,7 +218,7 @@ class ApplicationServicesHandler: def notify_interested_services_ephemeral( self, stream_key: StreamKeyType, - new_token: Union[int, RoomStreamToken], + new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], users: Collection[Union[str, UserID]], ) -> None: """ @@ -259,19 +260,6 @@ class ApplicationServicesHandler: ): return - # Assert that new_token is an integer (and not a RoomStreamToken). - # All of the supported streams that this function handles use an - # integer to track progress (rather than a RoomStreamToken - a - # vector clock implementation) as they don't support multiple - # stream writers. - # - # As a result, we simply assert that new_token is an integer. - # If we do end up needing to pass a RoomStreamToken down here - # in the future, using RoomStreamToken.stream (the minimum stream - # position) to convert to an ascending integer value should work. - # Additional context: https://github.com/matrix-org/synapse/pull/11137 - assert isinstance(new_token, int) - # Ignore to-device messages if the feature flag is not enabled if ( stream_key == StreamKeyType.TO_DEVICE @@ -286,6 +274,9 @@ class ApplicationServicesHandler: ): return + # We know we're not a `RoomStreamToken` at this point. + assert not isinstance(new_token, RoomStreamToken) + # Check whether there are any appservices which have registered to receive # ephemeral events. 
# @@ -327,7 +318,7 @@ class ApplicationServicesHandler: self, services: List[ApplicationService], stream_key: StreamKeyType, - new_token: int, + new_token: Union[int, MultiWriterStreamToken], users: Collection[Union[str, UserID]], ) -> None: logger.debug("Checking interested services for %s", stream_key) @@ -340,6 +331,7 @@ class ApplicationServicesHandler: # # Instead we simply grab the latest typing updates in _handle_typing # and, if they apply to this application service, send it off. + assert isinstance(new_token, int) events = await self._handle_typing(service, new_token) if events: self.scheduler.enqueue_for_appservice(service, ephemeral=events) @@ -350,15 +342,23 @@ class ApplicationServicesHandler: (service.id, stream_key) ): if stream_key == StreamKeyType.RECEIPT: + assert isinstance(new_token, MultiWriterStreamToken) + + # We store appservice tokens as integers, so we ignore + # the `instance_map` components and instead simply + # follow the base stream position. + new_token = MultiWriterStreamToken(stream=new_token.stream) + events = await self._handle_receipts(service, new_token) self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice await self.store.set_appservice_stream_type_pos( - service, "read_receipt", new_token + service, "read_receipt", new_token.stream ) elif stream_key == StreamKeyType.PRESENCE: + assert isinstance(new_token, int) events = await self._handle_presence(service, users, new_token) self.scheduler.enqueue_for_appservice(service, ephemeral=events) @@ -368,6 +368,7 @@ class ApplicationServicesHandler: ) elif stream_key == StreamKeyType.TO_DEVICE: + assert isinstance(new_token, int) # Retrieve a list of to-device message events, as well as the # maximum stream token of the messages we were able to retrieve. to_device_messages = await self._get_to_device_messages( @@ -383,6 +384,7 @@ class ApplicationServicesHandler: ) elif stream_key == StreamKeyType.DEVICE_LIST: + assert isinstance(new_token, int) device_list_summary = await self._get_device_list_summary( service, new_token ) @@ -432,7 +434,7 @@ class ApplicationServicesHandler: return typing async def _handle_receipts( - self, service: ApplicationService, new_token: int + self, service: ApplicationService, new_token: MultiWriterStreamToken ) -> List[JsonMapping]: """ Return the latest read receipts that the given application service should receive. 
@@ -455,15 +457,17 @@ class ApplicationServicesHandler: from_key = await self.store.get_type_stream_id_for_appservice( service, "read_receipt" ) - if new_token is not None and new_token <= from_key: + if new_token is not None and new_token.stream <= from_key: logger.debug( "Rejecting token lower than or equal to stored: %s" % (new_token,) ) return [] + from_token = MultiWriterStreamToken(stream=from_key) + receipts_source = self.event_sources.sources.receipt receipts, _ = await receipts_source.get_new_events_as( - service=service, from_key=from_key, to_key=new_token + service=service, from_key=from_token, to_key=new_token ) return receipts diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index c34bd7db95..b1d8be866f 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -145,7 +145,7 @@ class InitialSyncHandler: joined_rooms = [r.room_id for r in room_list if r.membership == Membership.JOIN] receipt = await self.store.get_linearized_receipts_for_rooms( joined_rooms, - to_key=int(now_token.receipt_key), + to_key=now_token.receipt_key, ) receipt = ReceiptEventSource.filter_out_private_receipts(receipt, user_id) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 69ac468f75..b5f7a8b47e 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -20,6 +20,7 @@ from synapse.streams import EventSource from synapse.types import ( JsonDict, JsonMapping, + MultiWriterStreamToken, ReadReceipt, StreamKeyType, UserID, @@ -200,7 +201,7 @@ class ReceiptsHandler: await self.federation_sender.send_read_receipt(receipt) -class ReceiptEventSource(EventSource[int, JsonMapping]): +class ReceiptEventSource(EventSource[MultiWriterStreamToken, JsonMapping]): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.config = hs.config @@ -273,13 +274,12 @@ class ReceiptEventSource(EventSource[int, JsonMapping]): async def get_new_events( self, user: UserID, - from_key: int, + from_key: MultiWriterStreamToken, limit: int, room_ids: Iterable[str], is_guest: bool, explicit_room_id: Optional[str] = None, - ) -> Tuple[List[JsonMapping], int]: - from_key = int(from_key) + ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: to_key = self.get_current_key() if from_key == to_key: @@ -296,8 +296,11 @@ class ReceiptEventSource(EventSource[int, JsonMapping]): return events, to_key async def get_new_events_as( - self, from_key: int, to_key: int, service: ApplicationService - ) -> Tuple[List[JsonMapping], int]: + self, + from_key: MultiWriterStreamToken, + to_key: MultiWriterStreamToken, + service: ApplicationService, + ) -> Tuple[List[JsonMapping], MultiWriterStreamToken]: """Returns a set of new read receipt events that an appservice may be interested in. @@ -312,8 +315,6 @@ class ReceiptEventSource(EventSource[int, JsonMapping]): appservice may be interested in. * The current read receipt stream token. 
""" - from_key = int(from_key) - if from_key == to_key: return [], to_key @@ -333,5 +334,5 @@ class ReceiptEventSource(EventSource[int, JsonMapping]): return events, to_key - def get_current_key(self) -> int: + def get_current_key(self) -> MultiWriterStreamToken: return self.store.get_max_receipt_stream_id() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f131c0e8e0..f75c1548ca 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -57,6 +57,7 @@ from synapse.types import ( DeviceListUpdates, JsonDict, JsonMapping, + MultiWriterStreamToken, MutableStateMap, Requester, RoomStreamToken, @@ -477,7 +478,11 @@ class SyncHandler: event_copy = {k: v for (k, v) in event.items() if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - receipt_key = since_token.receipt_key if since_token else 0 + receipt_key = ( + since_token.receipt_key + if since_token + else MultiWriterStreamToken(stream=0) + ) receipt_source = self.event_sources.sources.receipt receipts, receipt_key = await receipt_source.get_new_events( diff --git a/synapse/notifier.py b/synapse/notifier.py index 99e7715896..ee0bd84f1e 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -21,11 +21,13 @@ from typing import ( Dict, Iterable, List, + Literal, Optional, Set, Tuple, TypeVar, Union, + overload, ) import attr @@ -44,6 +46,7 @@ from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( JsonDict, + MultiWriterStreamToken, PersistedEventPosition, RoomStreamToken, StrCollection, @@ -127,7 +130,7 @@ class _NotifierUserStream: def notify( self, stream_key: StreamKeyType, - stream_id: Union[int, RoomStreamToken], + stream_id: Union[int, RoomStreamToken, MultiWriterStreamToken], time_now_ms: int, ) -> None: """Notify any listeners for this user of a new event from an @@ -452,10 +455,48 @@ class Notifier: except Exception: logger.exception("Error pusher pool of event") + @overload + def on_new_event( + self, + stream_key: Literal[StreamKeyType.ROOM], + new_token: RoomStreamToken, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... + + @overload + def on_new_event( + self, + stream_key: Literal[StreamKeyType.RECEIPT], + new_token: MultiWriterStreamToken, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... + + @overload + def on_new_event( + self, + stream_key: Literal[ + StreamKeyType.ACCOUNT_DATA, + StreamKeyType.DEVICE_LIST, + StreamKeyType.PRESENCE, + StreamKeyType.PUSH_RULES, + StreamKeyType.TO_DEVICE, + StreamKeyType.TYPING, + StreamKeyType.UN_PARTIAL_STATED_ROOMS, + ], + new_token: int, + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[StrCollection] = None, + ) -> None: + ... 
+ def on_new_event( self, stream_key: StreamKeyType, - new_token: Union[int, RoomStreamToken], + new_token: Union[int, RoomStreamToken, MultiWriterStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[StrCollection] = None, ) -> None: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 384355698d..1312b6f21e 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -126,8 +126,9 @@ class ReplicationDataHandler: StreamKeyType.ACCOUNT_DATA, token, users=[row.user_id for row in rows] ) elif stream_name == ReceiptsStream.NAME: + new_token = self.store.get_max_receipt_stream_id() self.notifier.on_new_event( - StreamKeyType.RECEIPT, token, rooms=[row.room_id for row in rows] + StreamKeyType.RECEIPT, new_token, rooms=[row.room_id for row in rows] ) await self._pusher_pool.on_new_receipts({row.user_id for row in rows}) elif stream_name == ToDeviceStream.NAME: diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index b2645ab43c..56e8eb16a8 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -28,6 +28,8 @@ from typing import ( cast, ) +from immutabledict import immutabledict + from synapse.api.constants import EduTypes from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -43,7 +45,12 @@ from synapse.storage.util.id_generators import ( MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict, JsonMapping +from synapse.types import ( + JsonDict, + JsonMapping, + MultiWriterStreamToken, + PersistedPosition, +) from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -105,7 +112,7 @@ class ReceiptsWorkerStore(SQLBaseStore): "receipts_linearized", entity_column="room_id", stream_column="stream_id", - max_value=max_receipts_stream_id, + max_value=max_receipts_stream_id.stream, limit=10000, ) self._receipts_stream_cache = StreamChangeCache( @@ -114,9 +121,31 @@ class ReceiptsWorkerStore(SQLBaseStore): prefilled_cache=receipts_stream_prefill, ) - def get_max_receipt_stream_id(self) -> int: + def get_max_receipt_stream_id(self) -> MultiWriterStreamToken: """Get the current max stream ID for receipts stream""" - return self._receipts_id_gen.get_current_token() + + min_pos = self._receipts_id_gen.get_current_token() + + positions = {} + if isinstance(self._receipts_id_gen, MultiWriterIdGenerator): + # The `min_pos` is the minimum position that we know all instances + # have finished persisting to, so we only care about instances whose + # positions are ahead of that. (Instance positions can be behind the + # min position as there are times we can work out that the minimum + # position is ahead of the naive minimum across all current + # positions. 
See MultiWriterIdGenerator for details) + positions = { + i: p + for i, p in self._receipts_id_gen.get_positions().items() + if p > min_pos + } + + return MultiWriterStreamToken( + stream=min_pos, instance_map=immutabledict(positions) + ) + + def get_receipt_stream_id_for_instance(self, instance_name: str) -> int: + return self._receipts_id_gen.get_current_token_for_writer(instance_name) def get_last_unthreaded_receipt_for_user_txn( self, @@ -257,7 +286,10 @@ class ReceiptsWorkerStore(SQLBaseStore): } async def get_linearized_receipts_for_rooms( - self, room_ids: Iterable[str], to_key: int, from_key: Optional[int] = None + self, + room_ids: Iterable[str], + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> List[JsonMapping]: """Get receipts for multiple rooms for sending to clients. @@ -276,7 +308,7 @@ class ReceiptsWorkerStore(SQLBaseStore): # Only ask the database about rooms where there have been new # receipts added since `from_key` room_ids = self._receipts_stream_cache.get_entities_changed( - room_ids, from_key + room_ids, from_key.stream ) results = await self._get_linearized_receipts_for_rooms( @@ -286,7 +318,10 @@ class ReceiptsWorkerStore(SQLBaseStore): return [ev for res in results.values() for ev in res] async def get_linearized_receipts_for_room( - self, room_id: str, to_key: int, from_key: Optional[int] = None + self, + room_id: str, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Sequence[JsonMapping]: """Get receipts for a single room for sending to clients. @@ -302,36 +337,49 @@ class ReceiptsWorkerStore(SQLBaseStore): if from_key is not None: # Check the cache first to see if any new receipts have been added # since`from_key`. If not we can no-op. - if not self._receipts_stream_cache.has_entity_changed(room_id, from_key): + if not self._receipts_stream_cache.has_entity_changed( + room_id, from_key.stream + ): return [] return await self._get_linearized_receipts_for_room(room_id, to_key, from_key) @cached(tree=True) async def _get_linearized_receipts_for_room( - self, room_id: str, to_key: int, from_key: Optional[int] = None + self, + room_id: str, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Sequence[JsonMapping]: """See get_linearized_receipts_for_room""" def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str]]: if from_key: - sql = ( - "SELECT receipt_type, user_id, event_id, data" - " FROM receipts_linearized WHERE" - " room_id = ? AND stream_id > ? AND stream_id <= ?" - ) + sql = """ + SELECT stream_id, instance_name, receipt_type, user_id, event_id, data + FROM receipts_linearized + WHERE room_id = ? AND stream_id > ? AND stream_id <= ? + """ - txn.execute(sql, (room_id, from_key, to_key)) + txn.execute( + sql, (room_id, from_key.stream, to_key.get_max_stream_pos()) + ) else: - sql = ( - "SELECT receipt_type, user_id, event_id, data" - " FROM receipts_linearized WHERE" - " room_id = ? AND stream_id <= ?" + sql = """ + SELECT stream_id, instance_name, receipt_type, user_id, event_id, data + FROM receipts_linearized WHERE + room_id = ? AND stream_id <= ? 
+ """ + + txn.execute(sql, (room_id, to_key.get_max_stream_pos())) + + return [ + (receipt_type, user_id, event_id, data) + for stream_id, instance_name, receipt_type, user_id, event_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id ) - - txn.execute(sql, (room_id, to_key)) - - return cast(List[Tuple[str, str, str, str]], txn.fetchall()) + ] rows = await self.db_pool.runInteraction("get_linearized_receipts_for_room", f) @@ -352,7 +400,10 @@ class ReceiptsWorkerStore(SQLBaseStore): num_args=3, ) async def _get_linearized_receipts_for_rooms( - self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None + self, + room_ids: Collection[str], + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Mapping[str, Sequence[JsonMapping]]: if not room_ids: return {} @@ -362,7 +413,8 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> List[Tuple[str, str, str, str, Optional[str], str]]: if from_key: sql = """ - SELECT room_id, receipt_type, user_id, event_id, thread_id, data + SELECT stream_id, instance_name, room_id, receipt_type, + user_id, event_id, thread_id, data FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? AND """ @@ -370,10 +422,14 @@ class ReceiptsWorkerStore(SQLBaseStore): self.database_engine, "room_id", room_ids ) - txn.execute(sql + clause, [from_key, to_key] + list(args)) + txn.execute( + sql + clause, + [from_key.stream, to_key.get_max_stream_pos()] + list(args), + ) else: sql = """ - SELECT room_id, receipt_type, user_id, event_id, thread_id, data + SELECT stream_id, instance_name, room_id, receipt_type, + user_id, event_id, thread_id, data FROM receipts_linearized WHERE stream_id <= ? AND """ @@ -382,11 +438,15 @@ class ReceiptsWorkerStore(SQLBaseStore): self.database_engine, "room_id", room_ids ) - txn.execute(sql + clause, [to_key] + list(args)) + txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args)) - return cast( - List[Tuple[str, str, str, str, Optional[str], str]], txn.fetchall() - ) + return [ + (room_id, receipt_type, user_id, event_id, thread_id, data) + for stream_id, instance_name, room_id, receipt_type, user_id, event_id, thread_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id + ) + ] txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f @@ -420,7 +480,9 @@ class ReceiptsWorkerStore(SQLBaseStore): num_args=2, ) async def get_linearized_receipts_for_all_rooms( - self, to_key: int, from_key: Optional[int] = None + self, + to_key: MultiWriterStreamToken, + from_key: Optional[MultiWriterStreamToken] = None, ) -> Mapping[str, JsonMapping]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. @@ -437,25 +499,31 @@ class ReceiptsWorkerStore(SQLBaseStore): def f(txn: LoggingTransaction) -> List[Tuple[str, str, str, str, str]]: if from_key: sql = """ - SELECT room_id, receipt_type, user_id, event_id, data + SELECT stream_id, instance_name, room_id, receipt_type, user_id, event_id, data FROM receipts_linearized WHERE stream_id > ? AND stream_id <= ? 
ORDER BY stream_id DESC LIMIT 100 """ - txn.execute(sql, [from_key, to_key]) + txn.execute(sql, [from_key.stream, to_key.get_max_stream_pos()]) else: sql = """ - SELECT room_id, receipt_type, user_id, event_id, data + SELECT stream_id, instance_name, room_id, receipt_type, user_id, event_id, data FROM receipts_linearized WHERE stream_id <= ? ORDER BY stream_id DESC LIMIT 100 """ - txn.execute(sql, [to_key]) + txn.execute(sql, [to_key.get_max_stream_pos()]) - return cast(List[Tuple[str, str, str, str, str]], txn.fetchall()) + return [ + (room_id, receipt_type, user_id, event_id, data) + for stream_id, instance_name, room_id, receipt_type, user_id, event_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + from_key, to_key, instance_name, stream_id + ) + ] txn_results = await self.db_pool.runInteraction( "get_linearized_receipts_for_all_rooms", f @@ -545,10 +613,11 @@ class ReceiptsWorkerStore(SQLBaseStore): SELECT stream_id, room_id, receipt_type, user_id, event_id, thread_id, data FROM receipts_linearized WHERE ? < stream_id AND stream_id <= ? + AND instance_name = ? ORDER BY stream_id ASC LIMIT ? """ - txn.execute(sql, (last_id, current_id, limit)) + txn.execute(sql, (last_id, current_id, instance_name, limit)) updates = cast( List[Tuple[int, Tuple[str, str, str, str, Optional[str], JsonDict]]], @@ -695,6 +764,7 @@ class ReceiptsWorkerStore(SQLBaseStore): keyvalues=keyvalues, values={ "stream_id": stream_id, + "instance_name": self._instance_name, "event_id": event_id, "event_stream_ordering": stream_ordering, "data": json_encoder.encode(data), @@ -750,7 +820,7 @@ class ReceiptsWorkerStore(SQLBaseStore): event_ids: List[str], thread_id: Optional[str], data: dict, - ) -> Optional[int]: + ) -> Optional[PersistedPosition]: """Insert a receipt, either from local client or remote server. Automatically does conversion between linearized and graph @@ -812,7 +882,7 @@ class ReceiptsWorkerStore(SQLBaseStore): data, ) - return stream_id + return PersistedPosition(self._instance_name, stream_id) async def _insert_graph_receipt( self, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 7f40e2c446..ce7bfd5146 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -47,7 +47,7 @@ from synapse.storage.databases.main.stream import ( generate_pagination_where_clause, ) from synapse.storage.engines import PostgresEngine -from synapse.types import JsonDict, StreamKeyType, StreamToken +from synapse.types import JsonDict, MultiWriterStreamToken, StreamKeyType, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: @@ -314,7 +314,7 @@ class RelationsWorkerStore(SQLBaseStore): room_key=next_key, presence_key=0, typing_key=0, - receipt_key=0, + receipt_key=MultiWriterStreamToken(stream=0), account_data_key=0, push_rules_key=0, to_device_key=0, diff --git a/synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite b/synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite new file mode 100644 index 0000000000..6c7ad0fd37 --- /dev/null +++ b/synapse/storage/schema/main/delta/83/03_instance_name_receipts.sql.sqlite @@ -0,0 +1,17 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This already exists on Postgres. +ALTER TABLE receipts_linearized ADD COLUMN instance_name TEXT; diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 609a0978a9..d0bb83b184 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -23,7 +23,7 @@ from synapse.handlers.room import RoomEventSource from synapse.handlers.typing import TypingNotificationEventSource from synapse.logging.opentracing import trace from synapse.streams import EventSource -from synapse.types import StreamKeyType, StreamToken +from synapse.types import MultiWriterStreamToken, StreamKeyType, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer @@ -111,7 +111,7 @@ class EventSources: room_key=await self.sources.room.get_current_key_for_room(room_id), presence_key=0, typing_key=0, - receipt_key=0, + receipt_key=MultiWriterStreamToken(stream=0), account_data_key=0, push_rules_key=0, to_device_key=0, diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 09a88c86a7..4c5b26ad93 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -695,6 +695,90 @@ class RoomStreamToken(AbstractMultiWriterStreamToken): return "s%d" % (self.stream,) +@attr.s(frozen=True, slots=True, order=False) +class MultiWriterStreamToken(AbstractMultiWriterStreamToken): + """A basic stream token class for streams that supports multiple writers.""" + + @classmethod + async def parse(cls, store: "DataStore", string: str) -> "MultiWriterStreamToken": + try: + if string[0].isdigit(): + return cls(stream=int(string)) + if string[0] == "m": + parts = string[1:].split("~") + stream = int(parts[0]) + + instance_map = {} + for part in parts[1:]: + key, value = part.split(".") + instance_id = int(key) + pos = int(value) + + instance_name = await store.get_name_from_instance_id(instance_id) + instance_map[instance_name] = pos + + return cls( + stream=stream, + instance_map=immutabledict(instance_map), + ) + except CancelledError: + raise + except Exception: + pass + raise SynapseError(400, "Invalid stream token %r" % (string,)) + + async def to_string(self, store: "DataStore") -> str: + if self.instance_map: + entries = [] + for name, pos in self.instance_map.items(): + if pos <= self.stream: + # Ignore instances who are below the minimum stream position + # (we might know they've advanced without seeing a recent + # write from them). + continue + + instance_id = await store.get_id_for_instance(name) + entries.append(f"{instance_id}.{pos}") + + encoded_map = "~".join(entries) + return f"m{self.stream}~{encoded_map}" + else: + return str(self.stream) + + @staticmethod + def is_stream_position_in_range( + low: Optional["AbstractMultiWriterStreamToken"], + high: Optional["AbstractMultiWriterStreamToken"], + instance_name: Optional[str], + pos: int, + ) -> bool: + """Checks if a given persisted position is between the two given tokens. + + If `instance_name` is None then the row was persisted before multi + writer support. 
+ """ + + if low: + if instance_name: + low_stream = low.instance_map.get(instance_name, low.stream) + else: + low_stream = low.stream + + if pos <= low_stream: + return False + + if high: + if instance_name: + high_stream = high.instance_map.get(instance_name, high.stream) + else: + high_stream = high.stream + + if high_stream < pos: + return False + + return True + + class StreamKeyType(Enum): """Known stream types. @@ -776,7 +860,9 @@ class StreamToken: ) presence_key: int typing_key: int - receipt_key: int + receipt_key: MultiWriterStreamToken = attr.ib( + validator=attr.validators.instance_of(MultiWriterStreamToken) + ) account_data_key: int push_rules_key: int to_device_key: int @@ -799,8 +885,31 @@ class StreamToken: while len(keys) < len(attr.fields(cls)): # i.e. old token from before receipt_key keys.append("0") + + ( + room_key, + presence_key, + typing_key, + receipt_key, + account_data_key, + push_rules_key, + to_device_key, + device_list_key, + groups_key, + un_partial_stated_rooms_key, + ) = keys + return cls( - await RoomStreamToken.parse(store, keys[0]), *(int(k) for k in keys[1:]) + room_key=await RoomStreamToken.parse(store, room_key), + presence_key=int(presence_key), + typing_key=int(typing_key), + receipt_key=await MultiWriterStreamToken.parse(store, receipt_key), + account_data_key=int(account_data_key), + push_rules_key=int(push_rules_key), + to_device_key=int(to_device_key), + device_list_key=int(device_list_key), + groups_key=int(groups_key), + un_partial_stated_rooms_key=int(un_partial_stated_rooms_key), ) except CancelledError: raise @@ -813,7 +922,7 @@ class StreamToken: await self.room_key.to_string(store), str(self.presence_key), str(self.typing_key), - str(self.receipt_key), + await self.receipt_key.to_string(store), str(self.account_data_key), str(self.push_rules_key), str(self.to_device_key), @@ -841,6 +950,11 @@ class StreamToken: StreamKeyType.ROOM, self.room_key.copy_and_advance(new_value) ) return new_token + elif key == StreamKeyType.RECEIPT: + new_token = self.copy_and_replace( + StreamKeyType.RECEIPT, self.receipt_key.copy_and_advance(new_value) + ) + return new_token new_token = self.copy_and_replace(key, new_value) new_id = new_token.get_field(key) @@ -858,6 +972,10 @@ class StreamToken: def get_field(self, key: Literal[StreamKeyType.ROOM]) -> RoomStreamToken: ... + @overload + def get_field(self, key: Literal[StreamKeyType.RECEIPT]) -> MultiWriterStreamToken: + ... + @overload def get_field( self, @@ -866,7 +984,6 @@ class StreamToken: StreamKeyType.DEVICE_LIST, StreamKeyType.PRESENCE, StreamKeyType.PUSH_RULES, - StreamKeyType.RECEIPT, StreamKeyType.TO_DEVICE, StreamKeyType.TYPING, StreamKeyType.UN_PARTIAL_STATED_ROOMS, @@ -875,15 +992,21 @@ class StreamToken: ... @overload - def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + def get_field( + self, key: StreamKeyType + ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: ... 
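Putting the pieces above together: the receipt token serialises as `m<stream>~<instance_id>.<pos>~...` (or a bare integer when the instance map is empty), and readers such as the receipts store fetch rows up to `get_max_stream_pos()` and then filter each row with `is_stream_position_in_range`. A runnable simplification of that range check, using writer names directly where the real token stores numeric instance ids:

```python
from typing import Mapping, Optional


def in_range(
    low_stream: int,
    low_map: Mapping[str, int],
    high_stream: int,
    high_map: Mapping[str, int],
    instance_name: Optional[str],
    pos: int,
) -> bool:
    """Simplified per-instance range check: each bound falls back to its
    minimum `stream` position for writers it has no explicit entry for."""
    low = low_map.get(instance_name, low_stream) if instance_name else low_stream
    high = high_map.get(instance_name, high_stream) if instance_name else high_stream
    return low < pos <= high


# A from-token of roughly "m5~worker2.8": minimum position 5, worker2 known to be at 8.
low_stream, low_map = 5, {"worker2": 8}
high_stream, high_map = 9, {}

assert in_range(low_stream, low_map, high_stream, high_map, "worker1", 7)
# worker2's row at position 7 was already covered by the from-token, so it is skipped:
assert not in_range(low_stream, low_map, high_stream, high_map, "worker2", 7)
```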
- def get_field(self, key: StreamKeyType) -> Union[int, RoomStreamToken]: + def get_field( + self, key: StreamKeyType + ) -> Union[int, RoomStreamToken, MultiWriterStreamToken]: """Returns the stream ID for the given key.""" return getattr(self, key.value) -StreamToken.START = StreamToken(RoomStreamToken(stream=0), 0, 0, 0, 0, 0, 0, 0, 0, 0) +StreamToken.START = StreamToken( + RoomStreamToken(stream=0), 0, 0, MultiWriterStreamToken(stream=0), 0, 0, 0, 0, 0, 0 +) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index c888d1ff01..78646cb5dc 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -31,7 +31,12 @@ from synapse.appservice import ( from synapse.handlers.appservice import ApplicationServicesHandler from synapse.rest.client import login, receipts, register, room, sendtodevice from synapse.server import HomeServer -from synapse.types import JsonDict, RoomStreamToken, StreamKeyType +from synapse.types import ( + JsonDict, + MultiWriterStreamToken, + RoomStreamToken, + StreamKeyType, +) from synapse.util import Clock from synapse.util.stringutils import random_string @@ -305,7 +310,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, + MultiWriterStreamToken(stream=580), + ["@fakerecipient:example.com"], ) self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( interested_service, ephemeral=[event] @@ -333,7 +340,9 @@ class AppServiceHandlerTestCase(unittest.TestCase): ) self.handler.notify_interested_services_ephemeral( - StreamKeyType.RECEIPT, 580, ["@fakerecipient:example.com"] + StreamKeyType.RECEIPT, + MultiWriterStreamToken(stream=580), + ["@fakerecipient:example.com"], ) # This method will be called, but with an empty list of events self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( @@ -636,7 +645,7 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): self.hs.get_application_service_handler()._notify_interested_services_ephemeral( services=[interested_appservice], stream_key=StreamKeyType.RECEIPT, - new_token=stream_token, + new_token=MultiWriterStreamToken(stream=stream_token), users=[self.exclusive_as_user], ) ) diff --git a/tests/replication/test_sharded_receipts.py b/tests/replication/test_sharded_receipts.py new file mode 100644 index 0000000000..41876b36de --- /dev/null +++ b/tests/replication/test_sharded_receipts.py @@ -0,0 +1,243 @@ +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
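The new test module below drives two dedicated receipt writers. For orientation, this is the shape of configuration it relies on (taken from its `default_config`; the homeserver YAML equivalent uses the same option names):

```python
# Receipt stream sharded across two workers; writes go only to the listed instances.
conf = {
    "stream_writers": {"receipts": ["worker1", "worker2"]},
    "instance_map": {
        "main": {"host": "testserv", "port": 8765},
        "worker1": {"host": "testserv", "port": 1001},
        "worker2": {"host": "testserv", "port": 1002},
    },
}
```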
+import logging + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import ReceiptTypes +from synapse.rest import admin +from synapse.rest.client import login, receipts, room, sync +from synapse.server import HomeServer +from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.types import StreamToken +from synapse.util import Clock + +from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.server import make_request + +logger = logging.getLogger(__name__) + + +class ReceiptsShardTestCase(BaseMultiWorkerStreamTestCase): + """Checks receipts sharding works""" + + servlets = [ + admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + receipts.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # Register a user who sends a message that we'll get notified about + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + self.room_creator = self.hs.get_room_creation_handler() + self.store = hs.get_datastores().main + + def default_config(self) -> dict: + conf = super().default_config() + conf["stream_writers"] = {"receipts": ["worker1", "worker2"]} + conf["instance_map"] = { + "main": {"host": "testserv", "port": 8765}, + "worker1": {"host": "testserv", "port": 1001}, + "worker2": {"host": "testserv", "port": 1002}, + } + return conf + + def test_basic(self) -> None: + """Simple test to ensure that receipts can be sent on multiple + workers. + """ + + worker1 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker1"}, + ) + worker1_site = self._hs_to_site[worker1] + + worker2 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker2"}, + ) + worker2_site = self._hs_to_site[worker2] + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Create a room + room_id = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room_id, user=self.other_user_id, tok=self.other_access_token + ) + + # First user sends a message, the other users sends a receipt. + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + event_id = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker1_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + # Now we do it again using the second worker + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + event_id = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker2_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + def test_vector_clock_token(self) -> None: + """Tests that using a stream token with a vector clock component works + correctly with basic /sync usage. 
+ """ + + worker_hs1 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker1"}, + ) + worker1_site = self._hs_to_site[worker_hs1] + + worker_hs2 = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "worker2"}, + ) + worker2_site = self._hs_to_site[worker_hs2] + + sync_hs = self.make_worker_hs( + "synapse.app.generic_worker", + {"worker_name": "sync"}, + ) + sync_hs_site = self._hs_to_site[sync_hs] + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + store = self.hs.get_datastores().main + + room_id = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room_id, user=self.other_user_id, tok=self.other_access_token + ) + + response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + first_event = response["event_id"] + + # Do an initial sync so that we're up to date. + channel = make_request( + self.reactor, sync_hs_site, "GET", "/sync", access_token=access_token + ) + next_batch = channel.json_body["next_batch"] + + # We now gut wrench into the events stream MultiWriterIdGenerator on + # worker2 to mimic it getting stuck persisting a receipt. This ensures + # that when we send an event on worker1 we end up in a state where + # worker2 events stream position lags that on worker1, resulting in a + # receipts token with a non-empty instance map component. + # + # Worker2's receipts stream position will not advance until we call + # __aexit__ again. + worker_store2 = worker_hs2.get_datastores().main + assert isinstance(worker_store2._receipts_id_gen, MultiWriterIdGenerator) + + actx = worker_store2._receipts_id_gen.get_next() + self.get_success(actx.__aenter__()) + + channel = make_request( + reactor=self.reactor, + site=worker1_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{first_event}", + access_token=access_token, + content={}, + ) + self.assertEqual(200, channel.code) + + # Assert that the current stream token has an instance map component, as + # we are trying to test vector clock tokens. + receipts_token = store.get_max_receipt_stream_id() + self.assertGreater(len(receipts_token.instance_map), 0) + + # Check that syncing still gets the new receipt, despite the gap in the + # stream IDs. + channel = make_request( + self.reactor, + sync_hs_site, + "GET", + f"/sync?since={next_batch}", + access_token=access_token, + ) + + # We should only see the new event and nothing else + self.assertIn(room_id, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id]["ephemeral"]["events"] + self.assertEqual(len(events), 1) + self.assertIn(first_event, events[0]["content"]) + + # Get the next batch and makes sure its a vector clock style token. + vector_clock_token = channel.json_body["next_batch"] + parsed_token = self.get_success( + StreamToken.from_string(store, vector_clock_token) + ) + self.assertGreaterEqual(len(parsed_token.receipt_key.instance_map), 1) + + # Now that we've got a vector clock token we finish the fake persisting + # a receipt we started above. + self.get_success(actx.__aexit__(None, None, None)) + + # Now try and send another receipts to the other worker. 
+ response = self.helper.send(room_id, body="Hi!", tok=self.other_access_token) + second_event = response["event_id"] + + channel = make_request( + reactor=self.reactor, + site=worker2_site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{second_event}", + access_token=access_token, + content={}, + ) + + channel = make_request( + self.reactor, + sync_hs_site, + "GET", + f"/sync?since={vector_clock_token}", + access_token=access_token, + ) + + self.assertIn(room_id, channel.json_body["rooms"]["join"]) + + events = channel.json_body["rooms"]["join"][room_id]["ephemeral"]["events"] + self.assertEqual(len(events), 1) + self.assertIn(second_event, events[0]["content"]) From c14a7de6af0b8a3cbf2e17afca1cab339bc5912d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Oct 2023 16:31:15 +0100 Subject: [PATCH 094/142] Pin the recommended poetry version in contributors' guide (#16550) --- changelog.d/16550.doc | 1 + docs/development/contributing_guide.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16550.doc diff --git a/changelog.d/16550.doc b/changelog.d/16550.doc new file mode 100644 index 0000000000..77ba422a06 --- /dev/null +++ b/changelog.d/16550.doc @@ -0,0 +1 @@ +Pin the recommended poetry version in contributors' guide. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 4ae2fcfee3..2efb4099e5 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -66,7 +66,7 @@ Of their installation methods, we recommend ```shell pip install --user pipx -pipx install poetry +pipx install poetry==1.5.2 # Problems with Poetry 1.6, see https://github.com/matrix-org/synapse/issues/16147 ``` but see poetry's [installation instructions](https://python-poetry.org/docs/#installation) From 9407d5ba78d1e5275b5817ae9e6aedf7d1ca14f7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Oct 2023 13:01:36 -0400 Subject: [PATCH 095/142] Convert simple_select_list and simple_select_list_txn to return lists of tuples (#16505) This should use fewer allocations and improves type hints. 
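Before the diff itself, a runnable illustration of what this conversion means for call sites: rows used to come back as dicts keyed by column name, and now come back as tuples in `retcols` order, which callers unpack positionally (usually behind a `cast` for mypy). Toy data below, not a real query:

```python
from typing import Dict, List, Tuple

# Hypothetical result of retcols=("medium", "address") on user_threepids.
dict_rows: List[Dict[str, str]] = [
    {"medium": "email", "address": "bob@example.com"},
    {"medium": "msisdn", "address": "+440000000000"},
]
tuple_rows: List[Tuple[str, str]] = [
    ("email", "bob@example.com"),
    ("msisdn", "+440000000000"),
]

# Old style: index each row dict by column name.
emails_before = [r["address"] for r in dict_rows if r["medium"] == "email"]

# New style: unpack the tuple in retcols order.
emails_after = [address for medium, address in tuple_rows if medium == "email"]

assert emails_before == emails_after == ["bob@example.com"]
```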
--- changelog.d/16505.misc | 1 + synapse/handlers/deactivate_account.py | 4 +- synapse/handlers/sso.py | 5 +- synapse/storage/database.py | 31 +-- .../storage/databases/main/account_data.py | 18 +- synapse/storage/databases/main/appservice.py | 13 +- synapse/storage/databases/main/client_ips.py | 25 +- synapse/storage/databases/main/devices.py | 70 ++--- .../storage/databases/main/e2e_room_keys.py | 49 ++-- .../databases/main/event_federation.py | 18 +- .../databases/main/experimental_features.py | 15 +- synapse/storage/databases/main/keys.py | 35 +-- .../databases/main/media_repository.py | 58 ++-- synapse/storage/databases/main/push_rule.py | 52 ++-- synapse/storage/databases/main/pusher.py | 20 +- .../storage/databases/main/registration.py | 58 ++-- synapse/storage/databases/main/relations.py | 15 +- synapse/storage/databases/main/room.py | 34 +-- synapse/storage/databases/main/roommember.py | 15 +- synapse/storage/databases/main/tags.py | 28 +- synapse/storage/databases/main/ui_auth.py | 32 ++- synapse/storage/databases/state/store.py | 18 +- tests/handlers/test_stats.py | 14 +- tests/storage/databases/main/test_receipts.py | 20 +- tests/storage/test__base.py | 16 +- tests/storage/test_background_update.py | 35 ++- tests/storage/test_base.py | 4 +- tests/storage/test_client_ips.py | 250 +++++++++--------- tests/storage/test_roommember.py | 40 +-- tests/storage/test_state.py | 60 +++-- tests/storage/test_user_directory.py | 61 +++-- 31 files changed, 607 insertions(+), 507 deletions(-) create mode 100644 changelog.d/16505.misc diff --git a/changelog.d/16505.misc b/changelog.d/16505.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16505.misc @@ -0,0 +1 @@ +Reduce memory allocations. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 6a8f8f2fd1..370f4041fb 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -103,10 +103,10 @@ class DeactivateAccountHandler: # Attempt to unbind any known bound threepids to this account from identity # server(s). bound_threepids = await self.store.user_get_bound_threepids(user_id) - for threepid in bound_threepids: + for medium, address in bound_threepids: try: result = await self._identity_handler.try_unbind_threepid( - user_id, threepid["medium"], threepid["address"], id_server + user_id, medium, address, id_server ) except Exception: # Do we want this to be a fatal error or should we carry on? diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index e9a544e754..62f2454f5d 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -1206,10 +1206,7 @@ class SsoHandler: # We have no guarantee that all the devices of that session are for the same # `user_id`. Hence, we have to iterate over the list of devices and log them out # one by one. - for device in devices: - user_id = device["user_id"] - device_id = device["device_id"] - + for user_id, device_id in devices: # If the user_id associated with that device/session is not the one we got # out of the `sub` claim, skip that device and show log an error. if expected_user_id is not None and user_id != expected_user_id: diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 81f661160c..774d5c12f0 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -606,13 +606,16 @@ class DatabasePool: If the background updates have not completed, wait 15 sec and check again. 
""" - updates = await self.simple_select_list( - "background_updates", - keyvalues=None, - retcols=["update_name"], - desc="check_background_updates", + updates = cast( + List[Tuple[str]], + await self.simple_select_list( + "background_updates", + keyvalues=None, + retcols=["update_name"], + desc="check_background_updates", + ), ) - background_update_names = [x["update_name"] for x in updates] + background_update_names = [x[0] for x in updates] for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items(): if update_name not in background_update_names: @@ -1804,9 +1807,9 @@ class DatabasePool: keyvalues: Optional[Dict[str, Any]], retcols: Collection[str], desc: str = "simple_select_list", - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows, returning the result as a list of tuples. Args: table: the table name @@ -1817,8 +1820,7 @@ class DatabasePool: desc: description of the transaction, for logging and metrics Returns: - A list of dictionaries, one per result row, each a mapping between the - column names from `retcols` and that column's value for the row. + A list of tuples, one per result row, each the retcolumn's value for the row. """ return await self.runInteraction( desc, @@ -1836,9 +1838,9 @@ class DatabasePool: table: str, keyvalues: Optional[Dict[str, Any]], retcols: Iterable[str], - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[Any, ...]]: """Executes a SELECT query on the named table, which may return zero or - more rows, returning the result as a list of dicts. + more rows, returning the result as a list of tuples. Args: txn: Transaction object @@ -1849,8 +1851,7 @@ class DatabasePool: retcols: the names of the columns to return Returns: - A list of dictionaries, one per result row, each a mapping between the - column names from `retcols` and that column's value for the row. + A list of tuples, one per result row, each the retcolumn's value for the row. 
""" if keyvalues: sql = "SELECT %s FROM %s WHERE %s" % ( @@ -1863,7 +1864,7 @@ class DatabasePool: sql = "SELECT %s FROM %s" % (", ".join(retcols), table) txn.execute(sql) - return cls.cursor_to_dict(txn) + return txn.fetchall() async def simple_select_many_batch( self, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 84ef8136c2..d7482a1f4e 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -286,16 +286,20 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_account_data_for_room_txn( txn: LoggingTransaction, - ) -> Dict[str, JsonDict]: - rows = self.db_pool.simple_select_list_txn( - txn, - "room_account_data", - {"user_id": user_id, "room_id": room_id}, - ["account_data_type", "content"], + ) -> Dict[str, JsonMapping]: + rows = cast( + List[Tuple[str, str]], + self.db_pool.simple_select_list_txn( + txn, + table="room_account_data", + keyvalues={"user_id": user_id, "room_id": room_id}, + retcols=["account_data_type", "content"], + ), ) return { - row["account_data_type"]: db_to_json(row["content"]) for row in rows + account_data_type: db_to_json(content) + for account_data_type, content in rows } return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 073a99cd84..fa7d1c469a 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -197,16 +197,21 @@ class ApplicationServiceTransactionWorkerStore( Returns: A list of ApplicationServices, which may be empty. """ - results = await self.db_pool.simple_select_list( - "application_services_state", {"state": state.value}, ["as_id"] + results = cast( + List[Tuple[str]], + await self.db_pool.simple_select_list( + table="application_services_state", + keyvalues={"state": state.value}, + retcols=("as_id",), + ), ) # NB: This assumes this class is linked with ApplicationServiceStore as_list = self.get_app_services() services = [] - for res in results: + for (as_id,) in results: for service in as_list: - if service.id == res["as_id"]: + if service.id == as_id: services.append(service) return services diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 8be1511859..c006129625 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -508,21 +508,24 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke if device_id is not None: keyvalues["device_id"] = device_id - res = await self.db_pool.simple_select_list( - table="devices", - keyvalues=keyvalues, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + res = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + await self.db_pool.simple_select_list( + table="devices", + keyvalues=keyvalues, + retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + ), ) return { - (d["user_id"], d["device_id"]): DeviceLastConnectionInfo( - user_id=d["user_id"], - device_id=d["device_id"], - ip=d["ip"], - user_agent=d["user_agent"], - last_seen=d["last_seen"], + (user_id, device_id): DeviceLastConnectionInfo( + user_id=user_id, + device_id=device_id, + ip=ip, + user_agent=user_agent, + last_seen=last_seen, ) - for d in res + for user_id, ip, user_agent, device_id, last_seen in res } async def 
_get_user_ip_and_agents_from_database( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fc23d18eba..0b75f6763a 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -283,7 +283,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): allow_none=True, ) - async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]: + async def get_devices_by_user( + self, user_id: str + ) -> Dict[str, Dict[str, Optional[str]]]: """Retrieve all of a user's registered devices. Only returns devices that are not marked as hidden. @@ -291,20 +293,26 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): user_id: Returns: A mapping from device_id to a dict containing "device_id", "user_id" - and "display_name" for each device. + and "display_name" for each device. Display name may be null. """ - devices = await self.db_pool.simple_select_list( - table="devices", - keyvalues={"user_id": user_id, "hidden": False}, - retcols=("user_id", "device_id", "display_name"), - desc="get_devices_by_user", + devices = cast( + List[Tuple[str, str, Optional[str]]], + await self.db_pool.simple_select_list( + table="devices", + keyvalues={"user_id": user_id, "hidden": False}, + retcols=("user_id", "device_id", "display_name"), + desc="get_devices_by_user", + ), ) - return {d["device_id"]: d for d in devices} + return { + d[1]: {"user_id": d[0], "device_id": d[1], "display_name": d[2]} + for d in devices + } async def get_devices_by_auth_provider_session_id( self, auth_provider_id: str, auth_provider_session_id: str - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, str]]: """Retrieve the list of devices associated with a SSO IdP session ID. Args: @@ -313,14 +321,17 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): Returns: A list of dicts containing the device_id and the user_id of each device """ - return await self.db_pool.simple_select_list( - table="device_auth_providers", - keyvalues={ - "auth_provider_id": auth_provider_id, - "auth_provider_session_id": auth_provider_session_id, - }, - retcols=("user_id", "device_id"), - desc="get_devices_by_auth_provider_session_id", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="device_auth_providers", + keyvalues={ + "auth_provider_id": auth_provider_id, + "auth_provider_session_id": auth_provider_session_id, + }, + retcols=("user_id", "device_id"), + desc="get_devices_by_auth_provider_session_id", + ), ) @trace @@ -821,15 +832,16 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): async def get_cached_devices_for_user( self, user_id: str ) -> Mapping[str, JsonMapping]: - devices = await self.db_pool.simple_select_list( - table="device_lists_remote_cache", - keyvalues={"user_id": user_id}, - retcols=("device_id", "content"), - desc="get_cached_devices_for_user", + devices = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="device_lists_remote_cache", + keyvalues={"user_id": user_id}, + retcols=("device_id", "content"), + desc="get_cached_devices_for_user", + ), ) - return { - device["device_id"]: db_to_json(device["content"]) for device in devices - } + return {device[0]: db_to_json(device[1]) for device in devices} def get_cached_device_list_changes( self, @@ -1080,7 +1092,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): The IDs of users whose device lists need resync. 
""" if user_ids: - row_tuples = cast( + rows = cast( List[Tuple[str]], await self.db_pool.simple_select_many_batch( table="device_lists_remote_resync", @@ -1090,11 +1102,9 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): desc="get_user_ids_requiring_device_list_resync_with_iterable", ), ) - - return {row[0] for row in row_tuples} else: rows = cast( - List[Dict[str, str]], + List[Tuple[str]], await self.db_pool.simple_select_list( table="device_lists_remote_resync", keyvalues=None, @@ -1103,7 +1113,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ), ) - return {row["user_id"] for row in rows} + return {row[0] for row in rows} async def mark_remote_users_device_caches_as_stale( self, user_ids: StrCollection diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index aac4cfb054..ad904a26a6 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple, cast from typing_extensions import Literal, TypedDict @@ -274,32 +274,41 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): if session_id: keyvalues["session_id"] = session_id - rows = await self.db_pool.simple_select_list( - table="e2e_room_keys", - keyvalues=keyvalues, - retcols=( - "user_id", - "room_id", - "session_id", - "first_message_index", - "forwarded_count", - "is_verified", - "session_data", + rows = cast( + List[Tuple[str, str, int, int, int, str]], + await self.db_pool.simple_select_list( + table="e2e_room_keys", + keyvalues=keyvalues, + retcols=( + "room_id", + "session_id", + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + desc="get_e2e_room_keys", ), - desc="get_e2e_room_keys", ) sessions: Dict[ Literal["rooms"], Dict[str, Dict[Literal["sessions"], Dict[str, RoomKey]]] ] = {"rooms": {}} - for row in rows: - room_entry = sessions["rooms"].setdefault(row["room_id"], {"sessions": {}}) - room_entry["sessions"][row["session_id"]] = { - "first_message_index": row["first_message_index"], - "forwarded_count": row["forwarded_count"], + for ( + room_id, + session_id, + first_message_index, + forwarded_count, + is_verified, + session_data, + ) in rows: + room_entry = sessions["rooms"].setdefault(room_id, {"sessions": {}}) + room_entry["sessions"][session_id] = { + "first_message_index": first_message_index, + "forwarded_count": forwarded_count, # is_verified must be returned to the client as a boolean - "is_verified": bool(row["is_verified"]), - "session_data": db_to_json(row["session_data"]), + "is_verified": bool(is_verified), + "session_data": db_to_json(session_data), } return sessions diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 4f80ce75cc..f1b0991503 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1898,21 +1898,23 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas # keeping only the forward extremities (i.e. the events not referenced # by other events in the queue). We do this so that we can always # backpaginate in all the events we have dropped. 
- rows = await self.db_pool.simple_select_list( - table="federation_inbound_events_staging", - keyvalues={"room_id": room_id}, - retcols=("event_id", "event_json"), - desc="prune_staged_events_in_room_fetch", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="federation_inbound_events_staging", + keyvalues={"room_id": room_id}, + retcols=("event_id", "event_json"), + desc="prune_staged_events_in_room_fetch", + ), ) # Find the set of events referenced by those in the queue, as well as # collecting all the event IDs in the queue. referenced_events: Set[str] = set() seen_events: Set[str] = set() - for row in rows: - event_id = row["event_id"] + for event_id, event_json in rows: seen_events.add(event_id) - event_d = db_to_json(row["event_json"]) + event_d = db_to_json(event_json) # We don't bother parsing the dicts into full blown event objects, # as that is needlessly expensive. diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py index 654f924019..60621edeef 100644 --- a/synapse/storage/databases/main/experimental_features.py +++ b/synapse/storage/databases/main/experimental_features.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, FrozenSet +from typing import TYPE_CHECKING, Dict, FrozenSet, List, Tuple, cast from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main import CacheInvalidationWorkerStore @@ -42,13 +42,16 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore): Returns: the features currently enabled for the user """ - enabled = await self.db_pool.simple_select_list( - "per_user_experimental_features", - {"user_id": user_id, "enabled": True}, - ["feature"], + enabled = cast( + List[Tuple[str]], + await self.db_pool.simple_select_list( + table="per_user_experimental_features", + keyvalues={"user_id": user_id, "enabled": True}, + retcols=("feature",), + ), ) - return frozenset(feature["feature"] for feature in enabled) + return frozenset(feature[0] for feature in enabled) async def set_features_for_user( self, diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index ea797864b9..ce88772f9e 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -248,17 +248,20 @@ class KeyStore(CacheInvalidationWorkerStore): If we have multiple entries for a given key ID, returns the most recent. """ - rows = await self.db_pool.simple_select_list( - table="server_keys_json", - keyvalues={"server_name": server_name}, - retcols=( - "key_id", - "from_server", - "ts_added_ms", - "ts_valid_until_ms", - "key_json", + rows = cast( + List[Tuple[str, str, int, int, Union[bytes, memoryview]]], + await self.db_pool.simple_select_list( + table="server_keys_json", + keyvalues={"server_name": server_name}, + retcols=( + "key_id", + "from_server", + "ts_added_ms", + "ts_valid_until_ms", + "key_json", + ), + desc="get_server_keys_json_for_remote", ), - desc="get_server_keys_json_for_remote", ) if not rows: @@ -266,14 +269,14 @@ class KeyStore(CacheInvalidationWorkerStore): # We sort the rows by ts_added_ms so that the most recently added entry # will stomp over older entries in the dictionary. 
- rows.sort(key=lambda r: r["ts_added_ms"]) + rows.sort(key=lambda r: r[2]) return { - row["key_id"]: FetchKeyResultForRemote( + key_id: FetchKeyResultForRemote( # Cast to bytes since postgresql returns a memoryview. - key_json=bytes(row["key_json"]), - valid_until_ts=row["ts_valid_until_ms"], - added_ts=row["ts_added_ms"], + key_json=bytes(key_json), + valid_until_ts=ts_valid_until_ms, + added_ts=ts_added_ms, ) - for row in rows + for key_id, from_server, ts_added_ms, ts_valid_until_ms, key_json in rows } diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 2e6b176bd2..f82140b2e8 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -437,25 +437,24 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): ) async def get_local_media_thumbnails(self, media_id: str) -> List[ThumbnailInfo]: - rows = await self.db_pool.simple_select_list( - "local_media_repository_thumbnails", - {"media_id": media_id}, - ( - "thumbnail_width", - "thumbnail_height", - "thumbnail_method", - "thumbnail_type", - "thumbnail_length", + rows = cast( + List[Tuple[int, int, str, str, int]], + await self.db_pool.simple_select_list( + "local_media_repository_thumbnails", + {"media_id": media_id}, + ( + "thumbnail_width", + "thumbnail_height", + "thumbnail_method", + "thumbnail_type", + "thumbnail_length", + ), + desc="get_local_media_thumbnails", ), - desc="get_local_media_thumbnails", ) return [ ThumbnailInfo( - width=row["thumbnail_width"], - height=row["thumbnail_height"], - method=row["thumbnail_method"], - type=row["thumbnail_type"], - length=row["thumbnail_length"], + width=row[0], height=row[1], method=row[2], type=row[3], length=row[4] ) for row in rows ] @@ -568,25 +567,24 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_thumbnails( self, origin: str, media_id: str ) -> List[ThumbnailInfo]: - rows = await self.db_pool.simple_select_list( - "remote_media_cache_thumbnails", - {"media_origin": origin, "media_id": media_id}, - ( - "thumbnail_width", - "thumbnail_height", - "thumbnail_method", - "thumbnail_type", - "thumbnail_length", + rows = cast( + List[Tuple[int, int, str, str, int]], + await self.db_pool.simple_select_list( + "remote_media_cache_thumbnails", + {"media_origin": origin, "media_id": media_id}, + ( + "thumbnail_width", + "thumbnail_height", + "thumbnail_method", + "thumbnail_type", + "thumbnail_length", + ), + desc="get_remote_media_thumbnails", ), - desc="get_remote_media_thumbnails", ) return [ ThumbnailInfo( - width=row["thumbnail_width"], - height=row["thumbnail_height"], - method=row["thumbnail_method"], - type=row["thumbnail_type"], - length=row["thumbnail_length"], + width=row[0], height=row[1], method=row[2], type=row[3], length=row[4] ) for row in rows ] diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index f5356e7f80..22025eca56 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -179,46 +179,44 @@ class PushRulesWorkerStore( @cached(max_entries=5000) async def get_push_rules_for_user(self, user_id: str) -> FilteredPushRules: - rows = await self.db_pool.simple_select_list( - table="push_rules", - keyvalues={"user_name": user_id}, - retcols=( - "user_name", - "rule_id", - "priority_class", - "priority", - "conditions", - "actions", + rows = cast( + List[Tuple[str, int, int, str, 
str]], + await self.db_pool.simple_select_list( + table="push_rules", + keyvalues={"user_name": user_id}, + retcols=( + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ), + desc="get_push_rules_for_user", ), - desc="get_push_rules_for_user", ) - rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) + # Sort by highest priority_class, then highest priority. + rows.sort(key=lambda row: (-int(row[1]), -int(row[2]))) enabled_map = await self.get_push_rules_enabled_for_user(user_id) return _load_rules( - [ - ( - row["rule_id"], - row["priority_class"], - row["conditions"], - row["actions"], - ) - for row in rows - ], + [(row[0], row[1], row[3], row[4]) for row in rows], enabled_map, self.hs.config.experimental, ) async def get_push_rules_enabled_for_user(self, user_id: str) -> Dict[str, bool]: - results = await self.db_pool.simple_select_list( - table="push_rules_enable", - keyvalues={"user_name": user_id}, - retcols=("rule_id", "enabled"), - desc="get_push_rules_enabled_for_user", + results = cast( + List[Tuple[str, Optional[Union[int, bool]]]], + await self.db_pool.simple_select_list( + table="push_rules_enable", + keyvalues={"user_name": user_id}, + retcols=("rule_id", "enabled"), + desc="get_push_rules_enabled_for_user", + ), ) - return {r["rule_id"]: bool(r["enabled"]) for r in results} + return {r[0]: bool(r[1]) for r in results} async def have_push_rules_changed_for_user( self, user_id: str, last_id: int diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c7eb7fc478..a6a1671bd6 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -371,18 +371,20 @@ class PusherWorkerStore(SQLBaseStore): async def get_throttle_params_by_room( self, pusher_id: int ) -> Dict[str, ThrottleParams]: - res = await self.db_pool.simple_select_list( - "pusher_throttle", - {"pusher": pusher_id}, - ["room_id", "last_sent_ts", "throttle_ms"], - desc="get_throttle_params_by_room", + res = cast( + List[Tuple[str, Optional[int], Optional[int]]], + await self.db_pool.simple_select_list( + "pusher_throttle", + {"pusher": pusher_id}, + ["room_id", "last_sent_ts", "throttle_ms"], + desc="get_throttle_params_by_room", + ), ) params_by_room = {} - for row in res: - params_by_room[row["room_id"]] = ThrottleParams( - row["last_sent_ts"], - row["throttle_ms"], + for room_id, last_sent_ts, throttle_ms in res: + params_by_room[room_id] = ThrottleParams( + last_sent_ts or 0, throttle_ms or 0 ) return params_by_room diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 9e8643ae4d..b0ef7be155 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -855,13 +855,15 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): Returns: Tuples of (auth_provider, external_id) """ - res = await self.db_pool.simple_select_list( - table="user_external_ids", - keyvalues={"user_id": mxid}, - retcols=("auth_provider", "external_id"), - desc="get_external_ids_by_user", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="user_external_ids", + keyvalues={"user_id": mxid}, + retcols=("auth_provider", "external_id"), + desc="get_external_ids_by_user", + ), ) - return [(r["auth_provider"], r["external_id"]) for r in res] async def count_all_users(self) -> int: """Counts all users registered on the homeserver.""" @@ -997,13 +999,24 @@ class 
RegistrationWorkerStore(CacheInvalidationWorkerStore): ) async def user_get_threepids(self, user_id: str) -> List[ThreepidResult]: - results = await self.db_pool.simple_select_list( - "user_threepids", - keyvalues={"user_id": user_id}, - retcols=["medium", "address", "validated_at", "added_at"], - desc="user_get_threepids", + results = cast( + List[Tuple[str, str, int, int]], + await self.db_pool.simple_select_list( + "user_threepids", + keyvalues={"user_id": user_id}, + retcols=["medium", "address", "validated_at", "added_at"], + desc="user_get_threepids", + ), ) - return [ThreepidResult(**r) for r in results] + return [ + ThreepidResult( + medium=r[0], + address=r[1], + validated_at=r[2], + added_at=r[3], + ) + for r in results + ] async def user_delete_threepid( self, user_id: str, medium: str, address: str @@ -1042,7 +1055,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): desc="add_user_bound_threepid", ) - async def user_get_bound_threepids(self, user_id: str) -> List[Dict[str, Any]]: + async def user_get_bound_threepids(self, user_id: str) -> List[Tuple[str, str]]: """Get the threepids that a user has bound to an identity server through the homeserver The homeserver remembers where binds to an identity server occurred. Using this method can retrieve those threepids. @@ -1051,15 +1064,18 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): user_id: The ID of the user to retrieve threepids for Returns: - List of dictionaries containing the following keys: - medium (str): The medium of the threepid (e.g "email") - address (str): The address of the threepid (e.g "bob@example.com") + List of tuples of two strings: + medium: The medium of the threepid (e.g "email") + address: The address of the threepid (e.g "bob@example.com") """ - return await self.db_pool.simple_select_list( - table="user_threepid_id_server", - keyvalues={"user_id": user_id}, - retcols=["medium", "address"], - desc="user_get_bound_threepids", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="user_threepid_id_server", + keyvalues={"user_id": user_id}, + retcols=["medium", "address"], + desc="user_get_bound_threepids", + ), ) async def remove_user_bound_threepid( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index ce7bfd5146..419b2c7a22 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -384,14 +384,17 @@ class RelationsWorkerStore(SQLBaseStore): def get_all_relation_ids_for_event_txn( txn: LoggingTransaction, ) -> List[str]: - rows = self.db_pool.simple_select_list_txn( - txn=txn, - table="event_relations", - keyvalues={"relates_to_id": event_id}, - retcols=["event_id"], + rows = cast( + List[Tuple[str]], + self.db_pool.simple_select_list_txn( + txn=txn, + table="event_relations", + keyvalues={"relates_to_id": event_id}, + retcols=["event_id"], + ), ) - return [row["event_id"] for row in rows] + return [row[0] for row in rows] return await self.db_pool.runInteraction( desc="get_all_relation_ids_for_event", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 9d24d2c347..3e8fcf1975 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1232,28 +1232,30 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): """ room_servers: Dict[str, PartialStateResyncInfo] = {} - rows = await self.db_pool.simple_select_list( - table="partial_state_rooms", - 
keyvalues={}, - retcols=("room_id", "joined_via"), - desc="get_server_which_served_partial_join", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="partial_state_rooms", + keyvalues={}, + retcols=("room_id", "joined_via"), + desc="get_server_which_served_partial_join", + ), ) - for row in rows: - room_id = row["room_id"] - joined_via = row["joined_via"] + for room_id, joined_via in rows: room_servers[room_id] = PartialStateResyncInfo(joined_via=joined_via) - rows = await self.db_pool.simple_select_list( - "partial_state_rooms_servers", - keyvalues=None, - retcols=("room_id", "server_name"), - desc="get_partial_state_rooms", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + "partial_state_rooms_servers", + keyvalues=None, + retcols=("room_id", "server_name"), + desc="get_partial_state_rooms", + ), ) - for row in rows: - room_id = row["room_id"] - server_name = row["server_name"] + for room_id, server_name in rows: entry = room_servers.get(room_id) if entry is None: # There is a foreign key constraint which enforces that every room_id in diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 3a87eba430..a1627dffb7 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1070,13 +1070,16 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): for fully-joined rooms. """ - rows = await self.db_pool.simple_select_list( - "current_state_events", - keyvalues={"room_id": room_id}, - retcols=("event_id", "membership"), - desc="has_completed_background_updates", + rows = cast( + List[Tuple[str, Optional[str]]], + await self.db_pool.simple_select_list( + "current_state_events", + keyvalues={"room_id": room_id}, + retcols=("event_id", "membership"), + desc="has_completed_background_updates", + ), ) - return {row["event_id"]: row["membership"] for row in rows} + return dict(rows) # TODO This returns a mutable object, which is generally confusing when using a cache. @cached(max_entries=10000) # type: ignore[synapse-@cached-mutable] diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 61403a98cf..7deda7790e 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -45,14 +45,17 @@ class TagsWorkerStore(AccountDataWorkerStore): tag content. """ - rows = await self.db_pool.simple_select_list( - "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"] + rows = cast( + List[Tuple[str, str, str]], + await self.db_pool.simple_select_list( + "room_tags", {"user_id": user_id}, ["room_id", "tag", "content"] + ), ) tags_by_room: Dict[str, Dict[str, JsonDict]] = {} - for row in rows: - room_tags = tags_by_room.setdefault(row["room_id"], {}) - room_tags[row["tag"]] = db_to_json(row["content"]) + for room_id, tag, content in rows: + room_tags = tags_by_room.setdefault(room_id, {}) + room_tags[tag] = db_to_json(content) return tags_by_room async def get_all_updated_tags( @@ -161,13 +164,16 @@ class TagsWorkerStore(AccountDataWorkerStore): Returns: A mapping of tags to tag content. 
""" - rows = await self.db_pool.simple_select_list( - table="room_tags", - keyvalues={"user_id": user_id, "room_id": room_id}, - retcols=("tag", "content"), - desc="get_tags_for_room", + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="room_tags", + keyvalues={"user_id": user_id, "room_id": room_id}, + retcols=("tag", "content"), + desc="get_tags_for_room", + ), ) - return {row["tag"]: db_to_json(row["content"]) for row in rows} + return {tag: db_to_json(content) for tag, content in rows} async def add_tag_to_room( self, user_id: str, room_id: str, tag: str, content: JsonDict diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 919c66f553..8ab7c42c4a 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -169,13 +169,17 @@ class UIAuthWorkerStore(SQLBaseStore): that auth-type. """ results = {} - for row in await self.db_pool.simple_select_list( - table="ui_auth_sessions_credentials", - keyvalues={"session_id": session_id}, - retcols=("stage_type", "result"), - desc="get_completed_ui_auth_stages", - ): - results[row["stage_type"]] = db_to_json(row["result"]) + rows = cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="ui_auth_sessions_credentials", + keyvalues={"session_id": session_id}, + retcols=("stage_type", "result"), + desc="get_completed_ui_auth_stages", + ), + ) + for stage_type, result in rows: + results[stage_type] = db_to_json(result) return results @@ -295,13 +299,15 @@ class UIAuthWorkerStore(SQLBaseStore): Returns: List of user_agent/ip pairs """ - rows = await self.db_pool.simple_select_list( - table="ui_auth_sessions_ips", - keyvalues={"session_id": session_id}, - retcols=("user_agent", "ip"), - desc="get_user_agents_ips_to_ui_auth_session", + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="ui_auth_sessions_ips", + keyvalues={"session_id": session_id}, + retcols=("user_agent", "ip"), + desc="get_user_agents_ips_to_ui_auth_session", + ), ) - return [(row["user_agent"], row["ip"]) for row in rows] async def delete_old_ui_auth_sessions(self, expiration_time: int) -> None: """ diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 09d2a8c5b3..182e429174 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -154,16 +154,22 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): if not prev_group: return _GetStateGroupDelta(None, None) - delta_ids = self.db_pool.simple_select_list_txn( - txn, - table="state_groups_state", - keyvalues={"state_group": state_group}, - retcols=("type", "state_key", "event_id"), + delta_ids = cast( + List[Tuple[str, str, str]], + self.db_pool.simple_select_list_txn( + txn, + table="state_groups_state", + keyvalues={"state_group": state_group}, + retcols=("type", "state_key", "event_id"), + ), ) return _GetStateGroupDelta( prev_group, - {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids}, + { + (event_type, state_key): event_id + for event_type, state_key, event_id in delta_ids + }, ) return await self.db_pool.runInteraction( diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index d11ded6c5b..76c56d5434 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the 
License. -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple, cast from twisted.test.proto_helpers import MemoryReactor @@ -68,10 +68,14 @@ class StatsRoomTests(unittest.HomeserverTestCase): ) ) - async def get_all_room_state(self) -> List[Dict[str, Any]]: - return await self.store.db_pool.simple_select_list( - "room_stats_state", None, retcols=("name", "topic", "canonical_alias") + async def get_all_room_state(self) -> List[Optional[str]]: + rows = cast( + List[Tuple[Optional[str]]], + await self.store.db_pool.simple_select_list( + "room_stats_state", None, retcols=("topic",) + ), ) + return [r[0] for r in rows] def _get_current_stats( self, stats_type: str, stat_id: str @@ -130,7 +134,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): r = self.get_success(self.get_all_room_state()) self.assertEqual(len(r), 1) - self.assertEqual(r[0]["topic"], "foo") + self.assertEqual(r[0], "foo") def test_create_user(self) -> None: """ diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index 71db47405e..98b01086bc 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -117,7 +117,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): if expected_row is not None: columns += expected_row.keys() - rows = self.get_success( + row_tuples = self.get_success( self.store.db_pool.simple_select_list( table=table, keyvalues={ @@ -134,22 +134,22 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): if expected_row is not None: self.assertEqual( - len(rows), + len(row_tuples), 1, f"Background update did not leave behind latest receipt in {table}", ) self.assertEqual( - rows[0], - { - "room_id": room_id, - "receipt_type": receipt_type, - "user_id": user_id, - **expected_row, - }, + row_tuples[0], + ( + room_id, + receipt_type, + user_id, + *expected_row.values(), + ), ) else: self.assertEqual( - len(rows), + len(row_tuples), 0, f"Background update did not remove all duplicate receipts from {table}", ) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8bbf936ae9..8cbc974ac4 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -14,7 +14,7 @@ # limitations under the License. import secrets -from typing import Generator, Tuple +from typing import Generator, List, Tuple, cast from twisted.test.proto_helpers import MemoryReactor @@ -47,15 +47,15 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase): ) def _dump_table_to_tuple(self) -> Generator[Tuple[int, str, str], None, None]: - res = self.get_success( - self.storage.db_pool.simple_select_list( - self.table_name, None, ["id, username, value"] - ) + yield from cast( + List[Tuple[int, str, str]], + self.get_success( + self.storage.db_pool.simple_select_list( + self.table_name, None, ["id, username, value"] + ) + ), ) - for i in res: - yield (i["id"], i["username"], i["value"]) - def test_upsert_many(self) -> None: """ Upsert_many will perform the upsert operation across a batch of data. diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index abf7d0564d..3f5bfa09d4 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from typing import List, Tuple, cast from unittest.mock import AsyncMock, Mock import yaml @@ -526,15 +527,18 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): self.wait_for_background_updates() # Check the correct values are in the new table. - rows = self.get_success( - self.store.db_pool.simple_select_list( - table="test_constraint", - keyvalues={}, - retcols=("a", "b"), - ) + rows = cast( + List[Tuple[int, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="test_constraint", + keyvalues={}, + retcols=("a", "b"), + ) + ), ) - self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}]) + self.assertCountEqual(rows, [(1, 1), (3, 3)]) # And check that invalid rows get correctly rejected. self.get_failure( @@ -640,14 +644,17 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): self.wait_for_background_updates() # Check the correct values are in the new table. - rows = self.get_success( - self.store.db_pool.simple_select_list( - table="test_constraint", - keyvalues={}, - retcols=("a", "b"), - ) + rows = cast( + List[Tuple[int, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="test_constraint", + keyvalues={}, + retcols=("a", "b"), + ) + ), ) - self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}]) + self.assertCountEqual(rows, [(1, 1), (3, 3)]) # And check that invalid rows get correctly rejected. self.get_failure( diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 256d28e4c9..e4a52c301e 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -146,7 +146,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def test_select_list(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 3 - self.mock_txn.__iter__ = Mock(return_value=iter([(1,), (2,), (3,)])) + self.mock_txn.fetchall.return_value = [(1,), (2,), (3,)] self.mock_txn.description = (("colA", None, None, None, None, None, None),) ret = yield defer.ensureDeferred( @@ -155,7 +155,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.assertEqual([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret) + self.assertEqual([(1,), (2,), (3,)], ret) self.mock_txn.execute.assert_called_with( "SELECT colA FROM tablename WHERE keycol = ?", ["A set"] ) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 0c054a598f..8e4393d843 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Dict, List, Optional, Tuple, cast from unittest.mock import AsyncMock from parameterized import parameterized @@ -97,26 +97,26 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(200) self.pump(0) - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual( - result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": None, - "last_seen": 12345678000, - } - ], + result, [("access_token", "ip", "user_agent", None, 12345678000)] ) # Add another & trigger the storage loop @@ -128,26 +128,26 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(10) self.pump(0) - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) # Only one result, has been upserted. 
self.assertEqual( - result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": None, - "last_seen": 12345878000, - } - ], + result, [("access_token", "ip", "user_agent", None, 12345878000)] ) @parameterized.expand([(False,), (True,)]) @@ -177,25 +177,23 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(10) else: # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="devices", - keyvalues={}, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + db_result = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + self.get_success( + self.store.db_pool.simple_select_list( + table="devices", + keyvalues={}, + retcols=( + "user_id", + "ip", + "user_agent", + "device_id", + "last_seen", + ), + ), ), ) - self.assertEqual( - db_result, - [ - { - "user_id": user_id, - "device_id": device_id, - "ip": None, - "user_agent": None, - "last_seen": None, - }, - ], - ) + self.assertEqual(db_result, [(user_id, None, None, device_id, None)]) result = self.get_success( self.store.get_last_client_ip_by_device(user_id, device_id) @@ -261,30 +259,21 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="devices", - keyvalues={}, - retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + db_result = cast( + List[Tuple[str, Optional[str], Optional[str], str, Optional[int]]], + self.get_success( + self.store.db_pool.simple_select_list( + table="devices", + keyvalues={}, + retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"), + ), ), ) self.assertCountEqual( db_result, [ - { - "user_id": user_id, - "device_id": device_id_1, - "ip": "ip_1", - "user_agent": "user_agent_1", - "last_seen": 12345678000, - }, - { - "user_id": user_id, - "device_id": device_id_2, - "ip": "ip_2", - "user_agent": "user_agent_2", - "last_seen": 12345678000, - }, + (user_id, "ip_1", "user_agent_1", device_id_1, 12345678000), + (user_id, "ip_2", "user_agent_2", device_id_2, 12345678000), ], ) @@ -385,28 +374,21 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): ) # Check that the new IP and user agent has not been stored yet - db_result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={}, - retcols=("access_token", "ip", "user_agent", "last_seen"), + db_result = cast( + List[Tuple[str, str, str, int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={}, + retcols=("access_token", "ip", "user_agent", "last_seen"), + ), ), ) self.assertEqual( db_result, [ - { - "access_token": "access_token", - "ip": "ip_1", - "user_agent": "user_agent_1", - "last_seen": 12345678000, - }, - { - "access_token": "access_token", - "ip": "ip_2", - "user_agent": "user_agent_2", - "last_seen": 12345678000, - }, + ("access_token", "ip_1", "user_agent_1", 12345678000), + ("access_token", "ip_2", "user_agent_2", 12345678000), ], ) @@ -600,39 +582,49 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(200) # We should see that in the DB - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - 
) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual( result, - [ - { - "access_token": "access_token", - "ip": "ip", - "user_agent": "user_agent", - "device_id": device_id, - "last_seen": 0, - } - ], + [("access_token", "ip", "user_agent", device_id, 0)], ) # Now advance by a couple of months self.reactor.advance(60 * 24 * 60 * 60) # We should get no results. - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={"user_id": user_id}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={"user_id": user_id}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) self.assertEqual(result, []) @@ -696,28 +688,26 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): self.reactor.advance(200) # We should see that in the DB - result = self.get_success( - self.store.db_pool.simple_select_list( - table="user_ips", - keyvalues={}, - retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"], - desc="get_user_ip_and_agents", - ) + result = cast( + List[Tuple[str, str, str, Optional[str], int]], + self.get_success( + self.store.db_pool.simple_select_list( + table="user_ips", + keyvalues={}, + retcols=[ + "access_token", + "ip", + "user_agent", + "device_id", + "last_seen", + ], + desc="get_user_ip_and_agents", + ) + ), ) # ensure user1 is filtered out - self.assertEqual( - result, - [ - { - "access_token": access_token2, - "ip": "ip", - "user_agent": "user_agent", - "device_id": device_id2, - "last_seen": 0, - } - ], - ) + self.assertEqual(result, [(access_token2, "ip", "user_agent", device_id2, 0)]) class ClientIpAuthTestCase(unittest.HomeserverTestCase): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index f4c4661aaf..36fcab06b5 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import List, Optional, Tuple, cast + from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import Membership @@ -110,21 +112,24 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): def test__null_byte_in_display_name_properly_handled(self) -> None: room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) - res = self.get_success( - self.store.db_pool.simple_select_list( - "room_memberships", - {"user_id": "@alice:test"}, - ["display_name", "event_id"], - ) + res = cast( + List[Tuple[Optional[str], str]], + self.get_success( + self.store.db_pool.simple_select_list( + "room_memberships", + {"user_id": "@alice:test"}, + ["display_name", "event_id"], + ) + ), ) # Check that we only got one result back self.assertEqual(len(res), 1) # Check that alice's display name is "alice" - self.assertEqual(res[0]["display_name"], "alice") + self.assertEqual(res[0][0], "alice") # Grab the event_id to use later - event_id = res[0]["event_id"] + event_id = res[0][1] # Create a profile with the offending null byte in the display name new_profile = {"displayname": "ali\u0000ce"} @@ -139,21 +144,24 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): tok=self.t_alice, ) - res2 = self.get_success( - self.store.db_pool.simple_select_list( - "room_memberships", - {"user_id": "@alice:test"}, - ["display_name", "event_id"], - ) + res2 = cast( + List[Tuple[Optional[str], str]], + self.get_success( + self.store.db_pool.simple_select_list( + "room_memberships", + {"user_id": "@alice:test"}, + ["display_name", "event_id"], + ) + ), ) # Check that we only have two results self.assertEqual(len(res2), 2) # Filter out the previous event using the event_id we grabbed above - row = [row for row in res2 if row["event_id"] != event_id] + row = [row for row in res2 if row[1] != event_id] # Check that alice's display name is now None - self.assertEqual(row[0]["display_name"], None) + self.assertIsNone(row[0][0]) def test_room_is_locally_forgotten(self) -> None: """Test that when the last local user has forgotten a room it is known as forgotten.""" diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 0b9446c36c..2715c73f16 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -13,6 +13,7 @@ # limitations under the License. 
import logging +from typing import List, Tuple, cast from immutabledict import immutabledict @@ -584,18 +585,21 @@ class StateStoreTestCase(HomeserverTestCase): ) # check that only state events are in state_groups, and all state events are in state_groups - res = self.get_success( - self.store.db_pool.simple_select_list( - table="state_groups", - keyvalues=None, - retcols=("event_id",), - ) + res = cast( + List[Tuple[str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups", + keyvalues=None, + retcols=("event_id",), + ) + ), ) events = [] for result in res: - self.assertNotIn(event3.event_id, result) - events.append(result.get("event_id")) + self.assertNotIn(event3.event_id, result) # XXX + events.append(result[0]) for event, _ in processed_events_and_context: if event.is_state(): @@ -606,23 +610,29 @@ class StateStoreTestCase(HomeserverTestCase): # has an entry and prev event in state_group_edges for event, context in processed_events_and_context: if event.is_state(): - state = self.get_success( - self.store.db_pool.simple_select_list( - table="state_groups_state", - keyvalues={"state_group": context.state_group_after_event}, - retcols=("type", "state_key"), - ) + state = cast( + List[Tuple[str, str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups_state", + keyvalues={"state_group": context.state_group_after_event}, + retcols=("type", "state_key"), + ) + ), ) - self.assertEqual(event.type, state[0].get("type")) - self.assertEqual(event.state_key, state[0].get("state_key")) + self.assertEqual(event.type, state[0][0]) + self.assertEqual(event.state_key, state[0][1]) - groups = self.get_success( - self.store.db_pool.simple_select_list( - table="state_group_edges", - keyvalues={"state_group": str(context.state_group_after_event)}, - retcols=("*",), - ) - ) - self.assertEqual( - context.state_group_before_event, groups[0].get("prev_state_group") + groups = cast( + List[Tuple[str]], + self.get_success( + self.store.db_pool.simple_select_list( + table="state_group_edges", + keyvalues={ + "state_group": str(context.state_group_after_event) + }, + retcols=("prev_state_group",), + ) + ), ) + self.assertEqual(context.state_group_before_event, groups[0][0]) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 8c72aa1722..822c41dd9f 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import re -from typing import Any, Dict, Set, Tuple +from typing import Any, Dict, List, Optional, Set, Tuple, cast from unittest import mock from unittest.mock import Mock, patch @@ -62,14 +62,13 @@ class GetUserDirectoryTables: Returns a list of tuples (user_id, room_id) where room_id is public and contains the user with the given id. """ - r = await self.store.db_pool.simple_select_list( - "users_in_public_rooms", None, ("user_id", "room_id") + r = cast( + List[Tuple[str, str]], + await self.store.db_pool.simple_select_list( + "users_in_public_rooms", None, ("user_id", "room_id") + ), ) - - retval = set() - for i in r: - retval.add((i["user_id"], i["room_id"])) - return retval + return set(r) async def get_users_who_share_private_rooms(self) -> Set[Tuple[str, str, str]]: """Fetch the entire `users_who_share_private_rooms` table. @@ -78,27 +77,30 @@ class GetUserDirectoryTables: to the rows of `users_who_share_private_rooms`. 
""" - rows = await self.store.db_pool.simple_select_list( - "users_who_share_private_rooms", - None, - ["user_id", "other_user_id", "room_id"], + rows = cast( + List[Tuple[str, str, str]], + await self.store.db_pool.simple_select_list( + "users_who_share_private_rooms", + None, + ["user_id", "other_user_id", "room_id"], + ), ) - rv = set() - for row in rows: - rv.add((row["user_id"], row["other_user_id"], row["room_id"])) - return rv + return set(rows) async def get_users_in_user_directory(self) -> Set[str]: """Fetch the set of users in the `user_directory` table. This is useful when checking we've correctly excluded users from the directory. """ - result = await self.store.db_pool.simple_select_list( - "user_directory", - None, - ["user_id"], + result = cast( + List[Tuple[str]], + await self.store.db_pool.simple_select_list( + "user_directory", + None, + ["user_id"], + ), ) - return {row["user_id"] for row in result} + return {row[0] for row in result} async def get_profiles_in_user_directory(self) -> Dict[str, ProfileInfo]: """Fetch users and their profiles from the `user_directory` table. @@ -107,16 +109,17 @@ class GetUserDirectoryTables: It's almost the entire contents of the `user_directory` table: the only thing missing is an unused room_id column. """ - rows = await self.store.db_pool.simple_select_list( - "user_directory", - None, - ("user_id", "display_name", "avatar_url"), + rows = cast( + List[Tuple[str, Optional[str], Optional[str]]], + await self.store.db_pool.simple_select_list( + "user_directory", + None, + ("user_id", "display_name", "avatar_url"), + ), ) return { - row["user_id"]: ProfileInfo( - display_name=row["display_name"], avatar_url=row["avatar_url"] - ) - for row in rows + user_id: ProfileInfo(display_name=display_name, avatar_url=avatar_url) + for user_id, display_name, avatar_url in rows } async def get_tables( From 85e5f2dc252b866d67c8da2ddbfdb84974db1807 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Oct 2023 15:11:24 -0400 Subject: [PATCH 096/142] Add a new module API to update user presence state. (#16544) This adds a module API which allows a module to update a user's presence state/status message. This is useful for controlling presence from an external system. To fully control presence from the module the presence.enabled config parameter gains a new state of "untracked" which disables internal tracking of presence changes via user actions, etc. Only updates from the module will be persisted and sent down sync properly). --- changelog.d/16544.feature | 1 + .../configuration/config_documentation.md | 7 ++ synapse/config/server.py | 11 +- synapse/federation/federation_server.py | 2 +- synapse/federation/sender/__init__.py | 2 +- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/presence.py | 78 +++++++----- synapse/handlers/sync.py | 2 +- synapse/module_api/__init__.py | 33 ++++++ synapse/rest/client/presence.py | 6 +- tests/handlers/test_presence.py | 111 ++++++++++++++++-- tests/rest/client/test_presence.py | 19 ++- 12 files changed, 221 insertions(+), 53 deletions(-) create mode 100644 changelog.d/16544.feature diff --git a/changelog.d/16544.feature b/changelog.d/16544.feature new file mode 100644 index 0000000000..92bf701be6 --- /dev/null +++ b/changelog.d/16544.feature @@ -0,0 +1 @@ +Add a new module API for controller presence. 
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 6cc83c1cd0..a1ca5fa98c 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -230,6 +230,13 @@ Example configuration: presence: enabled: false ``` + +`enabled` can also be set to a special value of "untracked" which ignores updates +received via clients and federation, while still accepting updates from the +[module API](../../modules/index.md). + +*The "untracked" option was added in Synapse 1.96.0.* + --- ### `require_auth_for_profile_requests` diff --git a/synapse/config/server.py b/synapse/config/server.py index 72d30da300..f9e18d2053 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -368,9 +368,14 @@ class ServerConfig(Config): # Whether to enable user presence. presence_config = config.get("presence") or {} - self.use_presence = presence_config.get("enabled") - if self.use_presence is None: - self.use_presence = config.get("use_presence", True) + presence_enabled = presence_config.get("enabled") + if presence_enabled is None: + presence_enabled = config.get("use_presence", True) + + # Whether presence is enabled *at all*. + self.presence_enabled = bool(presence_enabled) + # Whether to internally track presence, requires that presence is enabled, + self.track_presence = self.presence_enabled and presence_enabled != "untracked" # Custom presence router module # This is the legacy way of configuring it (the config should now be put in the modules section) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6ac8d16095..3b27925517 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1395,7 +1395,7 @@ class FederationHandlerRegistry: self._edu_type_to_instance[edu_type] = instance_names async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: - if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE: + if not self.config.server.track_presence and edu_type == EduTypes.PRESENCE: return # Check if we have a handler on this instance diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 7b6b1da090..7980d1a322 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -844,7 +844,7 @@ class FederationSender(AbstractFederationSender): destinations (list[str]) """ - if not states or not self.hs.config.server.use_presence: + if not states or not self.hs.config.server.track_presence: # No-op if presence is disabled. 
return diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index b1d8be866f..4727efcdba 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -439,7 +439,7 @@ class InitialSyncHandler: async def get_presence() -> List[JsonDict]: # If presence is disabled, return an empty list - if not self.hs.config.server.use_presence: + if not self.hs.config.server.presence_enabled: return [] states = await presence_handler.get_states( diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index dfc0b9db07..202beee738 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -192,7 +192,8 @@ class BasePresenceHandler(abc.ABC): self.state = hs.get_state_handler() self.is_mine_id = hs.is_mine_id - self._presence_enabled = hs.config.server.use_presence + self._presence_enabled = hs.config.server.presence_enabled + self._track_presence = hs.config.server.track_presence self._federation = None if hs.should_send_federation(): @@ -512,7 +513,7 @@ class WorkerPresenceHandler(BasePresenceHandler): ) async def _on_shutdown(self) -> None: - if self._presence_enabled: + if self._track_presence: self.hs.get_replication_command_handler().send_command( ClearUserSyncsCommand(self.instance_id) ) @@ -524,7 +525,7 @@ class WorkerPresenceHandler(BasePresenceHandler): is_syncing: bool, last_sync_ms: int, ) -> None: - if self._presence_enabled: + if self._track_presence: self.hs.get_replication_command_handler().send_user_sync( self.instance_id, user_id, device_id, is_syncing, last_sync_ms ) @@ -571,7 +572,7 @@ class WorkerPresenceHandler(BasePresenceHandler): Called by the sync and events servlets to record that a user has connected to this worker and is waiting for some events. """ - if not affect_presence or not self._presence_enabled: + if not affect_presence or not self._track_presence: return _NullContextManager() # Note that this causes last_active_ts to be incremented which is not @@ -702,8 +703,8 @@ class WorkerPresenceHandler(BasePresenceHandler): user_id = target_user.to_string() - # If presence is disabled, no-op - if not self._presence_enabled: + # If tracking of presence is disabled, no-op + if not self._track_presence: return # Proxy request to instance that writes presence @@ -723,7 +724,7 @@ class WorkerPresenceHandler(BasePresenceHandler): with the app. """ # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return # Proxy request to instance that writes presence @@ -760,7 +761,7 @@ class PresenceHandler(BasePresenceHandler): ] = {} now = self.clock.time_msec() - if self._presence_enabled: + if self._track_presence: for state in self.user_to_current_state.values(): # Create a psuedo-device to properly handle time outs. This will # be overridden by any "real" devices within SYNC_ONLINE_TIMEOUT. @@ -831,7 +832,7 @@ class PresenceHandler(BasePresenceHandler): self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") - if self._presence_enabled: + if self._track_presence: # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. @@ -839,6 +840,9 @@ class PresenceHandler(BasePresenceHandler): 30, self.clock.looping_call, self._handle_timeouts, 5000 ) + # Presence information is persisted, whether or not it is being tracked + # internally. 
+ if self._presence_enabled: self.clock.call_later( 60, self.clock.looping_call, @@ -854,7 +858,7 @@ class PresenceHandler(BasePresenceHandler): ) # Used to handle sending of presence to newly joined users/servers - if self._presence_enabled: + if self._track_presence: self.notifier.add_replication_callback(self.notify_new_event) # Presence is best effort and quickly heals itself, so lets just always @@ -905,7 +909,9 @@ class PresenceHandler(BasePresenceHandler): ) async def _update_states( - self, new_states: Iterable[UserPresenceState], force_notify: bool = False + self, + new_states: Iterable[UserPresenceState], + force_notify: bool = False, ) -> None: """Updates presence of users. Sets the appropriate timeouts. Pokes the notifier and federation if and only if the changed presence state @@ -943,7 +949,7 @@ class PresenceHandler(BasePresenceHandler): for new_state in new_states: user_id = new_state.user_id - # Its fine to not hit the database here, as the only thing not in + # It's fine to not hit the database here, as the only thing not in # the current state cache are OFFLINE states, where the only field # of interest is last_active which is safe enough to assume is 0 # here. @@ -957,6 +963,9 @@ class PresenceHandler(BasePresenceHandler): is_mine=self.is_mine_id(user_id), wheel_timer=self.wheel_timer, now=now, + # When overriding disabled presence, don't kick off all the + # wheel timers. + persist=not self._track_presence, ) if force_notify: @@ -1072,7 +1081,7 @@ class PresenceHandler(BasePresenceHandler): with the app. """ # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return user_id = user.to_string() @@ -1124,7 +1133,7 @@ class PresenceHandler(BasePresenceHandler): client that is being used by a user. presence_state: The presence state indicated in the sync request """ - if not affect_presence or not self._presence_enabled: + if not affect_presence or not self._track_presence: return _NullContextManager() curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0) @@ -1284,7 +1293,7 @@ class PresenceHandler(BasePresenceHandler): async def incoming_presence(self, origin: str, content: JsonDict) -> None: """Called when we receive a `m.presence` EDU from a remote server.""" - if not self._presence_enabled: + if not self._track_presence: return now = self.clock.time_msec() @@ -1359,7 +1368,7 @@ class PresenceHandler(BasePresenceHandler): raise SynapseError(400, "Invalid presence state") # If presence is disabled, no-op - if not self._presence_enabled: + if not self._track_presence: return user_id = target_user.to_string() @@ -2118,6 +2127,7 @@ def handle_update( is_mine: bool, wheel_timer: WheelTimer, now: int, + persist: bool, ) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. @@ -2129,6 +2139,8 @@ def handle_update( is_mine: Whether the user is ours wheel_timer now: Time now in ms + persist: True if this state should persist until another update occurs. + Skips insertion into wheel timers. 
Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: @@ -2146,14 +2158,15 @@ def handle_update( if is_mine: if new_state.state == PresenceState.ONLINE: # Idle timer - wheel_timer.insert( - now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER - ) + if not persist: + wheel_timer.insert( + now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER + ) active = now - new_state.last_active_ts < LAST_ACTIVE_GRANULARITY new_state = new_state.copy_and_replace(currently_active=active) - if active: + if active and not persist: wheel_timer.insert( now=now, obj=user_id, @@ -2162,11 +2175,12 @@ def handle_update( if new_state.state != PresenceState.OFFLINE: # User has stopped syncing - wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, - ) + if not persist: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT, + ) last_federate = new_state.last_federation_update_ts if now - last_federate > FEDERATION_PING_INTERVAL: @@ -2174,7 +2188,7 @@ def handle_update( new_state = new_state.copy_and_replace(last_federation_update_ts=now) federation_ping = True - if new_state.state == PresenceState.BUSY: + if new_state.state == PresenceState.BUSY and not persist: wheel_timer.insert( now=now, obj=user_id, @@ -2182,11 +2196,13 @@ def handle_update( ) else: - wheel_timer.insert( - now=now, - obj=user_id, - then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT, - ) + # An update for a remote user was received. + if not persist: + wheel_timer.insert( + now=now, + obj=user_id, + then=new_state.last_federation_update_ts + FEDERATION_TIMEOUT, + ) # Check whether the change was something worth notifying about if should_notify(prev_state, new_state, is_mine): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f75c1548ca..2f1bc5a015 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1517,7 +1517,7 @@ class SyncHandler: # Presence data is included if the server has it enabled and not filtered out. include_presence_data = bool( - self.hs_config.server.use_presence + self.hs_config.server.presence_enabled and not sync_config.filter_collection.blocks_all_presence() ) # Device list updates are sent if a since token is provided. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 0786d20635..09ea6bdecb 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -23,6 +23,7 @@ from typing import ( Generator, Iterable, List, + Mapping, Optional, Tuple, TypeVar, @@ -39,6 +40,7 @@ from twisted.web.resource import Resource from synapse.api import errors from synapse.api.errors import SynapseError +from synapse.api.presence import UserPresenceState from synapse.config import ConfigError from synapse.events import EventBase from synapse.events.presence_router import ( @@ -1184,6 +1186,37 @@ class ModuleApi: presence_events, [destination] ) + async def set_presence_for_users( + self, users: Mapping[str, Tuple[str, Optional[str]]] + ) -> None: + """ + Update the internal presence state of users. + + This can be used for either local or remote users. + + Note that this method can only be run on the process that is configured to write to the + presence stream. By default, this is the main process. + + Added in Synapse v1.96.0. + """ + + # We pull out the presence handler here to break a cyclic + # dependency between the presence router and module API. 
+ presence_handler = self._hs.get_presence_handler() + + from synapse.handlers.presence import PresenceHandler + + assert isinstance(presence_handler, PresenceHandler) + + states = await presence_handler.current_state_for_users(users.keys()) + for user_id, (state, status_msg) in users.items(): + prev_state = states.setdefault(user_id, UserPresenceState.default(user_id)) + states[user_id] = prev_state.copy_and_replace( + state=state, status_msg=status_msg + ) + + await presence_handler._update_states(states.values(), force_notify=True) + def looping_background_call( self, f: Callable, diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py index d578faa969..054a391f26 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py @@ -42,15 +42,13 @@ class PresenceStatusRestServlet(RestServlet): self.clock = hs.get_clock() self.auth = hs.get_auth() - self._use_presence = hs.config.server.use_presence - async def on_GET( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) - if not self._use_presence: + if not self.hs.config.server.presence_enabled: return 200, {"presence": "offline"} if requester.user != user: @@ -96,7 +94,7 @@ class PresenceStatusRestServlet(RestServlet): except Exception: raise SynapseError(400, "Unable to parse state") - if self._use_presence: + if self.hs.config.server.track_presence: await self.presence_handler.set_state(user, requester.device_id, state) return 200, {} diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 41c8c44e02..173b14521a 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import itertools from typing import Optional, cast from unittest.mock import Mock, call @@ -33,6 +33,7 @@ from synapse.handlers.presence import ( IDLE_TIMER, LAST_ACTIVE_GRANULARITY, SYNC_ONLINE_TIMEOUT, + PresenceHandler, handle_timeout, handle_update, ) @@ -66,7 +67,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -108,7 +114,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -153,7 +164,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): ) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -196,7 +212,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -231,7 +252,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): new_state = prev_state.copy_and_replace(state=PresenceState.ONLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=False, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=False, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertFalse(persist_and_notify) @@ -265,7 +291,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): new_state = prev_state.copy_and_replace(state=PresenceState.OFFLINE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -287,7 +318,12 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): new_state = prev_state.copy_and_replace(state=PresenceState.UNAVAILABLE) state, persist_and_notify, federation_ping = handle_update( - prev_state, new_state, is_mine=True, wheel_timer=wheel_timer, now=now + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, ) self.assertTrue(persist_and_notify) @@ -347,6 +383,41 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): # They should be identical. 
self.assertEqual(presence_states_compare, db_presence_states) + @parameterized.expand( + itertools.permutations( + ( + PresenceState.BUSY, + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + ), + 2, + ) + ) + def test_override(self, initial_state: str, final_state: str) -> None: + """Overridden statuses should not go into the wheel timer.""" + wheel_timer = Mock() + user_id = "@foo:bar" + now = 5000000 + + prev_state = UserPresenceState.default(user_id) + prev_state = prev_state.copy_and_replace( + state=initial_state, last_active_ts=now, currently_active=True + ) + + new_state = prev_state.copy_and_replace(state=final_state, last_active_ts=now) + + handle_update( + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=True, + ) + + wheel_timer.insert.assert_not_called() + class PresenceTimeoutTestCase(unittest.TestCase): """Tests different timers and that the timer does not change `status_msg` of user.""" @@ -738,7 +809,6 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.presence_handler = hs.get_presence_handler() - self.clock = hs.get_clock() def test_external_process_timeout(self) -> None: """Test that if an external process doesn't update the records for a while @@ -1471,6 +1541,29 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): self.assertEqual(new_state.state, state) self.assertEqual(new_state.status_msg, status_msg) + @unittest.override_config({"presence": {"enabled": "untracked"}}) + def test_untracked_does_not_idle(self) -> None: + """Untracked presence should not idle.""" + + # Mark user as online, this needs to reach into internals in order to + # bypass checks. + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + assert isinstance(self.presence_handler, PresenceHandler) + self.get_success( + self.presence_handler._update_states( + [state.copy_and_replace(state=PresenceState.ONLINE)] + ) + ) + + # Ensure the update took. + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.ONLINE) + + # The timeout should not fire and the state should be the same. + self.reactor.advance(SYNC_ONLINE_TIMEOUT) + state = self.get_success(self.presence_handler.get_state(self.user_id_obj)) + self.assertEqual(state.state, PresenceState.ONLINE) + class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 66b387cea3..4e89107e54 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -50,7 +50,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): PUT to the status endpoint with use_presence enabled will call set_state on the presence handler. """ - self.hs.config.server.use_presence = True + self.hs.config.server.presence_enabled = True body = {"presence": "here", "status_msg": "beep boop"} channel = self.make_request( @@ -63,7 +63,22 @@ class PresenceTestCase(unittest.HomeserverTestCase): @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self) -> None: """ - PUT to the status endpoint with use_presence disabled will NOT call + PUT to the status endpoint with presence disabled will NOT call + set_state on the presence handler. 
+ """ + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(self.presence_handler.set_state.call_count, 0) + + @unittest.override_config({"presence": {"enabled": "untracked"}}) + def test_put_presence_untracked(self) -> None: + """ + PUT to the status endpoint with presence untracked will NOT call set_state on the presence handler. """ From 679c691f6f7c4f7901e6d075a645a8ade20f44d5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Oct 2023 15:12:28 -0400 Subject: [PATCH 097/142] Remove more usages of cursor_to_dict. (#16551) Mostly to improve type safety. --- changelog.d/16551.misc | 1 + synapse/handlers/identity.py | 18 +++---- synapse/handlers/ui_auth/checkers.py | 6 +-- synapse/media/media_repository.py | 5 +- synapse/rest/admin/federation.py | 14 ++++- synapse/rest/admin/rooms.py | 12 ++++- synapse/rest/admin/statistics.py | 13 ++++- synapse/storage/database.py | 30 ++--------- .../storage/databases/main/censor_events.py | 2 +- synapse/storage/databases/main/devices.py | 3 +- .../storage/databases/main/end_to_end_keys.py | 1 - .../databases/main/events_bg_updates.py | 7 +-- .../main/events_forward_extremities.py | 15 ++++-- .../databases/main/media_repository.py | 19 ++++--- .../storage/databases/main/registration.py | 43 ++++++++++----- synapse/storage/databases/main/roommember.py | 4 +- synapse/storage/databases/main/search.py | 52 +++++++++++-------- synapse/storage/databases/main/stats.py | 15 ++++-- synapse/storage/databases/main/stream.py | 3 +- .../storage/databases/main/transactions.py | 28 ++++++++-- .../storage/databases/main/user_directory.py | 14 +++-- synapse/storage/databases/state/bg_updates.py | 1 - tests/federation/test_federation_catch_up.py | 1 - tests/storage/test_background_update.py | 16 +++--- tests/storage/test_profile.py | 2 +- tests/storage/test_user_filters.py | 2 +- 26 files changed, 193 insertions(+), 134 deletions(-) create mode 100644 changelog.d/16551.misc diff --git a/changelog.d/16551.misc b/changelog.d/16551.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16551.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 472879c964..c041b67993 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -19,6 +19,8 @@ import logging import urllib.parse from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple +import attr + from synapse.api.errors import ( CodeMessageException, Codes, @@ -357,9 +359,9 @@ class IdentityHandler: # Check to see if a session already exists and that it is not yet # marked as validated - if session and session.get("validated_at") is None: - session_id = session["session_id"] - last_send_attempt = session["last_send_attempt"] + if session and session.validated_at is None: + session_id = session.session_id + last_send_attempt = session.last_send_attempt # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: @@ -480,7 +482,6 @@ class IdentityHandler: # We don't actually know which medium this 3PID is. 
Thus we first assume it's email, # and if validation fails we try msisdn - validation_session = None # Try to validate as email if self.hs.config.email.can_verify_email: @@ -488,19 +489,18 @@ class IdentityHandler: validation_session = await self.store.get_threepid_validation_session( "email", client_secret, sid=sid, validated=True ) - - if validation_session: - return validation_session + if validation_session: + return attr.asdict(validation_session) # Try to validate as msisdn if self.hs.config.registration.account_threepid_delegate_msisdn: # Ask our delegated msisdn identity server - validation_session = await self.threepid_from_creds( + return await self.threepid_from_creds( self.hs.config.registration.account_threepid_delegate_msisdn, threepid_creds, ) - return validation_session + return None async def proxy_msisdn_submit_token( self, id_server: str, client_secret: str, sid: str, token: str diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 78a75bfed6..ab8f7610e9 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -187,9 +187,9 @@ class _BaseThreepidAuthChecker: if row: threepid = { - "medium": row["medium"], - "address": row["address"], - "validated_at": row["validated_at"], + "medium": row.medium, + "address": row.address, + "validated_at": row.validated_at, } # Valid threepid returned, delete from the db diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 7fd46901f7..72b0f1c5de 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -949,10 +949,7 @@ class MediaRepository: deleted = 0 - for media in old_media: - origin = media["media_origin"] - media_id = media["media_id"] - file_id = media["filesystem_id"] + for origin, media_id, file_id in old_media: key = (origin, media_id) logger.info("Deleting: %r", key) diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index 8a617af599..a6ce787da1 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -85,7 +85,19 @@ class ListDestinationsRestServlet(RestServlet): destinations, total = await self._store.get_destinations_paginate( start, limit, destination, order_by, direction ) - response = {"destinations": destinations, "total": total} + response = { + "destinations": [ + { + "destination": r[0], + "retry_last_ts": r[1], + "retry_interval": r[2], + "failure_ts": r[3], + "last_successful_stream_ordering": r[4], + } + for r in destinations + ], + "total": total, + } if (start + limit) < total: response["next_token"] = str(start + len(destinations)) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 436718c8b2..2d4da38db9 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -724,7 +724,17 @@ class ForwardExtremitiesRestServlet(ResolveRoomIdMixin, RestServlet): room_id, _ = await self.resolve_room_id(room_identifier) extremities = await self.store.get_forward_extremities_for_room(room_id) - return HTTPStatus.OK, {"count": len(extremities), "results": extremities} + result = [ + { + "event_id": ex[0], + "state_group": ex[1], + "depth": ex[2], + "received_ts": ex[3], + } + for ex in extremities + ] + + return HTTPStatus.OK, {"count": len(extremities), "results": result} class RoomEventContextServlet(RestServlet): diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 19780e4b4c..75d8a37ccf 100644 --- a/synapse/rest/admin/statistics.py +++ 
b/synapse/rest/admin/statistics.py @@ -108,7 +108,18 @@ class UserMediaStatisticsRestServlet(RestServlet): users_media, total = await self.store.get_users_media_usage_paginate( start, limit, from_ts, until_ts, order_by, direction, search_term ) - ret = {"users": users_media, "total": total} + ret = { + "users": [ + { + "user_id": r[0], + "displayname": r[1], + "media_count": r[2], + "media_length": r[3], + } + for r in users_media + ], + "total": total, + } if (start + limit) < total: ret["next_token"] = start + len(users_media) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 774d5c12f0..b1ece63845 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -35,7 +35,6 @@ from typing import ( Tuple, Type, TypeVar, - Union, cast, overload, ) @@ -1047,43 +1046,20 @@ class DatabasePool: results = [dict(zip(col_headers, row)) for row in cursor] return results - @overload - async def execute( - self, desc: str, decoder: Literal[None], query: str, *args: Any - ) -> List[Tuple[Any, ...]]: - ... - - @overload - async def execute( - self, desc: str, decoder: Callable[[Cursor], R], query: str, *args: Any - ) -> R: - ... - - async def execute( - self, - desc: str, - decoder: Optional[Callable[[Cursor], R]], - query: str, - *args: Any, - ) -> Union[List[Tuple[Any, ...]], R]: + async def execute(self, desc: str, query: str, *args: Any) -> List[Tuple[Any, ...]]: """Runs a single query for a result set. Args: desc: description of the transaction, for logging and metrics - decoder - The function which can resolve the cursor results to - something meaningful. query - The query string to execute *args - Query args. Returns: The result of decoder(results) """ - def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]: + def interaction(txn: LoggingTransaction) -> List[Tuple[Any, ...]]: txn.execute(query, args) - if decoder: - return decoder(txn) - else: - return txn.fetchall() + return txn.fetchall() return await self.runInteraction(desc, interaction) diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 58177ecec1..711fdddd4e 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -93,7 +93,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase """ rows = await self.db_pool.execute( - "_censor_redactions_fetch", None, sql, before_ts, 100 + "_censor_redactions_fetch", sql, before_ts, 100 ) updates = [] diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 0b75f6763a..49edbb9e06 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -894,7 +894,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): rows = await self.db_pool.execute( "get_all_devices_changed", - None, sql, from_key, to_key, @@ -978,7 +977,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): WHERE from_user_id = ? AND stream_id > ? 
""" rows = await self.db_pool.execute( - "get_users_whose_signatures_changed", None, sql, user_id, from_key + "get_users_whose_signatures_changed", sql, user_id, from_key ) return {user for row in rows for user in db_to_json(row[0])} else: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index f13d776b0d..f70f95eeba 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -155,7 +155,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """ rows = await self.db_pool.execute( "get_e2e_device_keys_for_federation_query_check", - None, sql, now_stream_id, user_id, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index c5fce1c82b..0061805150 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1310,12 +1310,9 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the # indexes on it. - # We need to pass execute a dummy function to handle the txn's result otherwise - # it tries to call fetchall() on it and fails because there's no result to fetch. - await self.db_pool.execute( + await self.db_pool.runInteraction( "background_analyze_new_stream_ordering_column", - lambda txn: None, - "ANALYZE events(stream_ordering2)", + lambda txn: txn.execute("ANALYZE events(stream_ordering2)"), ) await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index f851bff604..0ba84b1469 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import Any, Dict, List +from typing import List, Optional, Tuple, cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -91,12 +91,17 @@ class EventForwardExtremitiesStore( async def get_forward_extremities_for_room( self, room_id: str - ) -> List[Dict[str, Any]]: - """Get list of forward extremities for a room.""" + ) -> List[Tuple[str, int, int, Optional[int]]]: + """ + Get list of forward extremities for a room. + + Returns: + A list of tuples of event_id, state_group, depth, and received_ts. 
+ """ def get_forward_extremities_for_room_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, int, int, Optional[int]]]: sql = """ SELECT event_id, state_group, depth, received_ts FROM event_forward_extremities @@ -106,7 +111,7 @@ class EventForwardExtremitiesStore( """ txn.execute(sql, (room_id,)) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, int, int, Optional[int]]], txn.fetchall()) return await self.db_pool.runInteraction( "get_forward_extremities_for_room", diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index f82140b2e8..aeb3db596c 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -650,7 +650,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): async def get_remote_media_ids( self, before_ts: int, include_quarantined_media: bool - ) -> List[Dict[str, str]]: + ) -> List[Tuple[str, str, str]]: """ Retrieve a list of server name, media ID tuples from the remote media cache. @@ -664,12 +664,14 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): A list of tuples containing: * The server name of homeserver where the media originates from, * The ID of the media. + * The filesystem ID. + """ + + sql = """ + SELECT media_origin, media_id, filesystem_id + FROM remote_media_cache + WHERE last_access_ts < ? """ - sql = ( - "SELECT media_origin, media_id, filesystem_id" - " FROM remote_media_cache" - " WHERE last_access_ts < ?" - ) if include_quarantined_media is False: # Only include media that has not been quarantined @@ -677,8 +679,9 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): AND quarantined_by IS NULL """ - return await self.db_pool.execute( - "get_remote_media_ids", self.db_pool.cursor_to_dict, sql, before_ts + return cast( + List[Tuple[str, str, str]], + await self.db_pool.execute("get_remote_media_ids", sql, before_ts), ) async def delete_remote_media(self, media_origin: str, media_id: str) -> None: diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index b0ef7be155..e09ab21593 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -151,6 +151,22 @@ class ThreepidResult: added_at: int +@attr.s(frozen=True, slots=True, auto_attribs=True) +class ThreepidValidationSession: + address: str + """address of the 3pid""" + medium: str + """medium of the 3pid""" + client_secret: str + """a secret provided by the client for this validation session""" + session_id: str + """ID of the validation session""" + last_send_attempt: int + """a number serving to dedupe send attempts for this session""" + validated_at: Optional[int] + """timestamp of when this session was validated if so""" + + class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__( self, @@ -1172,7 +1188,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): address: Optional[str] = None, sid: Optional[str] = None, validated: Optional[bool] = True, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[ThreepidValidationSession]: """Gets a session_id and last_send_attempt (if available) for a combination of validation metadata @@ -1187,15 +1203,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): perform no filtering Returns: - A dict containing the following: - * address - address of the 3pid - * medium - medium of the 3pid - * 
client_secret - a secret provided by the client for this validation session - * session_id - ID of the validation session - * send_attempt - a number serving to dedupe send attempts for this session - * validated_at - timestamp of when this session was validated if so - - Otherwise None if a validation session is not found + A ThreepidValidationSession or None if a validation session is not found """ if not client_secret: raise SynapseError( @@ -1214,7 +1222,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): def get_threepid_validation_session_txn( txn: LoggingTransaction, - ) -> Optional[Dict[str, Any]]: + ) -> Optional[ThreepidValidationSession]: sql = """ SELECT address, session_id, medium, client_secret, last_send_attempt, validated_at @@ -1229,11 +1237,18 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): sql += " LIMIT 1" txn.execute(sql, list(keyvalues.values())) - rows = self.db_pool.cursor_to_dict(txn) - if not rows: + row = txn.fetchone() + if not row: return None - return rows[0] + return ThreepidValidationSession( + address=row[0], + session_id=row[1], + medium=row[2], + client_secret=row[3], + last_send_attempt=row[4], + validated_at=row[5], + ) return await self.db_pool.runInteraction( "get_threepid_validation_session", get_threepid_validation_session_txn diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index a1627dffb7..67e149b586 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -940,7 +940,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): like_clause = "%:" + host rows = await self.db_pool.execute( - "is_host_joined", None, sql, membership, room_id, like_clause + "is_host_joined", sql, membership, room_id, like_clause ) if not rows: @@ -1168,7 +1168,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): AND forgotten = 0; """ - rows = await self.db_pool.execute("is_forgotten_room", None, sql, room_id) + rows = await self.db_pool.execute("is_forgotten_room", sql, room_id) # `count(*)` returns always an integer # If any rows still exist it means someone has not forgotten this room yet diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 1d69c4a5f0..dbde9130c6 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -26,6 +26,7 @@ from typing import ( Set, Tuple, Union, + cast, ) import attr @@ -506,16 +507,18 @@ class SearchStore(SearchBackgroundUpdateStore): # entire table from the database. sql += " ORDER BY rank DESC LIMIT 500" - results = await self.db_pool.execute( - "search_msgs", self.db_pool.cursor_to_dict, sql, *args + # List of tuples of (rank, room_id, event_id). 
+ results = cast( + List[Tuple[Union[int, float], str, str]], + await self.db_pool.execute("search_msgs", sql, *args), ) - results = list(filter(lambda row: row["room_id"] in room_ids, results)) + results = list(filter(lambda row: row[1] in room_ids, results)) # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] - [r["event_id"] for r in results], + [r[2] for r in results], redact_behaviour=EventRedactBehaviour.block, ) @@ -527,16 +530,18 @@ class SearchStore(SearchBackgroundUpdateStore): count_sql += " GROUP BY room_id" - count_results = await self.db_pool.execute( - "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args + # List of tuples of (room_id, count). + count_results = cast( + List[Tuple[str, int]], + await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) - count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) + count = sum(row[1] for row in count_results if row[0] in room_ids) return { "results": [ - {"event": event_map[r["event_id"]], "rank": r["rank"]} + {"event": event_map[r[2]], "rank": r[0]} for r in results - if r["event_id"] in event_map + if r[2] in event_map ], "highlights": highlights, "count": count, @@ -604,7 +609,7 @@ class SearchStore(SearchBackgroundUpdateStore): search_query = search_term sql = """ SELECT ts_rank_cd(vector, websearch_to_tsquery('english', ?)) as rank, - origin_server_ts, stream_ordering, room_id, event_id + room_id, event_id, origin_server_ts, stream_ordering FROM event_search WHERE vector @@ websearch_to_tsquery('english', ?) AND """ @@ -665,16 +670,18 @@ class SearchStore(SearchBackgroundUpdateStore): # mypy expects to append only a `str`, not an `int` args.append(limit) - results = await self.db_pool.execute( - "search_rooms", self.db_pool.cursor_to_dict, sql, *args + # List of tuples of (rank, room_id, event_id, origin_server_ts, stream_ordering). + results = cast( + List[Tuple[Union[int, float], str, str, int, int]], + await self.db_pool.execute("search_rooms", sql, *args), ) - results = list(filter(lambda row: row["room_id"] in room_ids, results)) + results = list(filter(lambda row: row[1] in room_ids, results)) # We set redact_behaviour to block here to prevent redacted events being returned in # search results (which is a data leak) events = await self.get_events_as_list( # type: ignore[attr-defined] - [r["event_id"] for r in results], + [r[2] for r in results], redact_behaviour=EventRedactBehaviour.block, ) @@ -686,22 +693,23 @@ class SearchStore(SearchBackgroundUpdateStore): count_sql += " GROUP BY room_id" - count_results = await self.db_pool.execute( - "search_rooms_count", self.db_pool.cursor_to_dict, count_sql, *count_args + # List of tuples of (room_id, count). 
+ count_results = cast( + List[Tuple[str, int]], + await self.db_pool.execute("search_rooms_count", count_sql, *count_args), ) - count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) + count = sum(row[1] for row in count_results if row[0] in room_ids) return { "results": [ { - "event": event_map[r["event_id"]], - "rank": r["rank"], - "pagination_token": "%s,%s" - % (r["origin_server_ts"], r["stream_ordering"]), + "event": event_map[r[2]], + "rank": r[0], + "pagination_token": "%s,%s" % (r[3], r[4]), } for r in results - if r["event_id"] in event_map + if r[2] in event_map ], "highlights": highlights, "count": count, diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 5b2d0ba870..e96c9b0486 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -679,7 +679,7 @@ class StatsStore(StateDeltasStore): order_by: Optional[str] = UserSortOrder.USER_ID.value, direction: Direction = Direction.FORWARDS, search_term: Optional[str] = None, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: """Function to retrieve a paginated list of users and their uploaded local media (size and number). This will return a json list of users and the total number of users matching the filter criteria. @@ -692,14 +692,19 @@ class StatsStore(StateDeltasStore): order_by: the sort order of the returned list direction: sort ascending or descending search_term: a string to filter user names by + Returns: - A list of user dicts and an integer representing the total number of - users that exist given this query + A tuple of: + A list of tuples of user information (the user ID, displayname, + total number of media, total length of media) and + + An integer representing the total number of users that exist + given this query """ def get_users_media_usage_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[Tuple[str, Optional[str], int, int]], int]: filters = [] args: list = [] @@ -773,7 +778,7 @@ class StatsStore(StateDeltasStore): args += [limit, start] txn.execute(sql, args) - users = self.db_pool.cursor_to_dict(txn) + users = cast(List[Tuple[str, Optional[str], int, int]], txn.fetchall()) return users, count diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 872df6bda1..2225f8272d 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1078,7 +1078,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ row = await self.db_pool.execute( - "get_current_topological_token", None, sql, room_id, room_id, stream_key + "get_current_topological_token", sql, room_id, room_id, stream_key ) return row[0][0] if row else 0 @@ -1636,7 +1636,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rows = await self.db_pool.execute( "get_timeline_gaps", - None, sql, room_id, from_token.stream if from_token else 0, diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index c4a6475060..fecddb4144 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -478,7 +478,10 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): destination: Optional[str] = None, order_by: str = DestinationSortOrder.DESTINATION.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[ + 
List[Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]]], + int, + ]: """Function to retrieve a paginated list of destinations. This will return a json list of destinations and the total number of destinations matching the filter criteria. @@ -490,13 +493,23 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): order_by: the sort order of the returned list direction: sort ascending or descending Returns: - A tuple of a list of mappings from destination to information + A tuple of a list of tuples of destination information: + * destination + * retry_last_ts + * retry_interval + * failure_ts + * last_successful_stream_ordering and a count of total destinations. """ def get_destinations_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[ + List[ + Tuple[str, Optional[int], Optional[int], Optional[int], Optional[int]] + ], + int, + ]: order_by_column = DestinationSortOrder(order_by).value if direction == Direction.BACKWARDS: @@ -523,7 +536,14 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): LIMIT ? OFFSET ? """ txn.execute(sql, args + [limit, start]) - destinations = self.db_pool.cursor_to_dict(txn) + destinations = cast( + List[ + Tuple[ + str, Optional[int], Optional[int], Optional[int], Optional[int] + ] + ], + txn.fetchall(), + ) return destinations, count return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 23eb92c514..a9f5d68b63 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -1145,15 +1145,19 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): raise Exception("Unrecognized database engine") results = cast( - List[UserProfile], - await self.db_pool.execute( - "search_user_dir", self.db_pool.cursor_to_dict, sql, *args - ), + List[Tuple[str, Optional[str], Optional[str]]], + await self.db_pool.execute("search_user_dir", sql, *args), ) limited = len(results) > limit - return {"limited": limited, "results": results[0:limit]} + return { + "limited": limited, + "results": [ + {"user_id": r[0], "display_name": r[1], "avatar_url": r[2]} + for r in results[0:limit] + ], + } def _filter_text_for_index(text: str) -> str: diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 6ff533a129..0f9c550b27 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -359,7 +359,6 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): if max_group is None: rows = await self.db_pool.execute( "_background_deduplicate_state", - None, "SELECT coalesce(max(id), 0) FROM state_groups", ) max_group = rows[0][0] diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 75ae740b43..08214b0013 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -100,7 +100,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): event_id, stream_ordering = self.get_success( self.hs.get_datastores().main.db_pool.execute( "test:get_destination_rooms", - None, """ SELECT event_id, stream_ordering FROM destination_rooms dr diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 3f5bfa09d4..67ea640902 100644 --- a/tests/storage/test_background_update.py +++ 
b/tests/storage/test_background_update.py @@ -457,8 +457,8 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): ); """ self.get_success( - self.store.db_pool.execute( - "test_not_null_constraint", lambda _: None, table_sql + self.store.db_pool.runInteraction( + "test_not_null_constraint", lambda txn: txn.execute(table_sql) ) ) @@ -466,8 +466,8 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): # using SQLite. index_sql = "CREATE INDEX test_index ON test_constraint(a)" self.get_success( - self.store.db_pool.execute( - "test_not_null_constraint", lambda _: None, index_sql + self.store.db_pool.runInteraction( + "test_not_null_constraint", lambda txn: txn.execute(index_sql) ) ) @@ -574,13 +574,13 @@ class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase): ); """ self.get_success( - self.store.db_pool.execute( - "test_foreign_key_constraint", lambda _: None, base_sql + self.store.db_pool.runInteraction( + "test_foreign_key_constraint", lambda txn: txn.execute(base_sql) ) ) self.get_success( - self.store.db_pool.execute( - "test_foreign_key_constraint", lambda _: None, table_sql + self.store.db_pool.runInteraction( + "test_foreign_key_constraint", lambda txn: txn.execute(table_sql) ) ) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index 95f99f4130..6afb5403bd 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -120,7 +120,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): res = self.get_success( self.store.db_pool.execute( - "", None, "SELECT full_user_id from profiles ORDER BY full_user_id" + "", "SELECT full_user_id from profiles ORDER BY full_user_id" ) ) self.assertEqual(len(res), len(expected_values)) diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py index d4637d9d1e..2da6a018e8 100644 --- a/tests/storage/test_user_filters.py +++ b/tests/storage/test_user_filters.py @@ -87,7 +87,7 @@ class UserFiltersStoreTestCase(unittest.HomeserverTestCase): res = self.get_success( self.store.db_pool.execute( - "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id" + "", "SELECT full_user_id from user_filters ORDER BY full_user_id" ) ) self.assertEqual(len(res), len(expected_values)) From c02406ac711095eedbeeff229af8e368f77196aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2023 10:04:08 +0100 Subject: [PATCH 098/142] Add new module API for adding custom fields to events `unsigned` section (#16549) --- changelog.d/16549.feature | 1 + docs/SUMMARY.md | 3 +- ..._extra_fields_to_client_events_unsigned.md | 32 ++++++++++ synapse/events/utils.py | 48 ++++++++++++--- synapse/handlers/events.py | 2 +- synapse/handlers/initial_sync.py | 14 ++--- synapse/handlers/message.py | 2 +- synapse/handlers/pagination.py | 4 +- synapse/handlers/relations.py | 8 ++- synapse/handlers/search.py | 8 +-- synapse/module_api/__init__.py | 21 +++++++ synapse/rest/admin/rooms.py | 10 ++-- synapse/rest/client/events.py | 2 +- synapse/rest/client/notifications.py | 2 +- synapse/rest/client/room.py | 10 ++-- synapse/rest/client/sync.py | 8 +-- synapse/server.py | 2 +- .../test_event_unsigned_addition.py | 59 +++++++++++++++++++ tests/rest/client/test_retention.py | 2 +- 19 files changed, 194 insertions(+), 44 deletions(-) create mode 100644 changelog.d/16549.feature create mode 100644 docs/modules/add_extra_fields_to_client_events_unsigned.md create mode 100644 tests/module_api/test_event_unsigned_addition.py diff 
--git a/changelog.d/16549.feature b/changelog.d/16549.feature new file mode 100644 index 0000000000..51129200f3 --- /dev/null +++ b/changelog.d/16549.feature @@ -0,0 +1 @@ +Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 31b3032029..c50121d5f7 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -19,7 +19,7 @@ # Usage - [Federation](federate.md) - [Configuration](usage/configuration/README.md) - - [Configuration Manual](usage/configuration/config_documentation.md) + - [Configuration Manual](usage/configuration/config_documentation.md) - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md) - [Logging Sample Config File](usage/configuration/logging_sample_config.md) - [Structured Logging](structured_logging.md) @@ -48,6 +48,7 @@ - [Password auth provider callbacks](modules/password_auth_provider_callbacks.md) - [Background update controller callbacks](modules/background_update_controller_callbacks.md) - [Account data callbacks](modules/account_data_callbacks.md) + - [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md) - [Porting a legacy module to the new interface](modules/porting_legacy_module.md) - [Workers](workers.md) - [Using `synctl` with Workers](synctl_workers.md) diff --git a/docs/modules/add_extra_fields_to_client_events_unsigned.md b/docs/modules/add_extra_fields_to_client_events_unsigned.md new file mode 100644 index 0000000000..c4fd19bde0 --- /dev/null +++ b/docs/modules/add_extra_fields_to_client_events_unsigned.md @@ -0,0 +1,32 @@ +# Add extra fields to client events unsigned section callbacks + +_First introduced in Synapse v1.96.0_ + +This callback allows modules to add extra fields to the unsigned section of +events when they get sent down to clients. + +These get called *every* time an event is to be sent to clients, so care should +be taken to ensure with respect to performance. + +### API + +To register the callback, use +`register_add_extra_fields_to_unsigned_client_event_callbacks` on the +`ModuleApi`. + +The callback should be of the form + +```python +async def add_field_to_unsigned( + event: EventBase, +) -> JsonDict: +``` + +where the extra fields to add to the event's unsigned section is returned. +(Modules must not attempt to modify the `event` directly). + +This cannot be used to alter the "core" fields in the unsigned section emitted +by Synapse itself. + +If multiple such callbacks try to add the same field to an event's unsigned +section, the last-registered callback wins. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 53af423a5a..ac2cf83d9f 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -17,6 +17,7 @@ import re from typing import ( TYPE_CHECKING, Any, + Awaitable, Callable, Dict, Iterable, @@ -45,6 +46,7 @@ from . import EventBase if TYPE_CHECKING: from synapse.handlers.relations import BundledAggregations + from synapse.server import HomeServer # Split strings on "." but not "\." (or "\\\."). @@ -56,6 +58,13 @@ CANONICALJSON_MAX_INT = (2**53) - 1 CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT +# Module API callback that allows adding fields to the unsigned section of +# events that are sent to clients. 
+ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK = Callable[ + [EventBase], Awaitable[JsonDict] +] + + def prune_event(event: EventBase) -> EventBase: """Returns a pruned version of the given event, which removes all keys we don't know about or think could potentially be dodgy. @@ -509,7 +518,13 @@ class EventClientSerializer: clients. """ - def serialize_event( + def __init__(self, hs: "HomeServer") -> None: + self._store = hs.get_datastores().main + self._add_extra_fields_to_unsigned_client_event_callbacks: List[ + ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ] = [] + + async def serialize_event( self, event: Union[JsonDict, EventBase], time_now: int, @@ -535,10 +550,21 @@ class EventClientSerializer: serialized_event = serialize_event(event, time_now, config=config) + new_unsigned = {} + for callback in self._add_extra_fields_to_unsigned_client_event_callbacks: + u = await callback(event) + new_unsigned.update(u) + + if new_unsigned: + # We do the `update` this way round so that modules can't clobber + # existing fields. + new_unsigned.update(serialized_event["unsigned"]) + serialized_event["unsigned"] = new_unsigned + # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: if event.event_id in bundle_aggregations: - self._inject_bundled_aggregations( + await self._inject_bundled_aggregations( event, time_now, config, @@ -548,7 +574,7 @@ class EventClientSerializer: return serialized_event - def _inject_bundled_aggregations( + async def _inject_bundled_aggregations( self, event: EventBase, time_now: int, @@ -590,7 +616,7 @@ class EventClientSerializer: # said that we should only include the `event_id`, `origin_server_ts` and # `sender` of the edit; however MSC3925 proposes extending it to the whole # of the edit, which is what we do here. - serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( + serialized_aggregations[RelationTypes.REPLACE] = await self.serialize_event( event_aggregations.replace, time_now, config=config, @@ -600,7 +626,7 @@ class EventClientSerializer: if event_aggregations.thread: thread = event_aggregations.thread - serialized_latest_event = self.serialize_event( + serialized_latest_event = await self.serialize_event( thread.latest_event, time_now, config=config, @@ -623,7 +649,7 @@ class EventClientSerializer: "m.relations", {} ).update(serialized_aggregations) - def serialize_events( + async def serialize_events( self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, @@ -645,7 +671,7 @@ class EventClientSerializer: The list of serialized events """ return [ - self.serialize_event( + await self.serialize_event( event, time_now, config=config, @@ -654,6 +680,14 @@ class EventClientSerializer: for event in events ] + def register_add_extra_fields_to_unsigned_client_event_callback( + self, callback: ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ) -> None: + """Register a callback that returns additions to the unsigned section of + serialized events. 
+ """ + self._add_extra_fields_to_unsigned_client_event_callbacks.append(callback) + _PowerLevel = Union[str, int] PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index d12803bf0f..756825061c 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -120,7 +120,7 @@ class EventStreamHandler: events.extend(to_add) - chunks = self._event_serializer.serialize_events( + chunks = await self._event_serializer.serialize_events( events, time_now, config=SerializeEventConfig( diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 4727efcdba..c4bec955fe 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -173,7 +173,7 @@ class InitialSyncHandler: d["inviter"] = event.sender invite_event = await self.store.get_event(event.event_id) - d["invite"] = self._event_serializer.serialize_event( + d["invite"] = await self._event_serializer.serialize_event( invite_event, time_now, config=serializer_options, @@ -225,7 +225,7 @@ class InitialSyncHandler: d["messages"] = { "chunk": ( - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now=time_now, config=serializer_options, @@ -235,7 +235,7 @@ class InitialSyncHandler: "end": await end_token.to_string(self.store), } - d["state"] = self._event_serializer.serialize_events( + d["state"] = await self._event_serializer.serialize_events( current_state.values(), time_now=time_now, config=serializer_options, @@ -387,7 +387,7 @@ class InitialSyncHandler: "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now, config=serialize_options ) ), @@ -396,7 +396,7 @@ class InitialSyncHandler: }, "state": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( room_state.values(), time_now, config=serialize_options ) ), @@ -420,7 +420,7 @@ class InitialSyncHandler: time_now = self.clock.time_msec() serialize_options = SerializeEventConfig(requester=requester) # Don't bundle aggregations as this is a deprecated API. - state = self._event_serializer.serialize_events( + state = await self._event_serializer.serialize_events( current_state.values(), time_now, config=serialize_options, @@ -497,7 +497,7 @@ class InitialSyncHandler: "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. 
- self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now, config=serialize_options ) ), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 41a35ce510..a0b4a93ae8 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -244,7 +244,7 @@ class MessageHandler: ) room_state = room_state_events[membership_event_id] - events = self._event_serializer.serialize_events( + events = await self._event_serializer.serialize_events( room_state.values(), self.clock.time_msec(), config=SerializeEventConfig(requester=requester), diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 878f267a4e..87e51bca48 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -657,7 +657,7 @@ class PaginationHandler: chunk = { "chunk": ( - self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( events, time_now, config=serialize_options, @@ -669,7 +669,7 @@ class PaginationHandler: } if state: - chunk["state"] = self._event_serializer.serialize_events( + chunk["state"] = await self._event_serializer.serialize_events( state, time_now, config=serialize_options ) diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 9b13448cdd..a15983afae 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -167,7 +167,7 @@ class RelationsHandler: now = self._clock.time_msec() serialize_options = SerializeEventConfig(requester=requester) return_value: JsonDict = { - "chunk": self._event_serializer.serialize_events( + "chunk": await self._event_serializer.serialize_events( events, now, bundle_aggregations=aggregations, @@ -177,7 +177,9 @@ class RelationsHandler: if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. 
- return_value["original_event"] = self._event_serializer.serialize_event( + return_value[ + "original_event" + ] = await self._event_serializer.serialize_event( event, now, bundle_aggregations=None, @@ -602,7 +604,7 @@ class RelationsHandler: ) now = self._clock.time_msec() - serialized_events = self._event_serializer.serialize_events( + serialized_events = await self._event_serializer.serialize_events( events, now, bundle_aggregations=aggregations ) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index aad4706f14..f51ed9d5bb 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -374,13 +374,13 @@ class SearchHandler: serialize_options = SerializeEventConfig(requester=requester) for context in contexts.values(): - context["events_before"] = self._event_serializer.serialize_events( + context["events_before"] = await self._event_serializer.serialize_events( context["events_before"], time_now, bundle_aggregations=aggregations, config=serialize_options, ) - context["events_after"] = self._event_serializer.serialize_events( + context["events_after"] = await self._event_serializer.serialize_events( context["events_after"], time_now, bundle_aggregations=aggregations, @@ -390,7 +390,7 @@ class SearchHandler: results = [ { "rank": search_result.rank_map[e.event_id], - "result": self._event_serializer.serialize_event( + "result": await self._event_serializer.serialize_event( e, time_now, bundle_aggregations=aggregations, @@ -409,7 +409,7 @@ class SearchHandler: if state_results: rooms_cat_res["state"] = { - room_id: self._event_serializer.serialize_events( + room_id: await self._event_serializer.serialize_events( state_events, time_now, config=serialize_options ) for room_id, state_events in state_results.items() diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 09ea6bdecb..755c59274c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -48,6 +48,7 @@ from synapse.events.presence_router import ( GET_USERS_FOR_STATES_CALLBACK, PresenceRouter, ) +from synapse.events.utils import ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, @@ -259,6 +260,7 @@ class ModuleApi: self.custom_template_dir = hs.config.server.custom_template_directory self._callbacks = hs.get_module_api_callbacks() self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled + self._event_serializer = hs.get_event_client_serializer() try: app_name = self._hs.config.email.email_app_name @@ -490,6 +492,25 @@ class ModuleApi: """ self._hs.register_module_web_resource(path, resource) + def register_add_extra_fields_to_unsigned_client_event_callbacks( + self, + *, + add_field_to_unsigned_callback: Optional[ + ADD_EXTRA_FIELDS_TO_UNSIGNED_CLIENT_EVENT_CALLBACK + ] = None, + ) -> None: + """Registers a callback that can be used to add fields to the unsigned + section of events. + + The callback is called every time an event is sent down to a client. + + Added in Synapse 1.96.0 + """ + if add_field_to_unsigned_callback is not None: + self._event_serializer.register_add_extra_fields_to_unsigned_client_event_callback( + add_field_to_unsigned_callback + ) + ######################################################################### # The following methods can be called by the module at any point in time. 
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 2d4da38db9..0659f22a89 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -444,7 +444,7 @@ class RoomStateRestServlet(RestServlet): event_ids = await self._storage_controllers.state.get_current_state_ids(room_id) events = await self.store.get_events(event_ids.values()) now = self.clock.time_msec() - room_state = self._event_serializer.serialize_events(events.values(), now) + room_state = await self._event_serializer.serialize_events(events.values(), now) ret = {"state": room_state} return HTTPStatus.OK, ret @@ -789,22 +789,22 @@ class RoomEventContextServlet(RestServlet): time_now = self.clock.time_msec() results = { - "events_before": self._event_serializer.serialize_events( + "events_before": await self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, ), - "event": self._event_serializer.serialize_event( + "event": await self._event_serializer.serialize_event( event_context.event, time_now, bundle_aggregations=event_context.aggregations, ), - "events_after": self._event_serializer.serialize_events( + "events_after": await self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, ), - "state": self._event_serializer.serialize_events( + "state": await self._event_serializer.serialize_events( event_context.state, time_now ), "start": event_context.start, diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 3eca4fe21f..5705f812a5 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -93,7 +93,7 @@ class EventRestServlet(RestServlet): event = await self.event_handler.get_event(requester.user, None, event_id) if event: - result = self._event_serializer.serialize_event( + result = await self._event_serializer.serialize_event( event, self.clock.time_msec(), config=SerializeEventConfig(requester=requester), diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index e7fe1332e7..5688d8593d 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -87,7 +87,7 @@ class NotificationsServlet(RestServlet): "actions": pa.actions, "ts": pa.received_ts, "event": ( - self._event_serializer.serialize_event( + await self._event_serializer.serialize_event( notif_events[pa.event_id], now, config=serialize_options, diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 553938ce9d..96f5726911 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -859,7 +859,7 @@ class RoomEventServlet(RestServlet): # per MSC2676, /rooms/{roomId}/event/{eventId}, should return the # *original* event, rather than the edited version - event_dict = self._event_serializer.serialize_event( + event_dict = await self._event_serializer.serialize_event( event, self.clock.time_msec(), bundle_aggregations=aggregations, @@ -911,25 +911,25 @@ class RoomEventContextServlet(RestServlet): time_now = self.clock.time_msec() serializer_options = SerializeEventConfig(requester=requester) results = { - "events_before": self._event_serializer.serialize_events( + "events_before": await self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, config=serializer_options, ), - "event": self._event_serializer.serialize_event( + "event": await 
self._event_serializer.serialize_event( event_context.event, time_now, bundle_aggregations=event_context.aggregations, config=serializer_options, ), - "events_after": self._event_serializer.serialize_events( + "events_after": await self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, config=serializer_options, ), - "state": self._event_serializer.serialize_events( + "state": await self._event_serializer.serialize_events( event_context.state, time_now, config=serializer_options, diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 42bdd3bb10..33fde6c6f8 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -384,7 +384,7 @@ class SyncRestServlet(RestServlet): """ invited = {} for room in rooms: - invite = self._event_serializer.serialize_event( + invite = await self._event_serializer.serialize_event( room.invite, time_now, config=serialize_options ) unsigned = dict(invite.get("unsigned", {})) @@ -415,7 +415,7 @@ class SyncRestServlet(RestServlet): """ knocked = {} for room in rooms: - knock = self._event_serializer.serialize_event( + knock = await self._event_serializer.serialize_event( room.knock, time_now, config=serialize_options ) @@ -506,10 +506,10 @@ class SyncRestServlet(RestServlet): event.room_id, ) - serialized_state = self._event_serializer.serialize_events( + serialized_state = await self._event_serializer.serialize_events( state_events, time_now, config=serialize_options ) - serialized_timeline = self._event_serializer.serialize_events( + serialized_timeline = await self._event_serializer.serialize_events( timeline_events, time_now, config=serialize_options, diff --git a/synapse/server.py b/synapse/server.py index 71ead524d6..5bfb4ba4eb 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -786,7 +786,7 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer(self) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/tests/module_api/test_event_unsigned_addition.py b/tests/module_api/test_event_unsigned_addition.py new file mode 100644 index 0000000000..b64426b1ac --- /dev/null +++ b/tests/module_api/test_event_unsigned_addition.py @@ -0,0 +1,59 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from twisted.test.proto_helpers import MemoryReactor + +from synapse.events import EventBase +from synapse.rest import admin, login, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class EventUnsignedAdditionTestCase(HomeserverTestCase): + servlets = [ + room.register_servlets, + admin.register_servlets, + login.register_servlets, + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self._store = homeserver.get_datastores().main + self._module_api = homeserver.get_module_api() + self._account_data_mgr = self._module_api.account_data_manager + + def test_annotate_event(self) -> None: + """Test that we can annotate an event when we request it from the + server. + """ + + async def add_unsigned_event(event: EventBase) -> JsonDict: + return {"test_key": event.event_id} + + self._module_api.register_add_extra_fields_to_unsigned_client_event_callbacks( + add_field_to_unsigned_callback=add_unsigned_event + ) + + user_id = self.register_user("user", "password") + token = self.login("user", "password") + + room_id = self.helper.create_room_as(user_id, tok=token) + result = self.helper.send(room_id, "Hello!", tok=token) + event_id = result["event_id"] + + event_json = self.helper.get_event(room_id, event_id, tok=token) + self.assertEqual(event_json["unsigned"].get("test_key"), event_id) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index d3e06bf6b3..534dc339f3 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -243,7 +243,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): assert event is not None time_now = self.clock.time_msec() - serialized = self.serializer.serialize_event(event, time_now) + serialized = self.get_success(self.serializer.serialize_event(event, time_now)) return serialized From 0680d76659c03cb190e0ae37af7d9db3014e3627 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2023 12:51:08 +0100 Subject: [PATCH 099/142] Reduce replication traffic due to reflected cache stream POSITION (#16557) --- changelog.d/16557.bugfix | 1 + synapse/replication/tcp/resource.py | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16557.bugfix diff --git a/changelog.d/16557.bugfix b/changelog.d/16557.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16557.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. 
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 1d9a29d22e..38abb5df54 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -27,7 +27,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream -from synapse.replication.tcp.streams._base import StreamRow, Token +from synapse.replication.tcp.streams._base import CachesStream, StreamRow, Token from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -204,6 +204,23 @@ class ReplicationStreamer: # The token has advanced but there is no data to # send, so we send a `POSITION` to inform other # workers of the updated position. + # + # There are two reasons for this: 1) this instance + # requested a stream ID but didn't use it, or 2) + # this instance advanced its own stream position due + # to receiving notifications about other instances + # advancing their stream position. + + # We skip sending `POSITION` for the `caches` stream + # for the second case as a) it generates a lot of + # traffic as every worker would echo each write, and + # b) nothing cares if a given worker's caches stream + # position lags. + if stream.NAME == CachesStream.NAME: + # If there haven't been any writes since the + # `last_token` then we're in the second case. + if stream.minimal_local_current_token() <= last_token: + continue # Note: `last_token` may not *actually* be the # last token we sent out in a RDATA or POSITION. From 928e9648578d24dc9f5ed3476d629fca9a64b22a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2023 12:52:40 +0100 Subject: [PATCH 100/142] Fix cross-worker ratelimiting (#16558) c.f. #16481 --- changelog.d/16558.bugfix | 1 + synapse/handlers/message.py | 73 +++++++++++++++++++++++++++++-------- 2 files changed, 58 insertions(+), 16 deletions(-) create mode 100644 changelog.d/16558.bugfix diff --git a/changelog.d/16558.bugfix b/changelog.d/16558.bugfix new file mode 100644 index 0000000000..64f419fd82 --- /dev/null +++ b/changelog.d/16558.bugfix @@ -0,0 +1 @@ +Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a0b4a93ae8..811a41f161 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -999,7 +999,26 @@ class EventCreationHandler: raise ShadowBanError() if ratelimit: - await self.request_ratelimiter.ratelimit(requester, update=False) + room_id = event_dict["room_id"] + try: + room_version = await self.store.get_room_version(room_id) + except NotFoundError: + # The room doesn't exist. + raise AuthError(403, f"User {requester.user} not in room {room_id}") + + if room_version.updated_redaction_rules: + redacts = event_dict["content"].get("redacts") + else: + redacts = event_dict.get("redacts") + + is_admin_redaction = await self.is_admin_redaction( + event_type=event_dict["type"], + sender=event_dict["sender"], + redacts=redacts, + ) + await self.request_ratelimiter.ratelimit( + requester, is_admin_redaction=is_admin_redaction, update=False + ) # We limit the number of concurrent event sends in a room so that we # don't fork the DAG too much. 
If we don't limit then we can end up in @@ -1508,6 +1527,18 @@ class EventCreationHandler: first_event.room_id ) if writer_instance != self._instance_name: + # Ratelimit before sending to the other event persister, to + # ensure that we correctly have ratelimits on both the event + # creators and event persisters. + if ratelimit: + for event, _ in events_and_context: + is_admin_redaction = await self.is_admin_redaction( + event.type, event.sender, event.redacts + ) + await self.request_ratelimiter.ratelimit( + requester, is_admin_redaction=is_admin_redaction + ) + try: result = await self.send_events( instance_name=writer_instance, @@ -1538,6 +1569,7 @@ class EventCreationHandler: # stream_ordering entry manually (as it was persisted on # another worker). event.internal_metadata.stream_ordering = stream_id + return event event = await self.persist_and_notify_client_events( @@ -1696,21 +1728,9 @@ class EventCreationHandler: # can apply different ratelimiting. We do this by simply checking # it's not a self-redaction (to avoid having to look up whether the # user is actually admin or not). - is_admin_redaction = False - if event.type == EventTypes.Redaction: - assert event.redacts is not None - - original_event = await self.store.get_event( - event.redacts, - redact_behaviour=EventRedactBehaviour.as_is, - get_prev_content=False, - allow_rejected=False, - allow_none=True, - ) - - is_admin_redaction = bool( - original_event and event.sender != original_event.sender - ) + is_admin_redaction = await self.is_admin_redaction( + event.type, event.sender, event.redacts + ) await self.request_ratelimiter.ratelimit( requester, is_admin_redaction=is_admin_redaction @@ -1930,6 +1950,27 @@ class EventCreationHandler: return persisted_events[-1] + async def is_admin_redaction( + self, event_type: str, sender: str, redacts: Optional[str] + ) -> bool: + """Return whether the event is a redaction made by an admin, and thus + should use a different ratelimiter. + """ + if event_type != EventTypes.Redaction: + return False + + assert redacts is not None + + original_event = await self.store.get_event( + redacts, + redact_behaviour=EventRedactBehaviour.as_is, + get_prev_content=False, + allow_rejected=False, + allow_none=True, + ) + + return bool(original_event and sender != original_event.sender) + async def _maybe_kick_guest_users( self, event: EventBase, context: EventContext ) -> None: From 89dbbd68e120a8d33c9f5c14d29fd56ecb7c6a93 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2023 14:27:20 +0100 Subject: [PATCH 101/142] Reduce spurious replication catchup (#16555) --- changelog.d/16555.misc | 1 + synapse/replication/tcp/handler.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16555.misc diff --git a/changelog.d/16555.misc b/changelog.d/16555.misc new file mode 100644 index 0000000000..d02efb2114 --- /dev/null +++ b/changelog.d/16555.misc @@ -0,0 +1 @@ +Reduce some spurious logging in worker mode. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 1d586fb180..afd03137f0 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -611,10 +611,14 @@ class ReplicationCommandHandler: # Find where we previously streamed up to. current_token = stream.current_token(cmd.instance_name) - # If the position token matches our current token then we're up to - # date and there's nothing to do. Otherwise, fetch all updates - # between then and now. 
- missing_updates = cmd.prev_token != current_token + # If the incoming previous position is less than our current position + # then we're up to date and there's nothing to do. Otherwise, fetch + # all updates between then and now. + # + # Note: We also have to check that `current_token` is at most the + # new position, to handle the case where the stream gets "reset" + # (e.g. for `caches` and `typing` after the writer's restart). + missing_updates = not (cmd.prev_token <= current_token <= cmd.new_token) while missing_updates: # Note: There may very well not be any new updates, but we check to # make sure. This can particularly happen for the event stream where @@ -644,7 +648,7 @@ class ReplicationCommandHandler: [stream.parse_row(row) for row in rows], ) - logger.info("Caught up with stream '%s' to %i", stream_name, cmd.new_token) + logger.info("Caught up with stream '%s' to %i", stream_name, current_token) # We've now caught up to position sent to us, notify handler. await self._replication_data_handler.on_position( From 5413cefe32094e414c46b7750467e5dcc6710422 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 27 Oct 2023 16:07:11 +0100 Subject: [PATCH 102/142] Reduce amount of caches POSITIONS we send (#16561) Follow on from / actually correctly does #16557 --- changelog.d/16561.bugfix | 1 + synapse/replication/tcp/streams/_base.py | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 changelog.d/16561.bugfix diff --git a/changelog.d/16561.bugfix b/changelog.d/16561.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16561.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 5c4d228f3d..58a44029aa 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -161,6 +161,14 @@ class Stream: and `limited` is whether there are more updates to fetch. """ current_token = self.current_token(self.local_instance_name) + + # If the minimum current token for the local instance is less than or + # equal to the last thing we published, we know that there are no + # updates. + if self.last_token >= self.minimal_local_current_token(): + self.last_token = current_token + return [], current_token, False + updates, current_token, limited = await self.get_updates_since( self.local_instance_name, self.last_token, current_token ) @@ -489,6 +497,8 @@ class CachesStream(Stream): return self.store.get_cache_stream_token_for_writer(instance_name) def minimal_local_current_token(self) -> Token: + if self.store._cache_id_gen: + return self.store._cache_id_gen.get_minimal_local_current_token() return self.current_token(self.local_instance_name) From 2bf934140625e91e1e27ffc9f717f6f2d277b2b9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 27 Oct 2023 12:50:50 -0400 Subject: [PATCH 103/142] Ensure local invited & knocking users leave before purge. (#16559) This is mostly useful for federated rooms where some users would get stuck in the invite or knock state when the room was purged from their homeserver. 
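(Editor's note, not part of the original commit: the change below swaps `get_users_in_room`, which only returns joined members, for a read of the `local_current_membership` table, so locally invited and knocking users are also removed when a room is shut down. A minimal, self-contained sketch of the new filter follows; `users_to_kick` is a hypothetical helper for illustration only, the real code lives in `RoomShutdownHandler` and uses the `Membership` constants.)

```python
from typing import List, Tuple

# Illustrative only: rows are (user_id, membership) pairs, as returned by the
# new get_local_users_related_to_room store method for local users of a room.
def users_to_kick(rows: List[Tuple[str, str]]) -> List[str]:
    # Joined, invited and knocking users still need to be removed; users who
    # have already left or been banned do not.
    removable = {"join", "invite", "knock"}
    return [user_id for user_id, membership in rows if membership in removable]

print(users_to_kick([
    ("@joined:example.org", "join"),
    ("@invited:example.org", "invite"),
    ("@knocked:example.org", "knock"),
    ("@banned:example.org", "ban"),
]))
# -> ['@joined:example.org', '@invited:example.org', '@knocked:example.org']
```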
--- changelog.d/16559.bugfix | 1 + synapse/handlers/room.py | 7 +-- synapse/storage/databases/main/roommember.py | 16 ++++++ tests/rest/admin/test_room.py | 53 +++++++++++++++++++- 4 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16559.bugfix diff --git a/changelog.d/16559.bugfix b/changelog.d/16559.bugfix new file mode 100644 index 0000000000..e0fb16f807 --- /dev/null +++ b/changelog.d/16559.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where invited/knocking users would not leave during a room purge. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 97c9f01245..6d680b0795 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1939,9 +1939,10 @@ class RoomShutdownHandler: else: logger.info("Shutting down room %r", room_id) - users = await self.store.get_users_in_room(room_id) - for user_id in users: - if not self.hs.is_mine_id(user_id): + users = await self.store.get_local_users_related_to_room(room_id) + for user_id, membership in users: + # If the user is not in the room (or is banned), nothing to do. + if membership not in (Membership.JOIN, Membership.INVITE, Membership.KNOCK): continue logger.info("Kicking %r from %r...", user_id, room_id) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 67e149b586..1ed7f2d0ef 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -482,6 +482,22 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): desc="get_local_users_in_room", ) + async def get_local_users_related_to_room( + self, room_id: str + ) -> List[Tuple[str, str]]: + """ + Retrieves a list of the current roommembers who are local to the server and their membership status. + """ + return cast( + List[Tuple[str, str]], + await self.db_pool.simple_select_list( + table="local_current_membership", + keyvalues={"room_id": room_id}, + retcols=("user_id", "membership"), + desc="get_local_users_in_room", + ), + ) + async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool: """ Check whether a given local user is currently joined to the given room. 
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6ed451d7c4..206ca7f083 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -29,7 +29,7 @@ from synapse.handlers.pagination import ( PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, ) -from synapse.rest.client import directory, events, login, room +from synapse.rest.client import directory, events, knock, login, room, sync from synapse.server import HomeServer from synapse.types import UserID from synapse.util import Clock @@ -49,6 +49,8 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): login.register_servlets, events.register_servlets, room.register_servlets, + knock.register_servlets, + sync.register_servlets, room.register_deprecated_servlets, ] @@ -254,6 +256,55 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): self._is_blocked(self.room_id, expect=False) self._has_no_members(self.room_id) + def test_purge_room_unjoined(self) -> None: + """Test to purge a room when there are invited or knocked users.""" + # Test that room is not purged + with self.assertRaises(AssertionError): + self._is_purged(self.room_id) + + # Test that room is not blocked + self._is_blocked(self.room_id, expect=False) + + # Assert one user in room + self._is_member(room_id=self.room_id, user_id=self.other_user) + self.helper.send_state( + self.room_id, + EventTypes.JoinRules, + {"join_rule": "knock"}, + tok=self.other_user_tok, + ) + + # Invite a user. + invited_user = self.register_user("invited", "pass") + self.helper.invite( + self.room_id, self.other_user, invited_user, tok=self.other_user_tok + ) + + # Have a user knock. + knocked_user = self.register_user("knocked", "pass") + knocked_user_tok = self.login("knocked", "pass") + self.helper.knock(self.room_id, knocked_user, tok=knocked_user_tok) + + channel = self.make_request( + "DELETE", + self.url.encode("ascii"), + content={"block": False, "purge": True}, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(None, channel.json_body["new_room_id"]) + self.assertCountEqual( + [self.other_user, invited_user, knocked_user], + channel.json_body["kicked_users"], + ) + self.assertIn("failed_to_kick_users", channel.json_body) + self.assertIn("local_aliases", channel.json_body) + + self._is_purged(self.room_id) + self._is_blocked(self.room_id, expect=False) + self._has_no_members(self.room_id) + def test_block_room_and_not_purge(self) -> None: """Test to block a room without purging it. Members will not be moved to a new room and will not receive a message. From 11a8ae06325e9a8c9d57e3dff5fdf46a90309cd1 Mon Sep 17 00:00:00 2001 From: kegsay Date: Fri, 27 Oct 2023 18:29:20 +0100 Subject: [PATCH 104/142] complement: enable dirty runs (#16520) * complement: enable dirty runs * Add changelog * Set a low connpool limit when running in Complement Dirty runs can cause many containers to be running concurrently, which seems to easily exhaust resources on the host. The increased speedup from dirty runs also seems to use more db connections on workers, which are misconfigured currently to have `SUM(workers * cp_max) > max_connections`, causing ``` FATAL: sorry, too many clients already ``` which results in tests failing. 
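(Editor's note, illustrative numbers only: with the default `cp_max: 10` from `docker/conf/homeserver.yaml`, a run of, say, 14 worker processes can ask Postgres for up to 140 connections, while Postgres ships with `max_connections = 100` by default, which produces exactly the error above.)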
* Try p=2 concurrency to restrict slowness of servers which causes partial state join tests to flake * Debug logging * Only run flakey tests * Only adjust connection pool limits in worker mode * Move cp vars to somewhere where they get executed in CI * Move cp values back to where they actually work * Debug logging * Try p=1 to see if this makes worker mode happier * Remove debug logging --- .github/workflows/tests.yml | 3 ++- changelog.d/16520.misc | 1 + docker/complement/conf/start_for_complement.sh | 5 +++++ docker/conf/homeserver.yaml | 4 ++-- scripts-dev/complement.sh | 6 +++++- 5 files changed, 15 insertions(+), 4 deletions(-) create mode 100644 changelog.d/16520.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 13746608d4..12420911b4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -638,9 +638,10 @@ jobs: - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + # use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once. - run: | set -o pipefail - COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt + COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -p 1 -json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash env: POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }} diff --git a/changelog.d/16520.misc b/changelog.d/16520.misc new file mode 100644 index 0000000000..ea10fd4345 --- /dev/null +++ b/changelog.d/16520.misc @@ -0,0 +1 @@ +Enable dirty runs on Complement CI, which is significantly faster. diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index 5560ab8b95..7b012ce8ab 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -68,6 +68,11 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then fi log "Workers requested: $SYNAPSE_WORKER_TYPES" + # adjust connection pool limits on worker mode as otherwise running lots of worker synapses + # can make docker unhappy (in GHA) + export POSTGRES_CP_MIN=1 + export POSTGRES_CP_MAX=3 + echo "using reduced connection pool limits for worker mode" # Improve startup times by using a launcher based on fork() export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1 else diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index c46b955d63..c412ba2e87 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -67,8 +67,8 @@ database: host: "{{ POSTGRES_HOST or "db" }}" port: "{{ POSTGRES_PORT or "5432" }}" {% endif %} - cp_min: 5 - cp_max: 10 + cp_min: {{ POSTGRES_CP_MIN or 5 }} + cp_max: {{ POSTGRES_CP_MAX or 10 }} {% else %} database: name: "sqlite3" diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3e0cddb527..b1a8724b7e 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -216,6 +216,10 @@ extra_test_args=() test_packages="./tests/csapi ./tests ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 ./tests/msc3930 ./tests/msc3902" +# Enable dirty runs, so tests will reuse the same container where possible. +# This significantly speeds up tests, but increases the possibility of test pollution. +export COMPLEMENT_ENABLE_DIRTY_RUNS=1 + # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) 
export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ @@ -274,7 +278,7 @@ fi export PASS_SYNAPSE_LOG_TESTING=1 # Run the tests! -echo "Images built; running complement" +echo "Images built; running complement with ${extra_test_args[@]} $@ $test_packages" cd "$COMPLEMENT_DIR" go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" $test_packages From 747416e94cd8f137b9173c132f7c44ea1c59534d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 27 Oct 2023 20:14:02 +0100 Subject: [PATCH 105/142] Portdb: don't copy a table that gets rebuilt (#16563) --- changelog.d/16563.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16563.misc diff --git a/changelog.d/16563.misc b/changelog.d/16563.misc new file mode 100644 index 0000000000..e433659e8f --- /dev/null +++ b/changelog.d/16563.misc @@ -0,0 +1 @@ +Stop porting a table in port db that we're going to nuke and rebuild anyway. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index ab2b29cf1b..ef8590db65 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -191,7 +191,7 @@ IGNORED_TABLES = { "user_directory_search_stat", "user_directory_search_pos", "users_who_share_private_rooms", - "users_in_public_room", + "users_in_public_rooms", # UI auth sessions have foreign keys so additional care needs to be taken, # the sessions are transient anyway, so ignore them. "ui_auth_sessions", From bcaaeab410224cb65b90425ca980c6c06fe0b01d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:28:50 +0000 Subject: [PATCH 106/142] Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15 (#16573) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 67620f8efa..f63a6c66ab 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
[[package]] name = "alabaster" @@ -1765,6 +1765,8 @@ files = [ {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, + {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, + {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, @@ -3116,13 +3118,13 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.14" +version = "2.9.21.15" description = "Typing stubs for psycopg2" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "types-psycopg2-2.9.21.14.tar.gz", hash = "sha256:bf73a0ac4da4e278c89bf1b01fc596d5a5ac7a356cfe6ac0249f47b9e259f868"}, - {file = "types_psycopg2-2.9.21.14-py3-none-any.whl", hash = "sha256:cd9c5350631f3bc6184ec8d48f2ed31d4ea660f89d0fffe78239450782f383c5"}, + {file = "types-psycopg2-2.9.21.15.tar.gz", hash = "sha256:cf99b62ab32cd4ef412fc3c4da1c29ca5a130847dff06d709b84a523802406f0"}, + {file = "types_psycopg2-2.9.21.15-py3-none-any.whl", hash = "sha256:cc80479def02e4dd1ef21649d82f04426c73bc0693bcc0a8b5223c7c168472af"}, ] [[package]] From 13f64677854b3b4675f104e2f3f6afb95a010e29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:29:52 +0000 Subject: [PATCH 107/142] Bump black from 23.10.0 to 23.10.1 (#16575) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/poetry.lock b/poetry.lock index f63a6c66ab..05f08779fd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -162,29 +162,29 @@ lxml = ["lxml"] [[package]] name = "black" -version = "23.10.0" +version = "23.10.1" description = "The uncompromising code formatter." 
optional = false python-versions = ">=3.8" files = [ - {file = "black-23.10.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:f8dc7d50d94063cdfd13c82368afd8588bac4ce360e4224ac399e769d6704e98"}, - {file = "black-23.10.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:f20ff03f3fdd2fd4460b4f631663813e57dc277e37fb216463f3b907aa5a9bdd"}, - {file = "black-23.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3d9129ce05b0829730323bdcb00f928a448a124af5acf90aa94d9aba6969604"}, - {file = "black-23.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:960c21555be135c4b37b7018d63d6248bdae8514e5c55b71e994ad37407f45b8"}, - {file = "black-23.10.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:30b78ac9b54cf87bcb9910ee3d499d2bc893afd52495066c49d9ee6b21eee06e"}, - {file = "black-23.10.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:0e232f24a337fed7a82c1185ae46c56c4a6167fb0fe37411b43e876892c76699"}, - {file = "black-23.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31946ec6f9c54ed7ba431c38bc81d758970dd734b96b8e8c2b17a367d7908171"}, - {file = "black-23.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:c870bee76ad5f7a5ea7bd01dc646028d05568d33b0b09b7ecfc8ec0da3f3f39c"}, - {file = "black-23.10.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:6901631b937acbee93c75537e74f69463adaf34379a04eef32425b88aca88a23"}, - {file = "black-23.10.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:481167c60cd3e6b1cb8ef2aac0f76165843a374346aeeaa9d86765fe0dd0318b"}, - {file = "black-23.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f74892b4b836e5162aa0452393112a574dac85e13902c57dfbaaf388e4eda37c"}, - {file = "black-23.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:47c4510f70ec2e8f9135ba490811c071419c115e46f143e4dce2ac45afdcf4c9"}, - {file = "black-23.10.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:76baba9281e5e5b230c9b7f83a96daf67a95e919c2dfc240d9e6295eab7b9204"}, - {file = "black-23.10.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:a3c2ddb35f71976a4cfeca558848c2f2f89abc86b06e8dd89b5a65c1e6c0f22a"}, - {file = "black-23.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db451a3363b1e765c172c3fd86213a4ce63fb8524c938ebd82919bf2a6e28c6a"}, - {file = "black-23.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:7fb5fc36bb65160df21498d5a3dd330af8b6401be3f25af60c6ebfe23753f747"}, - {file = "black-23.10.0-py3-none-any.whl", hash = "sha256:e223b731a0e025f8ef427dd79d8cd69c167da807f5710add30cdf131f13dd62e"}, - {file = "black-23.10.0.tar.gz", hash = "sha256:31b9f87b277a68d0e99d2905edae08807c007973eaa609da5f0c62def6b7c0bd"}, + {file = "black-23.10.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:ec3f8e6234c4e46ff9e16d9ae96f4ef69fa328bb4ad08198c8cee45bb1f08c69"}, + {file = "black-23.10.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:1b917a2aa020ca600483a7b340c165970b26e9029067f019e3755b56e8dd5916"}, + {file = "black-23.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c74de4c77b849e6359c6f01987e94873c707098322b91490d24296f66d067dc"}, + {file = "black-23.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b4d10b0f016616a0d93d24a448100adf1699712fb7a4efd0e2c32bbb219b173"}, + {file = "black-23.10.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b15b75fc53a2fbcac8a87d3e20f69874d161beef13954747e053bca7a1ce53a0"}, + {file = "black-23.10.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:e293e4c2f4a992b980032bbd62df07c1bcff82d6964d6c9496f2cd726e246ace"}, + 
{file = "black-23.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d56124b7a61d092cb52cce34182a5280e160e6aff3137172a68c2c2c4b76bcb"}, + {file = "black-23.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f157a8945a7b2d424da3335f7ace89c14a3b0625e6593d21139c2d8214d55ce"}, + {file = "black-23.10.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:cfcce6f0a384d0da692119f2d72d79ed07c7159879d0bb1bb32d2e443382bf3a"}, + {file = "black-23.10.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:33d40f5b06be80c1bbce17b173cda17994fbad096ce60eb22054da021bf933d1"}, + {file = "black-23.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:840015166dbdfbc47992871325799fd2dc0dcf9395e401ada6d88fe11498abad"}, + {file = "black-23.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:037e9b4664cafda5f025a1728c50a9e9aedb99a759c89f760bd83730e76ba884"}, + {file = "black-23.10.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:7cb5936e686e782fddb1c73f8aa6f459e1ad38a6a7b0e54b403f1f05a1507ee9"}, + {file = "black-23.10.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:7670242e90dc129c539e9ca17665e39a146a761e681805c54fbd86015c7c84f7"}, + {file = "black-23.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed45ac9a613fb52dad3b61c8dea2ec9510bf3108d4db88422bacc7d1ba1243d"}, + {file = "black-23.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6d23d7822140e3fef190734216cefb262521789367fbdc0b3f22af6744058982"}, + {file = "black-23.10.1-py3-none-any.whl", hash = "sha256:d431e6739f727bb2e0495df64a6c7a5310758e87505f5f8cde9ff6c0f2d7e4fe"}, + {file = "black-23.10.1.tar.gz", hash = "sha256:1f8ce316753428ff68749c65a5f7844631aa18c8679dfd3ca9dc1a289979c258"}, ] [package.dependencies] From 425cb9c23cb4f46f69c64f8ecbf9439f844ab48a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:30:26 +0000 Subject: [PATCH 108/142] Bump phonenumbers from 8.13.22 to 8.13.23 (#16576) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 05f08779fd..c099aba779 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1624,13 +1624,13 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.22" +version = "8.13.23" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.22-py2.py3-none-any.whl", hash = "sha256:85ceeba9e67984ba98182c77e8e4c70093d38c0c6a0cb2bd392e0694ddaeb1f6"}, - {file = "phonenumbers-8.13.22.tar.gz", hash = "sha256:001664c90f59b8954766c2db85adafc8dbc96177efeb49607ca4e64a7acaf569"}, + {file = "phonenumbers-8.13.23-py2.py3-none-any.whl", hash = "sha256:34d6cb279dd4a64714e324c71350f96e5bda3237be28d11b4c555c44701544cd"}, + {file = "phonenumbers-8.13.23.tar.gz", hash = "sha256:869e44fcaaf276eca6b953a401e2b27d57461f3a18a66cf5f13377e7bb0e228c"}, ] [[package]] From 8f7cd4cd035bf165b311d1f44bdcbe5d065662cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:32:36 +0000 Subject: [PATCH 109/142] Bump serde from 1.0.189 to 1.0.190 (#16577) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5acf47cea8..3f7e66909b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -332,18 +332,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", From ba55835000789465f5de28879b523023dc4c6ff6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:44:36 +0000 Subject: [PATCH 110/142] Bump cryptography from 41.0.4 to 41.0.5 (#16572) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index c099aba779..b5bd4ce5b7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -467,34 +467,34 @@ files = [ [[package]] name = "cryptography" -version = "41.0.4" +version = "41.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, - {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, - {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, - {file = "cryptography-41.0.4.tar.gz", hash = 
"sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, + {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797"}, + {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5"}, + {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147"}, + {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696"}, + {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da"}, + {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20"}, + {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548"}, + {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d"}, + {file = "cryptography-41.0.5-cp37-abi3-win32.whl", hash = "sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936"}, + {file = "cryptography-41.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81"}, + {file = "cryptography-41.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1"}, + {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72"}, + {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88"}, + {file = "cryptography-41.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf"}, + {file = "cryptography-41.0.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e"}, + {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8"}, + {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179"}, + {file = "cryptography-41.0.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d"}, + {file = "cryptography-41.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1"}, + {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86"}, + {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723"}, + {file = "cryptography-41.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84"}, + {file = "cryptography-41.0.5.tar.gz", hash = 
"sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7"}, ] [package.dependencies] From 8c63e932865c0d620c6d5b4be98b9ea3e9f240bd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Oct 2023 12:27:14 +0000 Subject: [PATCH 111/142] Fix HTTP repl response to use minimum token (#16578) --- changelog.d/16578.bugfix | 1 + synapse/replication/http/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16578.bugfix diff --git a/changelog.d/16578.bugfix b/changelog.d/16578.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16578.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 7476839db5..38701aea72 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -433,7 +433,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): if self.WAIT_FOR_STREAMS: response[_STREAM_POSITION_KEY] = { - stream.NAME: stream.current_token(self._instance_name) + stream.NAME: stream.minimal_local_current_token() for stream in self._streams } From 4e1a19d3752b5bda95e6791e98b3f9df4dfb33cd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 30 Oct 2023 13:07:08 +0000 Subject: [PATCH 112/142] Run actions/setup-go after checking out complement (#16567) --- .github/workflows/latest_deps.yml | 7 +++++-- .github/workflows/tests.yml | 7 +++++-- .github/workflows/twisted_trunk.yml | 7 +++++-- changelog.d/16567.misc | 1 + 4 files changed, 16 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16567.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index c9ec70abe9..cb801afcbf 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -197,11 +197,14 @@ jobs: with: path: synapse - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + - run: | set -o pipefail TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 12420911b4..a1f714da23 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -633,11 +633,14 @@ jobs: uses: dtolnay/rust-toolchain@1.61.0 - uses: Swatinem/rust-cache@v2 - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + # use p=1 concurrency as GHA boxes are underpowered and don't like running tons of synapses at once. 
- run: | set -o pipefail diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 062f782e8b..1011a15390 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -168,11 +168,14 @@ jobs: with: path: synapse - - uses: actions/setup-go@v4 - - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh + - uses: actions/setup-go@v4 + with: + cache-dependency-path: complement/go.sum + go-version-file: complement/go.mod + # This step is specific to the 'Twisted trunk' test run: - name: Patch dependencies run: | diff --git a/changelog.d/16567.misc b/changelog.d/16567.misc new file mode 100644 index 0000000000..858fbac7f2 --- /dev/null +++ b/changelog.d/16567.misc @@ -0,0 +1 @@ +Deal with warnings from running complement in CI. From a3f6200d6565f3bfc72f05465dfbec41284b3afc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 13:40:17 +0000 Subject: [PATCH 113/142] Bump setuptools-rust from 1.7.0 to 1.8.0 (#16574) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: David Robertson --- changelog.d/16574.misc | 1 + poetry.lock | 9 ++++----- pyproject.toml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16574.misc diff --git a/changelog.d/16574.misc b/changelog.d/16574.misc new file mode 100644 index 0000000000..fae0f00fb3 --- /dev/null +++ b/changelog.d/16574.misc @@ -0,0 +1 @@ +Allow building with `setuptools_rust` 1.8.0. diff --git a/poetry.lock b/poetry.lock index b5bd4ce5b7..00f5b4a20a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2580,20 +2580,19 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.7.0" +version = "1.8.0" description = "Setuptools Rust extension plugin" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "setuptools-rust-1.7.0.tar.gz", hash = "sha256:c7100999948235a38ae7e555fe199aa66c253dc384b125f5d85473bf81eae3a3"}, - {file = "setuptools_rust-1.7.0-py3-none-any.whl", hash = "sha256:071099885949132a2180d16abf907b60837e74b4085047ba7e9c0f5b365310c1"}, + {file = "setuptools-rust-1.8.0.tar.gz", hash = "sha256:5e02b7a80058853bf64127314f6b97d0efed11e08b94c88ca639a20976f6adc4"}, + {file = "setuptools_rust-1.8.0-py3-none-any.whl", hash = "sha256:95ec67edee2ca73233c9e75250e9d23a302aa23b4c8413dfd19c14c30d08f703"}, ] [package.dependencies] semantic-version = ">=2.8.2,<3" setuptools = ">=62.4" tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.7.4.3" [[package]] name = "signedjson" diff --git a/pyproject.toml b/pyproject.toml index f3764b1a57..5b9f9fbde0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -381,7 +381,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). 
-requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.7.0"] +requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.0"] build-backend = "poetry.core.masonry.api" From fdce83ee60b3b5ffd0c41d112873a4de52b1e640 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 30 Oct 2023 14:34:37 +0000 Subject: [PATCH 114/142] Claim fallback keys in bulk (#16570) --- changelog.d/16570.feature | 1 + synapse/handlers/e2e_keys.py | 14 ++++ synapse/storage/database.py | 10 +++ .../storage/databases/main/end_to_end_keys.py | 60 +++++++++++++++ tests/handlers/test_e2e_keys.py | 77 +++++++++++++++++++ 5 files changed, 162 insertions(+) create mode 100644 changelog.d/16570.feature diff --git a/changelog.d/16570.feature b/changelog.d/16570.feature new file mode 100644 index 0000000000..c807945fa8 --- /dev/null +++ b/changelog.d/16570.feature @@ -0,0 +1 @@ +Improve the performance of claiming encryption keys. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 8c6432035d..91c5fe007d 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -659,6 +659,20 @@ class E2eKeysHandler: timeout: Optional[int], always_include_fallback_keys: bool, ) -> JsonDict: + """ + Args: + query: A chain of maps from (user_id, device_id, algorithm) to the requested + number of keys to claim. + user: The user who is claiming these keys. + timeout: How long to wait for any federation key claim requests before + giving up. + always_include_fallback_keys: always include a fallback key for local users' + devices, even if we managed to claim a one-time-key. + + Returns: a heterogeneous dict with two keys: + one_time_keys: chain of maps user ID -> device ID -> key ID -> key. + failures: map from remote destination to a JsonDict describing the error. + """ local_query: List[Tuple[str, str, str, int]] = [] remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {} diff --git a/synapse/storage/database.py b/synapse/storage/database.py index b1ece63845..a4e7048368 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -420,6 +420,16 @@ class LoggingTransaction: self._do_execute(self.txn.execute, sql, parameters) def executemany(self, sql: str, *args: Any) -> None: + """Repeatedly execute the same piece of SQL with different parameters. + + See https://peps.python.org/pep-0249/#executemany. Note in particular that + + > Use of this method for an operation which produces one or more result sets + > constitutes undefined behavior + + so you can't use this for e.g. a SELECT, an UPDATE ... RETURNING, or a + DELETE FROM... RETURNING. + """ # TODO: we should add a type for *args here. Looking at Cursor.executemany # and DBAPI2 it ought to be Sequence[_Parameter], but we pass in # Iterable[Iterable[Any]] in execute_batch and execute_values above, which mypy diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index f70f95eeba..08385d312f 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -24,6 +24,7 @@ from typing import ( Mapping, Optional, Sequence, + Set, Tuple, Union, cast, @@ -1260,6 +1261,65 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker Returns: A map of user ID -> a map device ID -> a map of key ID -> JSON. 
""" + if isinstance(self.database_engine, PostgresEngine): + return await self.db_pool.runInteraction( + "_claim_e2e_fallback_keys_bulk", + self._claim_e2e_fallback_keys_bulk_txn, + query_list, + db_autocommit=True, + ) + # Use an UPDATE FROM... RETURNING combined with a VALUES block to do + # everything in one query. Note: this is also supported in SQLite 3.33.0, + # (see https://www.sqlite.org/lang_update.html#update_from), but we do not + # have an equivalent of psycopg2's execute_values to do this in one query. + else: + return await self._claim_e2e_fallback_keys_simple(query_list) + + def _claim_e2e_fallback_keys_bulk_txn( + self, + txn: LoggingTransaction, + query_list: Iterable[Tuple[str, str, str, bool]], + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Efficient implementation of claim_e2e_fallback_keys for Postgres. + + Safe to autocommit: this is a single query. + """ + results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + + sql = """ + WITH claims(user_id, device_id, algorithm, mark_as_used) AS ( + VALUES ? + ) + UPDATE e2e_fallback_keys_json k + SET used = used OR mark_as_used + FROM claims + WHERE (k.user_id, k.device_id, k.algorithm) = (claims.user_id, claims.device_id, claims.algorithm) + RETURNING k.user_id, k.device_id, k.algorithm, k.key_id, k.key_json; + """ + claimed_keys = cast( + List[Tuple[str, str, str, str, str]], + txn.execute_values(sql, query_list), + ) + + seen_user_device: Set[Tuple[str, str]] = set() + for user_id, device_id, algorithm, key_id, key_json in claimed_keys: + device_results = results.setdefault(user_id, {}).setdefault(device_id, {}) + device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) + + if (user_id, device_id) in seen_user_device: + continue + seen_user_device.add((user_id, device_id)) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) + + return results + + async def _claim_e2e_fallback_keys_simple( + self, + query_list: Iterable[Tuple[str, str, str, bool]], + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Naive, inefficient implementation of claim_e2e_fallback_keys for SQLite.""" results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} for user_id, device_id, algorithm, mark_as_used in query_list: row = await self.db_pool.simple_select_one( diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index c5556f2844..24e405f429 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -322,6 +322,83 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) + def test_fallback_key_bulk(self) -> None: + """Like test_fallback_key, but claims multiple keys in one handler call.""" + alice = f"@alice:{self.hs.hostname}" + brian = f"@brian:{self.hs.hostname}" + chris = f"@chris:{self.hs.hostname}" + + # Have three users upload fallback keys for two devices. 
+ fallback_keys = { + alice: { + "alice_dev_1": {"alg1:k1": "fallback_key1"}, + "alice_dev_2": {"alg2:k2": "fallback_key2"}, + }, + brian: { + "brian_dev_1": {"alg1:k3": "fallback_key3"}, + "brian_dev_2": {"alg2:k4": "fallback_key4"}, + }, + chris: { + "chris_dev_1": {"alg1:k5": "fallback_key5"}, + "chris_dev_2": {"alg2:k6": "fallback_key6"}, + }, + } + + for user_id, devices in fallback_keys.items(): + for device_id, key_dict in devices.items(): + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + {"fallback_keys": key_dict}, + ) + ) + + # Each device should have an unused fallback key. + for user_id, devices in fallback_keys.items(): + for device_id in devices: + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + expected_algorithm_name = f"alg{device_id[-1]}" + self.assertEqual(fallback_res, [expected_algorithm_name]) + + # Claim the fallback key for one device per user. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + { + alice: {"alice_dev_1": {"alg1": 1}}, + brian: {"brian_dev_2": {"alg2": 1}}, + chris: {"chris_dev_2": {"alg2": 1}}, + }, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + expected_claims = { + alice: {"alice_dev_1": {"alg1:k1": "fallback_key1"}}, + brian: {"brian_dev_2": {"alg2:k4": "fallback_key4"}}, + chris: {"chris_dev_2": {"alg2:k6": "fallback_key6"}}, + } + self.assertEqual( + claim_res, + {"failures": {}, "one_time_keys": expected_claims}, + ) + + for user_id, devices in fallback_keys.items(): + for device_id in devices: + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + # Claimed fallback keys should no longer show up as unused. + # Unclaimed fallback keys should still be unused. + if device_id in expected_claims[user_id]: + self.assertEqual(fallback_res, []) + else: + expected_algorithm_name = f"alg{device_id[-1]}" + self.assertEqual(fallback_res, [expected_algorithm_name]) + def test_fallback_key_always_returned(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" From 408c13801a244a89d23f9c8e8ccce1b3d049abb6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 30 Oct 2023 14:47:57 +0000 Subject: [PATCH 115/142] Add fast path for replication events stream fetch (#16580) We can bail early if the from token is greater than or equal to the current token. --- changelog.d/16580.bugfix | 1 + synapse/replication/tcp/streams/events.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/16580.bugfix diff --git a/changelog.d/16580.bugfix b/changelog.d/16580.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16580.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 38823113d8..57138fea80 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -157,6 +157,12 @@ class EventsStream(_StreamFromIdGen): current_token: Token, target_row_count: int, ) -> StreamUpdateResult: + # The events stream cannot be "reset", so its safe to return early if + # the from token is larger than the current token (the DB query will + # trivially return 0 rows anyway). 
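+        #
+        # (Editor's note, not part of the original patch: "reset" refers to
+        # streams such as `caches` and `typing`, whose positions can go
+        # backwards when their writer restarts; see the replication handler
+        # change in PATCH 101 above. Event stream IDs are allocated from a
+        # persistent generator and only ever move forwards, which is what
+        # makes this early return safe.)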
+ if from_token >= current_token: + return [], current_token, False + # the events stream merges together three separate sources: # * new events # * current_state changes From 91aa52c911630ac2a8abccb2c68f77fa15ea5c79 Mon Sep 17 00:00:00 2001 From: Niranjan Kurhade Date: Mon, 30 Oct 2023 21:35:34 +0530 Subject: [PATCH 116/142] Clients link fixed in README (#16569) --- README.rst | 2 +- changelog.d/16569.doc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16569.doc diff --git a/README.rst b/README.rst index d116cd51fb..4a90429647 100644 --- a/README.rst +++ b/README.rst @@ -122,7 +122,7 @@ You will need to change the server you are logging into from ``matrix.org`` and instead specify a Homeserver URL of ``https://:8448`` (or just ``https://`` if you are using a reverse proxy). If you prefer to use another client, refer to our -`client breakdown `_. +`client breakdown `_. If all goes well you should at least be able to log in, create a room, and start sending messages. diff --git a/changelog.d/16569.doc b/changelog.d/16569.doc new file mode 100644 index 0000000000..7b2a439d30 --- /dev/null +++ b/changelog.d/16569.doc @@ -0,0 +1 @@ +Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. From de981ae56720b06c33203e9edd3df08376afb907 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 30 Oct 2023 21:25:21 +0000 Subject: [PATCH 117/142] Claim local one-time-keys in bulk (#16565) Co-authored-by: Patrick Cloke --- changelog.d/16565.feature | 1 + synapse/handlers/e2e_keys.py | 10 + .../storage/databases/main/end_to_end_keys.py | 253 ++++++++++-------- tests/handlers/test_e2e_keys.py | 158 +++++++++++ 4 files changed, 308 insertions(+), 114 deletions(-) create mode 100644 changelog.d/16565.feature diff --git a/changelog.d/16565.feature b/changelog.d/16565.feature new file mode 100644 index 0000000000..c807945fa8 --- /dev/null +++ b/changelog.d/16565.feature @@ -0,0 +1 @@ +Improve the performance of claiming encryption keys. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 91c5fe007d..d340d4aebe 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -753,6 +753,16 @@ class E2eKeysHandler: async def upload_keys_for_user( self, user_id: str, device_id: str, keys: JsonDict ) -> JsonDict: + """ + Args: + user_id: user whose keys are being uploaded. + device_id: device whose keys are being uploaded. + keys: the body of a /keys/upload request. + + Returns a dictionary with one field: + "one_time_keys": A mapping from algorithm to number of keys for that + algorithm, including those previously persisted. + """ # This can only be called from the main process. assert isinstance(self.device_handler, DeviceHandler) diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 08385d312f..4f96ac25c7 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1111,7 +1111,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker ... 
async def claim_e2e_one_time_keys( - self, query_list: Iterable[Tuple[str, str, str, int]] + self, query_list: Collection[Tuple[str, str, str, int]] ) -> Tuple[ Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]] ]: @@ -1121,131 +1121,63 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker query_list: An iterable of tuples of (user ID, device ID, algorithm). Returns: - A tuple pf: + A tuple (results, missing) of: A map of user ID -> a map device ID -> a map of key ID -> JSON. - A copy of the input which has not been fulfilled. + A copy of the input which has not been fulfilled. The returned counts + may be less than the input counts. In this case, the returned counts + are the number of claims that were not fulfilled. """ - - @trace - def _claim_e2e_one_time_key_simple( - txn: LoggingTransaction, - user_id: str, - device_id: str, - algorithm: str, - count: int, - ) -> List[Tuple[str, str]]: - """Claim OTK for device for DBs that don't support RETURNING. - - Returns: - A tuple of key name (algorithm + key ID) and key JSON, if an - OTK was found. - """ - - sql = """ - SELECT key_id, key_json FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT ? - """ - - txn.execute(sql, (user_id, device_id, algorithm, count)) - otk_rows = list(txn) - if not otk_rows: - return [] - - self.db_pool.simple_delete_many_txn( - txn, - table="e2e_one_time_keys_json", - column="key_id", - values=[otk_row[0] for otk_row in otk_rows], - keyvalues={ - "user_id": user_id, - "device_id": device_id, - "algorithm": algorithm, - }, - ) - self._invalidate_cache_and_stream( - txn, self.count_e2e_one_time_keys, (user_id, device_id) - ) - - return [ - (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows - ] - - @trace - def _claim_e2e_one_time_key_returning( - txn: LoggingTransaction, - user_id: str, - device_id: str, - algorithm: str, - count: int, - ) -> List[Tuple[str, str]]: - """Claim OTK for device for DBs that support RETURNING. - - Returns: - A tuple of key name (algorithm + key ID) and key JSON, if an - OTK was found. - """ - - # We can use RETURNING to do the fetch and DELETE in once step. - sql = """ - DELETE FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - AND key_id IN ( - SELECT key_id FROM e2e_one_time_keys_json - WHERE user_id = ? AND device_id = ? AND algorithm = ? - LIMIT ? - ) - RETURNING key_id, key_json - """ - - txn.execute( - sql, - (user_id, device_id, algorithm, user_id, device_id, algorithm, count), - ) - otk_rows = list(txn) - if not otk_rows: - return [] - - self._invalidate_cache_and_stream( - txn, self.count_e2e_one_time_keys, (user_id, device_id) - ) - - return [ - (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows - ] - results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} missing: List[Tuple[str, str, str, int]] = [] - for user_id, device_id, algorithm, count in query_list: - if self.database_engine.supports_returning: - # If we support RETURNING clause we can use a single query that - # allows us to use autocommit mode. - _claim_e2e_one_time_key = _claim_e2e_one_time_key_returning - db_autocommit = True - else: - _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple - db_autocommit = False + if isinstance(self.database_engine, PostgresEngine): + # If we can use execute_values we can use a single batch query + # in autocommit mode. 
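+            #
+            # (Editor's note, not part of the original patch: txn.execute_values,
+            # called in _claim_e2e_one_time_keys_bulk below, hands the query to
+            # psycopg2's execute_values, which expands the single placeholder in
+            # the `VALUES ?` clause into one (user_id, device_id, algorithm,
+            # count) row per entry in query_list, so every requested device is
+            # claimed in a single round trip to the database.)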
+ unfulfilled_claim_counts: Dict[Tuple[str, str, str], int] = {} + for user_id, device_id, algorithm, count in query_list: + unfulfilled_claim_counts[user_id, device_id, algorithm] = count - claim_rows = await self.db_pool.runInteraction( + bulk_claims = await self.db_pool.runInteraction( "claim_e2e_one_time_keys", - _claim_e2e_one_time_key, - user_id, - device_id, - algorithm, - count, - db_autocommit=db_autocommit, + self._claim_e2e_one_time_keys_bulk, + query_list, + db_autocommit=True, ) - if claim_rows: + + for user_id, device_id, algorithm, key_id, key_json in bulk_claims: device_results = results.setdefault(user_id, {}).setdefault( device_id, {} ) - for claim_row in claim_rows: - device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) + device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) + unfulfilled_claim_counts[(user_id, device_id, algorithm)] -= 1 + # Did we get enough OTKs? - count -= len(claim_rows) - if count: - missing.append((user_id, device_id, algorithm, count)) + missing = [ + (user, device, alg, count) + for (user, device, alg), count in unfulfilled_claim_counts.items() + if count > 0 + ] + else: + for user_id, device_id, algorithm, count in query_list: + claim_rows = await self.db_pool.runInteraction( + "claim_e2e_one_time_keys", + self._claim_e2e_one_time_key_simple, + user_id, + device_id, + algorithm, + count, + db_autocommit=False, + ) + if claim_rows: + device_results = results.setdefault(user_id, {}).setdefault( + device_id, {} + ) + for claim_row in claim_rows: + device_results[claim_row[0]] = json_decoder.decode(claim_row[1]) + # Did we get enough OTKs? + count -= len(claim_rows) + if count: + missing.append((user_id, device_id, algorithm, count)) return results, missing @@ -1362,6 +1294,99 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker return results + @trace + def _claim_e2e_one_time_key_simple( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + algorithm: str, + count: int, + ) -> List[Tuple[str, str]]: + """Claim OTK for device for DBs that don't support RETURNING. + + Returns: + A tuple of key name (algorithm + key ID) and key JSON, if an + OTK was found. + """ + + sql = """ + SELECT key_id, key_json FROM e2e_one_time_keys_json + WHERE user_id = ? AND device_id = ? AND algorithm = ? + LIMIT ? + """ + + txn.execute(sql, (user_id, device_id, algorithm, count)) + otk_rows = list(txn) + if not otk_rows: + return [] + + self.db_pool.simple_delete_many_txn( + txn, + table="e2e_one_time_keys_json", + column="key_id", + values=[otk_row[0] for otk_row in otk_rows], + keyvalues={ + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + }, + ) + self._invalidate_cache_and_stream( + txn, self.count_e2e_one_time_keys, (user_id, device_id) + ) + + return [(f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows] + + @trace + def _claim_e2e_one_time_keys_bulk( + self, + txn: LoggingTransaction, + query_list: Iterable[Tuple[str, str, str, int]], + ) -> List[Tuple[str, str, str, str, str]]: + """Bulk claim OTKs, for DBs that support DELETE FROM... RETURNING. + + Args: + query_list: Collection of tuples (user_id, device_id, algorithm, count) + as passed to claim_e2e_one_time_keys. + + Returns: + A list of tuples (user_id, device_id, algorithm, key_id, key_json) + for each OTK claimed. + """ + sql = """ + WITH claims(user_id, device_id, algorithm, claim_count) AS ( + VALUES ? 
+ ), ranked_keys AS ( + SELECT + user_id, device_id, algorithm, key_id, claim_count, + ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r + FROM e2e_one_time_keys_json + JOIN claims USING (user_id, device_id, algorithm) + ) + DELETE FROM e2e_one_time_keys_json k + WHERE (user_id, device_id, algorithm, key_id) IN ( + SELECT user_id, device_id, algorithm, key_id + FROM ranked_keys + WHERE r <= claim_count + ) + RETURNING user_id, device_id, algorithm, key_id, key_json; + """ + otk_rows = cast( + List[Tuple[str, str, str, str, str]], txn.execute_values(sql, query_list) + ) + + seen_user_device: Set[Tuple[str, str]] = set() + for user_id, device_id, _, _, _ in otk_rows: + if (user_id, device_id) in seen_user_device: + continue + seen_user_device.add((user_id, device_id)) + self._invalidate_cache_and_stream( + txn, self.count_e2e_one_time_keys, (user_id, device_id) + ) + + return otk_rows + class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def __init__( diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 24e405f429..90b4da9ad5 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -174,6 +174,164 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): }, ) + def test_claim_one_time_key_bulk(self) -> None: + """Like test_claim_one_time_key but claims multiple keys in one handler call.""" + # Apologies to the reader. This test is a little too verbose. It is particularly + # tricky to make assertions neatly with all these nested dictionaries in play. + + # Three users with two devices each. Each device uses two algorithms. + # Each algorithm is invoked with two keys. + alice = f"@alice:{self.hs.hostname}" + brian = f"@brian:{self.hs.hostname}" + chris = f"@chris:{self.hs.hostname}" + one_time_keys = { + alice: { + "alice_dev_1": { + "alg1:k1": {"dummy_id": 1}, + "alg1:k2": {"dummy_id": 2}, + "alg2:k3": {"dummy_id": 3}, + "alg2:k4": {"dummy_id": 4}, + }, + "alice_dev_2": { + "alg1:k5": {"dummy_id": 5}, + "alg1:k6": {"dummy_id": 6}, + "alg2:k7": {"dummy_id": 7}, + "alg2:k8": {"dummy_id": 8}, + }, + }, + brian: { + "brian_dev_1": { + "alg1:k9": {"dummy_id": 9}, + "alg1:k10": {"dummy_id": 10}, + "alg2:k11": {"dummy_id": 11}, + "alg2:k12": {"dummy_id": 12}, + }, + "brian_dev_2": { + "alg1:k13": {"dummy_id": 13}, + "alg1:k14": {"dummy_id": 14}, + "alg2:k15": {"dummy_id": 15}, + "alg2:k16": {"dummy_id": 16}, + }, + }, + chris: { + "chris_dev_1": { + "alg1:k17": {"dummy_id": 17}, + "alg1:k18": {"dummy_id": 18}, + "alg2:k19": {"dummy_id": 19}, + "alg2:k20": {"dummy_id": 20}, + }, + "chris_dev_2": { + "alg1:k21": {"dummy_id": 21}, + "alg1:k22": {"dummy_id": 22}, + "alg2:k23": {"dummy_id": 23}, + "alg2:k24": {"dummy_id": 24}, + }, + }, + } + for user_id, devices in one_time_keys.items(): + for device_id, keys_dict in devices.items(): + counts = self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + {"one_time_keys": keys_dict}, + ) + ) + # The upload should report 2 keys per algorithm. + expected_counts = { + "one_time_key_counts": { + # See count_e2e_one_time_keys for why this is hardcoded. + "signed_curve25519": 0, + "alg1": 2, + "alg2": 2, + }, + } + self.assertEqual(counts, expected_counts) + + # Claim a variety of keys. + # Raw format, easier to make test assertions about. 
+ claims_to_make = { + (alice, "alice_dev_1", "alg1"): 1, + (alice, "alice_dev_1", "alg2"): 2, + (alice, "alice_dev_2", "alg2"): 1, + (brian, "brian_dev_1", "alg1"): 2, + (brian, "brian_dev_2", "alg2"): 9001, + (chris, "chris_dev_2", "alg2"): 1, + } + # Convert to the format the handler wants. + query: Dict[str, Dict[str, Dict[str, int]]] = {} + for (user_id, device_id, algorithm), count in claims_to_make.items(): + query.setdefault(user_id, {}).setdefault(device_id, {})[algorithm] = count + claim_res = self.get_success( + self.handler.claim_one_time_keys( + query, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + + # No failures, please! + self.assertEqual(claim_res["failures"], {}) + + # Check that we get exactly the (user, device, algorithm)s we asked for. + got_otks = claim_res["one_time_keys"] + claimed_user_device_algorithms = { + (user_id, device_id, alg_key_id.split(":")[0]) + for user_id, devices in got_otks.items() + for device_id, key_dict in devices.items() + for alg_key_id in key_dict + } + self.assertEqual(claimed_user_device_algorithms, set(claims_to_make)) + + # Now check the keys we got are what we expected. + def assertExactlyOneOtk( + user_id: str, device_id: str, *alg_key_pairs: str + ) -> None: + key_dict = got_otks[user_id][device_id] + found = 0 + for alg_key in alg_key_pairs: + if alg_key in key_dict: + expected_key_json = one_time_keys[user_id][device_id][alg_key] + self.assertEqual(key_dict[alg_key], expected_key_json) + found += 1 + self.assertEqual(found, 1) + + def assertAllOtks(user_id: str, device_id: str, *alg_key_pairs: str) -> None: + key_dict = got_otks[user_id][device_id] + for alg_key in alg_key_pairs: + expected_key_json = one_time_keys[user_id][device_id][alg_key] + self.assertEqual(key_dict[alg_key], expected_key_json) + + # Expect a single arbitrary key to be returned. + assertExactlyOneOtk(alice, "alice_dev_1", "alg1:k1", "alg1:k2") + assertExactlyOneOtk(alice, "alice_dev_2", "alg2:k7", "alg2:k8") + assertExactlyOneOtk(chris, "chris_dev_2", "alg2:k23", "alg2:k24") + + assertAllOtks(alice, "alice_dev_1", "alg2:k3", "alg2:k4") + assertAllOtks(brian, "brian_dev_1", "alg1:k9", "alg1:k10") + assertAllOtks(brian, "brian_dev_2", "alg2:k15", "alg2:k16") + + # Now check the unused key counts. + for user_id, devices in one_time_keys.items(): + for device_id in devices: + counts_by_alg = self.get_success( + self.store.count_e2e_one_time_keys(user_id, device_id) + ) + # Somewhat fiddley to compute the expected count dict. 
+ expected_counts_by_alg = { + "signed_curve25519": 0, + } + for alg in ["alg1", "alg2"]: + claim_count = claims_to_make.get((user_id, device_id, alg), 0) + remaining_count = max(0, 2 - claim_count) + if remaining_count > 0: + expected_counts_by_alg[alg] = remaining_count + + self.assertEqual( + counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}" + ) + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" From 4724a6ded136c727bcff5082b94c2b3d6355e908 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 Oct 2023 13:47:08 +0000 Subject: [PATCH 118/142] 1.96.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++ changelog.d/16432.feature | 1 - changelog.d/16471.bugfix | 1 - changelog.d/16473.bugfix | 1 - changelog.d/16485.bugfix | 1 - changelog.d/16492.misc | 1 - changelog.d/16504.bugfix | 1 - changelog.d/16505.misc | 1 - changelog.d/16510.misc | 1 - changelog.d/16511.misc | 1 - changelog.d/16512.misc | 1 - changelog.d/16515.misc | 1 - changelog.d/16520.misc | 1 - changelog.d/16521.misc | 1 - changelog.d/16526.misc | 1 - changelog.d/16528.misc | 1 - changelog.d/16529.doc | 1 - changelog.d/16530.bugfix | 1 - changelog.d/16531.doc | 1 - changelog.d/16539.misc | 1 - changelog.d/16540.bugfix | 1 - changelog.d/16541.doc | 1 - changelog.d/16544.feature | 1 - changelog.d/16549.feature | 1 - changelog.d/16550.doc | 1 - changelog.d/16551.misc | 1 - changelog.d/16555.misc | 1 - changelog.d/16557.bugfix | 1 - changelog.d/16558.bugfix | 1 - changelog.d/16559.bugfix | 1 - changelog.d/16561.bugfix | 1 - changelog.d/16563.misc | 1 - changelog.d/16565.feature | 1 - changelog.d/16567.misc | 1 - changelog.d/16569.doc | 1 - changelog.d/16570.feature | 1 - changelog.d/16574.misc | 1 - changelog.d/16578.bugfix | 1 - changelog.d/16580.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 41 files changed, 68 insertions(+), 39 deletions(-) delete mode 100644 changelog.d/16432.feature delete mode 100644 changelog.d/16471.bugfix delete mode 100644 changelog.d/16473.bugfix delete mode 100644 changelog.d/16485.bugfix delete mode 100644 changelog.d/16492.misc delete mode 100644 changelog.d/16504.bugfix delete mode 100644 changelog.d/16505.misc delete mode 100644 changelog.d/16510.misc delete mode 100644 changelog.d/16511.misc delete mode 100644 changelog.d/16512.misc delete mode 100644 changelog.d/16515.misc delete mode 100644 changelog.d/16520.misc delete mode 100644 changelog.d/16521.misc delete mode 100644 changelog.d/16526.misc delete mode 100644 changelog.d/16528.misc delete mode 100644 changelog.d/16529.doc delete mode 100644 changelog.d/16530.bugfix delete mode 100644 changelog.d/16531.doc delete mode 100644 changelog.d/16539.misc delete mode 100644 changelog.d/16540.bugfix delete mode 100644 changelog.d/16541.doc delete mode 100644 changelog.d/16544.feature delete mode 100644 changelog.d/16549.feature delete mode 100644 changelog.d/16550.doc delete mode 100644 changelog.d/16551.misc delete mode 100644 changelog.d/16555.misc delete mode 100644 changelog.d/16557.bugfix delete mode 100644 changelog.d/16558.bugfix delete mode 100644 changelog.d/16559.bugfix delete mode 100644 changelog.d/16561.bugfix delete mode 100644 changelog.d/16563.misc delete mode 100644 changelog.d/16565.feature delete mode 100644 changelog.d/16567.misc delete mode 100644 changelog.d/16569.doc delete mode 100644 changelog.d/16570.feature delete mode 100644 changelog.d/16574.misc delete mode 100644 changelog.d/16578.bugfix delete mode 100644 changelog.d/16580.bugfix 
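The bulk one-time-key claim introduced in the patches above reduces the per-request bookkeeping to a simple counting exercise: every (user, device, algorithm) starts at its requested count, each row returned by the DELETE ... RETURNING query satisfies one claim, and whatever stays positive is reported back as unfulfilled. A minimal standalone sketch of that idea follows; the helper name derive_missing is illustrative only and is not part of the patch.

    # Standalone illustration of the bulk-claim bookkeeping (not part of the patch).
    from typing import Dict, List, Tuple


    def derive_missing(
        query_list: List[Tuple[str, str, str, int]],
        claimed_rows: List[Tuple[str, str, str, str, str]],
    ) -> List[Tuple[str, str, str, int]]:
        # Start with the requested count for every (user, device, algorithm).
        unfulfilled: Dict[Tuple[str, str, str], int] = {
            (user, device, alg): count for user, device, alg, count in query_list
        }
        # Each returned (user, device, algorithm, key_id, key_json) row satisfies one claim.
        for user, device, alg, _key_id, _key_json in claimed_rows:
            unfulfilled[(user, device, alg)] -= 1
        # Anything still positive could not be fully satisfied.
        return [
            (user, device, alg, count)
            for (user, device, alg), count in unfulfilled.items()
            if count > 0
        ]


    # Two keys requested, only one available: one claim remains unfulfilled.
    assert derive_missing(
        [("@a:hs", "dev1", "alg1", 2)],
        [("@a:hs", "dev1", "alg1", "k1", "{}")],
    ) == [("@a:hs", "dev1", "alg1", 1)]

In the patch itself this counting is interleaved with cache invalidation and JSON decoding, but the arithmetic above is all that decides what ends up in the returned "missing" list.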
diff --git a/CHANGES.md b/CHANGES.md index caecc737f3..9e088630f7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +# Synapse 1.96.0rc1 (2023-10-31) + +### Features + +- Allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) +- Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544)) +- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549)) +- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570)) + +### Bugfixes + +- Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) +- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580)) +- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485)) +- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504)) +- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530)) +- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540)) +- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558)) +- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559)) + +### Improved Documentation + +- Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529)) +- Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531)) +- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541)) +- Pin the recommended poetry version in contributors' guide. ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) +- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. 
([\#16569](https://github.com/matrix-org/synapse/issues/16569)) + +### Internal Changes + +- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492)) +- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505)) +- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510)) +- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511)) +- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512)) +- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515)) +- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520)) +- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521)) +- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551)) +- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528)) +- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) +- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555)) +- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563)) +- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567)) +- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) + +### Updates to locked dependencies + +* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575)) +* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538)) +* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572)) +* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534)) +* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576)) +* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535)) +* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577)) +* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) +* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536)) +* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573)) +* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537)) +* Bump urllib3 from 1.26.17 to 1.26.18. 
([\#16516](https://github.com/matrix-org/synapse/issues/16516)) + # Synapse 1.95.0 (2023-10-24) ### Internal Changes diff --git a/changelog.d/16432.feature b/changelog.d/16432.feature deleted file mode 100644 index 9a76e85592..0000000000 --- a/changelog.d/16432.feature +++ /dev/null @@ -1 +0,0 @@ -Allow multiple workers to write to receipts stream. diff --git a/changelog.d/16471.bugfix b/changelog.d/16471.bugfix deleted file mode 100644 index c94cd5b78f..0000000000 --- a/changelog.d/16471.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. diff --git a/changelog.d/16473.bugfix b/changelog.d/16473.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16473.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16485.bugfix b/changelog.d/16485.bugfix deleted file mode 100644 index 3cd7e1877f..0000000000 --- a/changelog.d/16485.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. diff --git a/changelog.d/16492.misc b/changelog.d/16492.misc deleted file mode 100644 index ecb3356bdd..0000000000 --- a/changelog.d/16492.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). diff --git a/changelog.d/16504.bugfix b/changelog.d/16504.bugfix deleted file mode 100644 index 60839c474b..0000000000 --- a/changelog.d/16504.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. diff --git a/changelog.d/16505.misc b/changelog.d/16505.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16505.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16510.misc b/changelog.d/16510.misc deleted file mode 100644 index 5556b5d74c..0000000000 --- a/changelog.d/16510.misc +++ /dev/null @@ -1 +0,0 @@ -Improve replication performance when purging rooms. diff --git a/changelog.d/16511.misc b/changelog.d/16511.misc deleted file mode 100644 index 7b7d9ee5b8..0000000000 --- a/changelog.d/16511.misc +++ /dev/null @@ -1 +0,0 @@ -Run tests against Python 3.12. diff --git a/changelog.d/16512.misc b/changelog.d/16512.misc deleted file mode 100644 index dcc53510c4..0000000000 --- a/changelog.d/16512.misc +++ /dev/null @@ -1 +0,0 @@ -Run trial & integration tests in continuous integration when `.ci` directory is modified. diff --git a/changelog.d/16515.misc b/changelog.d/16515.misc deleted file mode 100644 index d54dd730e1..0000000000 --- a/changelog.d/16515.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate call to mark remote server 'awake' when using a federation sending worker. diff --git a/changelog.d/16520.misc b/changelog.d/16520.misc deleted file mode 100644 index ea10fd4345..0000000000 --- a/changelog.d/16520.misc +++ /dev/null @@ -1 +0,0 @@ -Enable dirty runs on Complement CI, which is significantly faster. diff --git a/changelog.d/16521.misc b/changelog.d/16521.misc deleted file mode 100644 index c6a8ddcf9c..0000000000 --- a/changelog.d/16521.misc +++ /dev/null @@ -1 +0,0 @@ -Stop deleting from an unused table. 
diff --git a/changelog.d/16526.misc b/changelog.d/16526.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16526.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16528.misc b/changelog.d/16528.misc deleted file mode 100644 index 32954ea675..0000000000 --- a/changelog.d/16528.misc +++ /dev/null @@ -1 +0,0 @@ -Fix running unit tests on Twisted trunk. diff --git a/changelog.d/16529.doc b/changelog.d/16529.doc deleted file mode 100644 index 0f8a87f293..0000000000 --- a/changelog.d/16529.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of presence router. diff --git a/changelog.d/16530.bugfix b/changelog.d/16530.bugfix deleted file mode 100644 index 503ea0af20..0000000000 --- a/changelog.d/16530.bugfix +++ /dev/null @@ -1 +0,0 @@ -Force TLS certificate verification in user registration script. diff --git a/changelog.d/16531.doc b/changelog.d/16531.doc deleted file mode 100644 index 0932d1abf1..0000000000 --- a/changelog.d/16531.doc +++ /dev/null @@ -1 +0,0 @@ -Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. diff --git a/changelog.d/16539.misc b/changelog.d/16539.misc deleted file mode 100644 index cd21bdb26d..0000000000 --- a/changelog.d/16539.misc +++ /dev/null @@ -1 +0,0 @@ -Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. diff --git a/changelog.d/16540.bugfix b/changelog.d/16540.bugfix deleted file mode 100644 index 34ee9facf9..0000000000 --- a/changelog.d/16540.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. diff --git a/changelog.d/16541.doc b/changelog.d/16541.doc deleted file mode 100644 index 39aeecada6..0000000000 --- a/changelog.d/16541.doc +++ /dev/null @@ -1 +0,0 @@ -Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. diff --git a/changelog.d/16544.feature b/changelog.d/16544.feature deleted file mode 100644 index 92bf701be6..0000000000 --- a/changelog.d/16544.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new module API for controller presence. diff --git a/changelog.d/16549.feature b/changelog.d/16549.feature deleted file mode 100644 index 51129200f3..0000000000 --- a/changelog.d/16549.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. diff --git a/changelog.d/16550.doc b/changelog.d/16550.doc deleted file mode 100644 index 77ba422a06..0000000000 --- a/changelog.d/16550.doc +++ /dev/null @@ -1 +0,0 @@ -Pin the recommended poetry version in contributors' guide. diff --git a/changelog.d/16551.misc b/changelog.d/16551.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16551.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16555.misc b/changelog.d/16555.misc deleted file mode 100644 index d02efb2114..0000000000 --- a/changelog.d/16555.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce some spurious logging in worker mode. 
diff --git a/changelog.d/16557.bugfix b/changelog.d/16557.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16557.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16558.bugfix b/changelog.d/16558.bugfix deleted file mode 100644 index 64f419fd82..0000000000 --- a/changelog.d/16558.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. diff --git a/changelog.d/16559.bugfix b/changelog.d/16559.bugfix deleted file mode 100644 index e0fb16f807..0000000000 --- a/changelog.d/16559.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where invited/knocking users would not leave during a room purge. diff --git a/changelog.d/16561.bugfix b/changelog.d/16561.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16561.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16563.misc b/changelog.d/16563.misc deleted file mode 100644 index e433659e8f..0000000000 --- a/changelog.d/16563.misc +++ /dev/null @@ -1 +0,0 @@ -Stop porting a table in port db that we're going to nuke and rebuild anyway. diff --git a/changelog.d/16565.feature b/changelog.d/16565.feature deleted file mode 100644 index c807945fa8..0000000000 --- a/changelog.d/16565.feature +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of claiming encryption keys. diff --git a/changelog.d/16567.misc b/changelog.d/16567.misc deleted file mode 100644 index 858fbac7f2..0000000000 --- a/changelog.d/16567.misc +++ /dev/null @@ -1 +0,0 @@ -Deal with warnings from running complement in CI. diff --git a/changelog.d/16569.doc b/changelog.d/16569.doc deleted file mode 100644 index 7b2a439d30..0000000000 --- a/changelog.d/16569.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. diff --git a/changelog.d/16570.feature b/changelog.d/16570.feature deleted file mode 100644 index c807945fa8..0000000000 --- a/changelog.d/16570.feature +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of claiming encryption keys. diff --git a/changelog.d/16574.misc b/changelog.d/16574.misc deleted file mode 100644 index fae0f00fb3..0000000000 --- a/changelog.d/16574.misc +++ /dev/null @@ -1 +0,0 @@ -Allow building with `setuptools_rust` 1.8.0. diff --git a/changelog.d/16578.bugfix b/changelog.d/16578.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16578.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16580.bugfix b/changelog.d/16580.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16580.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/debian/changelog b/debian/changelog index 9bd5490ede..a77824e89a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.96.0~rc1) stable; urgency=medium + + * New Synapse release 1.96.0rc1. 
+ + -- Synapse Packaging team Tue, 31 Oct 2023 13:47:01 +0000 + matrix-synapse-py3 (1.95.0) stable; urgency=medium * New Synapse release 1.95.0. diff --git a/pyproject.toml b/pyproject.toml index 5b9f9fbde0..23e0004395 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.95.0" +version = "1.96.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 7a3a55ac98847d7adb0e200378abe07ef8d0c645 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Oct 2023 09:58:30 -0400 Subject: [PATCH 119/142] Merge pull request from GHSA-mp92-3jfm-3575 --- synapse/federation/federation_server.py | 8 +++++++- synapse/handlers/device.py | 3 +++ synapse/handlers/e2e_keys.py | 6 ++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3b27925517..8e3064c7e7 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -84,7 +84,7 @@ from synapse.replication.http.federation import ( from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, StateMap, get_domain_from_id, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache @@ -999,6 +999,12 @@ class FederationServer(FederationBase): async def on_claim_client_keys( self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool ) -> Dict[str, Any]: + if any( + not self.hs.is_mine(UserID.from_string(user_id)) + for user_id, _, _, _ in query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = await self._e2e_keys_handler.claim_local_one_time_keys( query, always_include_fallback_keys=always_include_fallback_keys diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 3ce96ef3cb..93472d0117 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -328,6 +328,9 @@ class DeviceWorkerHandler: return result async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: + if not self.hs.is_mine(UserID.from_string(user_id)): + raise SynapseError(400, "User is not hosted on this homeserver") + stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query( user_id ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d340d4aebe..d06524495f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -542,6 +542,12 @@ class E2eKeysHandler: device_keys_query: Dict[str, Optional[List[str]]] = query_body.get( "device_keys", {} ) + if any( + not self.is_mine(UserID.from_string(user_id)) + for user_id in device_keys_query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + res = await self.query_local_devices( device_keys_query, include_displaynames=( From daec55e1fe120c564240c5386e77941372bf458f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Oct 2023 09:58:30 -0400 Subject: [PATCH 120/142] Merge pull 
request from GHSA-mp92-3jfm-3575 --- synapse/federation/federation_server.py | 8 +++++++- synapse/handlers/device.py | 3 +++ synapse/handlers/e2e_keys.py | 6 ++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6ac8d16095..356ab0492b 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -84,7 +84,7 @@ from synapse.replication.http.federation import ( from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, StateMap, get_domain_from_id, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache @@ -999,6 +999,12 @@ class FederationServer(FederationBase): async def on_claim_client_keys( self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool ) -> Dict[str, Any]: + if any( + not self.hs.is_mine(UserID.from_string(user_id)) + for user_id, _, _, _ in query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) results = await self._e2e_keys_handler.claim_local_one_time_keys( query, always_include_fallback_keys=always_include_fallback_keys diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 544bc7c13d..b0f6011629 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -328,6 +328,9 @@ class DeviceWorkerHandler: return result async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: + if not self.hs.is_mine(UserID.from_string(user_id)): + raise SynapseError(400, "User is not hosted on this homeserver") + stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query( user_id ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 8c6432035d..5a0c1f47be 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -542,6 +542,12 @@ class E2eKeysHandler: device_keys_query: Dict[str, Optional[List[str]]] = query_body.get( "device_keys", {} ) + if any( + not self.is_mine(UserID.from_string(user_id)) + for user_id in device_keys_query + ): + raise SynapseError(400, "User is not hosted on this homeserver") + res = await self.query_local_devices( device_keys_query, include_displaynames=( From a11511954a58975d2e5400257a0cecfd27413447 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 Oct 2023 14:02:32 +0000 Subject: [PATCH 121/142] 1.95.1 --- CHANGES.md | 14 ++++++++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index caecc737f3..5aecdfb23d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,17 @@ +# Synapse 1.95.1 (2023-10-31) + +## Security advisory + +The following issue is fixed in 1.95.1. + +- [GHSA-mp92-3jfm-3575](https://github.com/matrix-org/synapse/security/advisories/GHSA-mp92-3jfm-3575) / [CVE-2023-43796](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-43796) — Moderate Severity + + Cached device information of remote users can be queried from Synapse. This can be used to enumerate the remote users known to a homeserver. 
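The fix applied in the two patches above is the same locality guard repeated at each federation entry point (`on_claim_client_keys`, `on_federation_query_user_devices`, and the device-keys query handler): reject any request that names a user who is not hosted on this homeserver, rather than answering from the local cache. A standalone sketch of the pattern is below; the helper name check_all_users_local is illustrative and the checks are inlined rather than factored out in the actual patches.

    # Standalone sketch of the locality guard added for GHSA-mp92-3jfm-3575.
    from typing import Iterable

    from synapse.api.errors import SynapseError
    from synapse.types import UserID


    def check_all_users_local(hs: "HomeServer", user_ids: Iterable[str]) -> None:
        """Raise a 400 error if any of the given user IDs is not local to `hs`.

        `hs` is a synapse.server.HomeServer; `hs.is_mine` compares the user's
        domain against this server's name.
        """
        if any(not hs.is_mine(UserID.from_string(user_id)) for user_id in user_ids):
            raise SynapseError(400, "User is not hosted on this homeserver")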
+ +See the advisory for more details. If you have any questions, email security@matrix.org. + + + # Synapse 1.95.0 (2023-10-24) ### Internal Changes diff --git a/debian/changelog b/debian/changelog index 9bd5490ede..2f9a7d3724 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.95.1) stable; urgency=medium + + * New Synapse release 1.95.1. + + -- Synapse Packaging team Tue, 31 Oct 2023 14:00:00 +0000 + matrix-synapse-py3 (1.95.0) stable; urgency=medium * New Synapse release 1.95.0. diff --git a/pyproject.toml b/pyproject.toml index f3764b1a57..b9cabe57e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.95.0" +version = "1.95.1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 60c5e8d79b2d74f00a0ee97c041dba374b27c6e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 Oct 2023 14:08:45 +0000 Subject: [PATCH 122/142] Revert "1.96.0rc1" This reverts commit 4724a6ded136c727bcff5082b94c2b3d6355e908. --- CHANGES.md | 61 --------------------------------------- changelog.d/16432.feature | 1 + changelog.d/16471.bugfix | 1 + changelog.d/16473.bugfix | 1 + changelog.d/16485.bugfix | 1 + changelog.d/16492.misc | 1 + changelog.d/16504.bugfix | 1 + changelog.d/16505.misc | 1 + changelog.d/16510.misc | 1 + changelog.d/16511.misc | 1 + changelog.d/16512.misc | 1 + changelog.d/16515.misc | 1 + changelog.d/16520.misc | 1 + changelog.d/16521.misc | 1 + changelog.d/16526.misc | 1 + changelog.d/16528.misc | 1 + changelog.d/16529.doc | 1 + changelog.d/16530.bugfix | 1 + changelog.d/16531.doc | 1 + changelog.d/16539.misc | 1 + changelog.d/16540.bugfix | 1 + changelog.d/16541.doc | 1 + changelog.d/16544.feature | 1 + changelog.d/16549.feature | 1 + changelog.d/16550.doc | 1 + changelog.d/16551.misc | 1 + changelog.d/16555.misc | 1 + changelog.d/16557.bugfix | 1 + changelog.d/16558.bugfix | 1 + changelog.d/16559.bugfix | 1 + changelog.d/16561.bugfix | 1 + changelog.d/16563.misc | 1 + changelog.d/16565.feature | 1 + changelog.d/16567.misc | 1 + changelog.d/16569.doc | 1 + changelog.d/16570.feature | 1 + changelog.d/16574.misc | 1 + changelog.d/16578.bugfix | 1 + changelog.d/16580.bugfix | 1 + debian/changelog | 6 ---- pyproject.toml | 2 +- 41 files changed, 39 insertions(+), 68 deletions(-) create mode 100644 changelog.d/16432.feature create mode 100644 changelog.d/16471.bugfix create mode 100644 changelog.d/16473.bugfix create mode 100644 changelog.d/16485.bugfix create mode 100644 changelog.d/16492.misc create mode 100644 changelog.d/16504.bugfix create mode 100644 changelog.d/16505.misc create mode 100644 changelog.d/16510.misc create mode 100644 changelog.d/16511.misc create mode 100644 changelog.d/16512.misc create mode 100644 changelog.d/16515.misc create mode 100644 changelog.d/16520.misc create mode 100644 changelog.d/16521.misc create mode 100644 changelog.d/16526.misc create mode 100644 changelog.d/16528.misc create mode 100644 changelog.d/16529.doc create mode 100644 changelog.d/16530.bugfix create mode 100644 changelog.d/16531.doc create mode 100644 changelog.d/16539.misc create mode 100644 changelog.d/16540.bugfix create mode 100644 changelog.d/16541.doc create mode 100644 changelog.d/16544.feature create mode 100644 changelog.d/16549.feature create mode 100644 changelog.d/16550.doc create mode 100644 changelog.d/16551.misc create mode 100644 
changelog.d/16555.misc create mode 100644 changelog.d/16557.bugfix create mode 100644 changelog.d/16558.bugfix create mode 100644 changelog.d/16559.bugfix create mode 100644 changelog.d/16561.bugfix create mode 100644 changelog.d/16563.misc create mode 100644 changelog.d/16565.feature create mode 100644 changelog.d/16567.misc create mode 100644 changelog.d/16569.doc create mode 100644 changelog.d/16570.feature create mode 100644 changelog.d/16574.misc create mode 100644 changelog.d/16578.bugfix create mode 100644 changelog.d/16580.bugfix diff --git a/CHANGES.md b/CHANGES.md index 9e088630f7..caecc737f3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,64 +1,3 @@ -# Synapse 1.96.0rc1 (2023-10-31) - -### Features - -- Allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) -- Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544)) -- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549)) -- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570)) - -### Bugfixes - -- Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) -- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580)) -- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485)) -- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504)) -- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530)) -- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540)) -- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558)) -- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559)) - -### Improved Documentation - -- Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529)) -- Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. 
([\#16531](https://github.com/matrix-org/synapse/issues/16531)) -- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541)) -- Pin the recommended poetry version in contributors' guide. ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) -- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569)) - -### Internal Changes - -- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492)) -- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505)) -- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510)) -- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511)) -- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512)) -- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515)) -- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520)) -- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521)) -- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551)) -- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528)) -- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) -- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555)) -- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563)) -- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567)) -- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) - -### Updates to locked dependencies - -* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575)) -* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538)) -* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572)) -* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534)) -* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576)) -* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535)) -* Bump serde from 1.0.189 to 1.0.190. 
([\#16577](https://github.com/matrix-org/synapse/issues/16577)) -* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) -* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536)) -* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573)) -* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537)) -* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516)) - # Synapse 1.95.0 (2023-10-24) ### Internal Changes diff --git a/changelog.d/16432.feature b/changelog.d/16432.feature new file mode 100644 index 0000000000..9a76e85592 --- /dev/null +++ b/changelog.d/16432.feature @@ -0,0 +1 @@ +Allow multiple workers to write to receipts stream. diff --git a/changelog.d/16471.bugfix b/changelog.d/16471.bugfix new file mode 100644 index 0000000000..c94cd5b78f --- /dev/null +++ b/changelog.d/16471.bugfix @@ -0,0 +1 @@ +Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. diff --git a/changelog.d/16473.bugfix b/changelog.d/16473.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16473.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16485.bugfix b/changelog.d/16485.bugfix new file mode 100644 index 0000000000..3cd7e1877f --- /dev/null +++ b/changelog.d/16485.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. diff --git a/changelog.d/16492.misc b/changelog.d/16492.misc new file mode 100644 index 0000000000..ecb3356bdd --- /dev/null +++ b/changelog.d/16492.misc @@ -0,0 +1 @@ +Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). diff --git a/changelog.d/16504.bugfix b/changelog.d/16504.bugfix new file mode 100644 index 0000000000..60839c474b --- /dev/null +++ b/changelog.d/16504.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. diff --git a/changelog.d/16505.misc b/changelog.d/16505.misc new file mode 100644 index 0000000000..bd7cdd42af --- /dev/null +++ b/changelog.d/16505.misc @@ -0,0 +1 @@ +Reduce memory allocations. diff --git a/changelog.d/16510.misc b/changelog.d/16510.misc new file mode 100644 index 0000000000..5556b5d74c --- /dev/null +++ b/changelog.d/16510.misc @@ -0,0 +1 @@ +Improve replication performance when purging rooms. diff --git a/changelog.d/16511.misc b/changelog.d/16511.misc new file mode 100644 index 0000000000..7b7d9ee5b8 --- /dev/null +++ b/changelog.d/16511.misc @@ -0,0 +1 @@ +Run tests against Python 3.12. diff --git a/changelog.d/16512.misc b/changelog.d/16512.misc new file mode 100644 index 0000000000..dcc53510c4 --- /dev/null +++ b/changelog.d/16512.misc @@ -0,0 +1 @@ +Run trial & integration tests in continuous integration when `.ci` directory is modified. 
diff --git a/changelog.d/16515.misc b/changelog.d/16515.misc new file mode 100644 index 0000000000..d54dd730e1 --- /dev/null +++ b/changelog.d/16515.misc @@ -0,0 +1 @@ +Remove duplicate call to mark remote server 'awake' when using a federation sending worker. diff --git a/changelog.d/16520.misc b/changelog.d/16520.misc new file mode 100644 index 0000000000..ea10fd4345 --- /dev/null +++ b/changelog.d/16520.misc @@ -0,0 +1 @@ +Enable dirty runs on Complement CI, which is significantly faster. diff --git a/changelog.d/16521.misc b/changelog.d/16521.misc new file mode 100644 index 0000000000..c6a8ddcf9c --- /dev/null +++ b/changelog.d/16521.misc @@ -0,0 +1 @@ +Stop deleting from an unused table. diff --git a/changelog.d/16526.misc b/changelog.d/16526.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16526.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/changelog.d/16528.misc b/changelog.d/16528.misc new file mode 100644 index 0000000000..32954ea675 --- /dev/null +++ b/changelog.d/16528.misc @@ -0,0 +1 @@ +Fix running unit tests on Twisted trunk. diff --git a/changelog.d/16529.doc b/changelog.d/16529.doc new file mode 100644 index 0000000000..0f8a87f293 --- /dev/null +++ b/changelog.d/16529.doc @@ -0,0 +1 @@ +Improve documentation of presence router. diff --git a/changelog.d/16530.bugfix b/changelog.d/16530.bugfix new file mode 100644 index 0000000000..503ea0af20 --- /dev/null +++ b/changelog.d/16530.bugfix @@ -0,0 +1 @@ +Force TLS certificate verification in user registration script. diff --git a/changelog.d/16531.doc b/changelog.d/16531.doc new file mode 100644 index 0000000000..0932d1abf1 --- /dev/null +++ b/changelog.d/16531.doc @@ -0,0 +1 @@ +Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. diff --git a/changelog.d/16539.misc b/changelog.d/16539.misc new file mode 100644 index 0000000000..cd21bdb26d --- /dev/null +++ b/changelog.d/16539.misc @@ -0,0 +1 @@ +Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. diff --git a/changelog.d/16540.bugfix b/changelog.d/16540.bugfix new file mode 100644 index 0000000000..34ee9facf9 --- /dev/null +++ b/changelog.d/16540.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. diff --git a/changelog.d/16541.doc b/changelog.d/16541.doc new file mode 100644 index 0000000000..39aeecada6 --- /dev/null +++ b/changelog.d/16541.doc @@ -0,0 +1 @@ +Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. diff --git a/changelog.d/16544.feature b/changelog.d/16544.feature new file mode 100644 index 0000000000..92bf701be6 --- /dev/null +++ b/changelog.d/16544.feature @@ -0,0 +1 @@ +Add a new module API for controller presence. diff --git a/changelog.d/16549.feature b/changelog.d/16549.feature new file mode 100644 index 0000000000..51129200f3 --- /dev/null +++ b/changelog.d/16549.feature @@ -0,0 +1 @@ +Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. 
diff --git a/changelog.d/16550.doc b/changelog.d/16550.doc new file mode 100644 index 0000000000..77ba422a06 --- /dev/null +++ b/changelog.d/16550.doc @@ -0,0 +1 @@ +Pin the recommended poetry version in contributors' guide. diff --git a/changelog.d/16551.misc b/changelog.d/16551.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16551.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/changelog.d/16555.misc b/changelog.d/16555.misc new file mode 100644 index 0000000000..d02efb2114 --- /dev/null +++ b/changelog.d/16555.misc @@ -0,0 +1 @@ +Reduce some spurious logging in worker mode. diff --git a/changelog.d/16557.bugfix b/changelog.d/16557.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16557.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16558.bugfix b/changelog.d/16558.bugfix new file mode 100644 index 0000000000..64f419fd82 --- /dev/null +++ b/changelog.d/16558.bugfix @@ -0,0 +1 @@ +Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. diff --git a/changelog.d/16559.bugfix b/changelog.d/16559.bugfix new file mode 100644 index 0000000000..e0fb16f807 --- /dev/null +++ b/changelog.d/16559.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where invited/knocking users would not leave during a room purge. diff --git a/changelog.d/16561.bugfix b/changelog.d/16561.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16561.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16563.misc b/changelog.d/16563.misc new file mode 100644 index 0000000000..e433659e8f --- /dev/null +++ b/changelog.d/16563.misc @@ -0,0 +1 @@ +Stop porting a table in port db that we're going to nuke and rebuild anyway. diff --git a/changelog.d/16565.feature b/changelog.d/16565.feature new file mode 100644 index 0000000000..c807945fa8 --- /dev/null +++ b/changelog.d/16565.feature @@ -0,0 +1 @@ +Improve the performance of claiming encryption keys. diff --git a/changelog.d/16567.misc b/changelog.d/16567.misc new file mode 100644 index 0000000000..858fbac7f2 --- /dev/null +++ b/changelog.d/16567.misc @@ -0,0 +1 @@ +Deal with warnings from running complement in CI. diff --git a/changelog.d/16569.doc b/changelog.d/16569.doc new file mode 100644 index 0000000000..7b2a439d30 --- /dev/null +++ b/changelog.d/16569.doc @@ -0,0 +1 @@ +Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. diff --git a/changelog.d/16570.feature b/changelog.d/16570.feature new file mode 100644 index 0000000000..c807945fa8 --- /dev/null +++ b/changelog.d/16570.feature @@ -0,0 +1 @@ +Improve the performance of claiming encryption keys. diff --git a/changelog.d/16574.misc b/changelog.d/16574.misc new file mode 100644 index 0000000000..fae0f00fb3 --- /dev/null +++ b/changelog.d/16574.misc @@ -0,0 +1 @@ +Allow building with `setuptools_rust` 1.8.0. 
diff --git a/changelog.d/16578.bugfix b/changelog.d/16578.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16578.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16580.bugfix b/changelog.d/16580.bugfix new file mode 100644 index 0000000000..4f4a0380cd --- /dev/null +++ b/changelog.d/16580.bugfix @@ -0,0 +1 @@ +Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/debian/changelog b/debian/changelog index a77824e89a..9bd5490ede 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,3 @@ -matrix-synapse-py3 (1.96.0~rc1) stable; urgency=medium - - * New Synapse release 1.96.0rc1. - - -- Synapse Packaging team Tue, 31 Oct 2023 13:47:01 +0000 - matrix-synapse-py3 (1.95.0) stable; urgency=medium * New Synapse release 1.95.0. diff --git a/pyproject.toml b/pyproject.toml index 23e0004395..5b9f9fbde0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.96.0rc1" +version = "1.95.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 4bb2b4aa9a48d0c44503f6d3c9f8d80d24e30a5f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 Oct 2023 14:09:13 +0000 Subject: [PATCH 123/142] 1.96.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++ changelog.d/16432.feature | 1 - changelog.d/16471.bugfix | 1 - changelog.d/16473.bugfix | 1 - changelog.d/16485.bugfix | 1 - changelog.d/16492.misc | 1 - changelog.d/16504.bugfix | 1 - changelog.d/16505.misc | 1 - changelog.d/16510.misc | 1 - changelog.d/16511.misc | 1 - changelog.d/16512.misc | 1 - changelog.d/16515.misc | 1 - changelog.d/16520.misc | 1 - changelog.d/16521.misc | 1 - changelog.d/16526.misc | 1 - changelog.d/16528.misc | 1 - changelog.d/16529.doc | 1 - changelog.d/16530.bugfix | 1 - changelog.d/16531.doc | 1 - changelog.d/16539.misc | 1 - changelog.d/16540.bugfix | 1 - changelog.d/16541.doc | 1 - changelog.d/16544.feature | 1 - changelog.d/16549.feature | 1 - changelog.d/16550.doc | 1 - changelog.d/16551.misc | 1 - changelog.d/16555.misc | 1 - changelog.d/16557.bugfix | 1 - changelog.d/16558.bugfix | 1 - changelog.d/16559.bugfix | 1 - changelog.d/16561.bugfix | 1 - changelog.d/16563.misc | 1 - changelog.d/16565.feature | 1 - changelog.d/16567.misc | 1 - changelog.d/16569.doc | 1 - changelog.d/16570.feature | 1 - changelog.d/16574.misc | 1 - changelog.d/16578.bugfix | 1 - changelog.d/16580.bugfix | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 41 files changed, 68 insertions(+), 39 deletions(-) delete mode 100644 changelog.d/16432.feature delete mode 100644 changelog.d/16471.bugfix delete mode 100644 changelog.d/16473.bugfix delete mode 100644 changelog.d/16485.bugfix delete mode 100644 changelog.d/16492.misc delete mode 100644 changelog.d/16504.bugfix delete mode 100644 changelog.d/16505.misc delete mode 100644 changelog.d/16510.misc delete mode 100644 changelog.d/16511.misc delete mode 100644 changelog.d/16512.misc delete mode 100644 changelog.d/16515.misc delete mode 100644 changelog.d/16520.misc delete mode 100644 changelog.d/16521.misc delete mode 100644 changelog.d/16526.misc delete mode 100644 changelog.d/16528.misc delete mode 100644 
changelog.d/16529.doc delete mode 100644 changelog.d/16530.bugfix delete mode 100644 changelog.d/16531.doc delete mode 100644 changelog.d/16539.misc delete mode 100644 changelog.d/16540.bugfix delete mode 100644 changelog.d/16541.doc delete mode 100644 changelog.d/16544.feature delete mode 100644 changelog.d/16549.feature delete mode 100644 changelog.d/16550.doc delete mode 100644 changelog.d/16551.misc delete mode 100644 changelog.d/16555.misc delete mode 100644 changelog.d/16557.bugfix delete mode 100644 changelog.d/16558.bugfix delete mode 100644 changelog.d/16559.bugfix delete mode 100644 changelog.d/16561.bugfix delete mode 100644 changelog.d/16563.misc delete mode 100644 changelog.d/16565.feature delete mode 100644 changelog.d/16567.misc delete mode 100644 changelog.d/16569.doc delete mode 100644 changelog.d/16570.feature delete mode 100644 changelog.d/16574.misc delete mode 100644 changelog.d/16578.bugfix delete mode 100644 changelog.d/16580.bugfix diff --git a/CHANGES.md b/CHANGES.md index 5aecdfb23d..736cfbe235 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +# Synapse 1.96.0rc1 (2023-10-31) + +### Features + +- Allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) +- Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544)) +- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549)) +- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570)) + +### Bugfixes + +- Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) +- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580)) +- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485)) +- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504)) +- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530)) +- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540)) +- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558)) +- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559)) + +### Improved Documentation + +- Improve documentation of presence router. 
([\#16529](https://github.com/matrix-org/synapse/issues/16529)) +- Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531)) +- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541)) +- Pin the recommended poetry version in contributors' guide. ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) +- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569)) + +### Internal Changes + +- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492)) +- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505)) +- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510)) +- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511)) +- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512)) +- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515)) +- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520)) +- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521)) +- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551)) +- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528)) +- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) +- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555)) +- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563)) +- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567)) +- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) + +### Updates to locked dependencies + +* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575)) +* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538)) +* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572)) +* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534)) +* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576)) +* Bump pygithub from 1.59.1 to 2.1.1. 
([\#16535](https://github.com/matrix-org/synapse/issues/16535)) +* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577)) +* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) +* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536)) +* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573)) +* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537)) +* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516)) + # Synapse 1.95.1 (2023-10-31) ## Security advisory diff --git a/changelog.d/16432.feature b/changelog.d/16432.feature deleted file mode 100644 index 9a76e85592..0000000000 --- a/changelog.d/16432.feature +++ /dev/null @@ -1 +0,0 @@ -Allow multiple workers to write to receipts stream. diff --git a/changelog.d/16471.bugfix b/changelog.d/16471.bugfix deleted file mode 100644 index c94cd5b78f..0000000000 --- a/changelog.d/16471.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. diff --git a/changelog.d/16473.bugfix b/changelog.d/16473.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16473.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16485.bugfix b/changelog.d/16485.bugfix deleted file mode 100644 index 3cd7e1877f..0000000000 --- a/changelog.d/16485.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. diff --git a/changelog.d/16492.misc b/changelog.d/16492.misc deleted file mode 100644 index ecb3356bdd..0000000000 --- a/changelog.d/16492.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). diff --git a/changelog.d/16504.bugfix b/changelog.d/16504.bugfix deleted file mode 100644 index 60839c474b..0000000000 --- a/changelog.d/16504.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. diff --git a/changelog.d/16505.misc b/changelog.d/16505.misc deleted file mode 100644 index bd7cdd42af..0000000000 --- a/changelog.d/16505.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory allocations. diff --git a/changelog.d/16510.misc b/changelog.d/16510.misc deleted file mode 100644 index 5556b5d74c..0000000000 --- a/changelog.d/16510.misc +++ /dev/null @@ -1 +0,0 @@ -Improve replication performance when purging rooms. diff --git a/changelog.d/16511.misc b/changelog.d/16511.misc deleted file mode 100644 index 7b7d9ee5b8..0000000000 --- a/changelog.d/16511.misc +++ /dev/null @@ -1 +0,0 @@ -Run tests against Python 3.12. diff --git a/changelog.d/16512.misc b/changelog.d/16512.misc deleted file mode 100644 index dcc53510c4..0000000000 --- a/changelog.d/16512.misc +++ /dev/null @@ -1 +0,0 @@ -Run trial & integration tests in continuous integration when `.ci` directory is modified. 
diff --git a/changelog.d/16515.misc b/changelog.d/16515.misc deleted file mode 100644 index d54dd730e1..0000000000 --- a/changelog.d/16515.misc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate call to mark remote server 'awake' when using a federation sending worker. diff --git a/changelog.d/16520.misc b/changelog.d/16520.misc deleted file mode 100644 index ea10fd4345..0000000000 --- a/changelog.d/16520.misc +++ /dev/null @@ -1 +0,0 @@ -Enable dirty runs on Complement CI, which is significantly faster. diff --git a/changelog.d/16521.misc b/changelog.d/16521.misc deleted file mode 100644 index c6a8ddcf9c..0000000000 --- a/changelog.d/16521.misc +++ /dev/null @@ -1 +0,0 @@ -Stop deleting from an unused table. diff --git a/changelog.d/16526.misc b/changelog.d/16526.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16526.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16528.misc b/changelog.d/16528.misc deleted file mode 100644 index 32954ea675..0000000000 --- a/changelog.d/16528.misc +++ /dev/null @@ -1 +0,0 @@ -Fix running unit tests on Twisted trunk. diff --git a/changelog.d/16529.doc b/changelog.d/16529.doc deleted file mode 100644 index 0f8a87f293..0000000000 --- a/changelog.d/16529.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of presence router. diff --git a/changelog.d/16530.bugfix b/changelog.d/16530.bugfix deleted file mode 100644 index 503ea0af20..0000000000 --- a/changelog.d/16530.bugfix +++ /dev/null @@ -1 +0,0 @@ -Force TLS certificate verification in user registration script. diff --git a/changelog.d/16531.doc b/changelog.d/16531.doc deleted file mode 100644 index 0932d1abf1..0000000000 --- a/changelog.d/16531.doc +++ /dev/null @@ -1 +0,0 @@ -Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. diff --git a/changelog.d/16539.misc b/changelog.d/16539.misc deleted file mode 100644 index cd21bdb26d..0000000000 --- a/changelog.d/16539.misc +++ /dev/null @@ -1 +0,0 @@ -Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. diff --git a/changelog.d/16540.bugfix b/changelog.d/16540.bugfix deleted file mode 100644 index 34ee9facf9..0000000000 --- a/changelog.d/16540.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. diff --git a/changelog.d/16541.doc b/changelog.d/16541.doc deleted file mode 100644 index 39aeecada6..0000000000 --- a/changelog.d/16541.doc +++ /dev/null @@ -1 +0,0 @@ -Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. diff --git a/changelog.d/16544.feature b/changelog.d/16544.feature deleted file mode 100644 index 92bf701be6..0000000000 --- a/changelog.d/16544.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new module API for controller presence. diff --git a/changelog.d/16549.feature b/changelog.d/16549.feature deleted file mode 100644 index 51129200f3..0000000000 --- a/changelog.d/16549.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. 
diff --git a/changelog.d/16550.doc b/changelog.d/16550.doc deleted file mode 100644 index 77ba422a06..0000000000 --- a/changelog.d/16550.doc +++ /dev/null @@ -1 +0,0 @@ -Pin the recommended poetry version in contributors' guide. diff --git a/changelog.d/16551.misc b/changelog.d/16551.misc deleted file mode 100644 index 93ceaeafc9..0000000000 --- a/changelog.d/16551.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/16555.misc b/changelog.d/16555.misc deleted file mode 100644 index d02efb2114..0000000000 --- a/changelog.d/16555.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce some spurious logging in worker mode. diff --git a/changelog.d/16557.bugfix b/changelog.d/16557.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16557.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16558.bugfix b/changelog.d/16558.bugfix deleted file mode 100644 index 64f419fd82..0000000000 --- a/changelog.d/16558.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. diff --git a/changelog.d/16559.bugfix b/changelog.d/16559.bugfix deleted file mode 100644 index e0fb16f807..0000000000 --- a/changelog.d/16559.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where invited/knocking users would not leave during a room purge. diff --git a/changelog.d/16561.bugfix b/changelog.d/16561.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16561.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16563.misc b/changelog.d/16563.misc deleted file mode 100644 index e433659e8f..0000000000 --- a/changelog.d/16563.misc +++ /dev/null @@ -1 +0,0 @@ -Stop porting a table in port db that we're going to nuke and rebuild anyway. diff --git a/changelog.d/16565.feature b/changelog.d/16565.feature deleted file mode 100644 index c807945fa8..0000000000 --- a/changelog.d/16565.feature +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of claiming encryption keys. diff --git a/changelog.d/16567.misc b/changelog.d/16567.misc deleted file mode 100644 index 858fbac7f2..0000000000 --- a/changelog.d/16567.misc +++ /dev/null @@ -1 +0,0 @@ -Deal with warnings from running complement in CI. diff --git a/changelog.d/16569.doc b/changelog.d/16569.doc deleted file mode 100644 index 7b2a439d30..0000000000 --- a/changelog.d/16569.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. diff --git a/changelog.d/16570.feature b/changelog.d/16570.feature deleted file mode 100644 index c807945fa8..0000000000 --- a/changelog.d/16570.feature +++ /dev/null @@ -1 +0,0 @@ -Improve the performance of claiming encryption keys. diff --git a/changelog.d/16574.misc b/changelog.d/16574.misc deleted file mode 100644 index fae0f00fb3..0000000000 --- a/changelog.d/16574.misc +++ /dev/null @@ -1 +0,0 @@ -Allow building with `setuptools_rust` 1.8.0. 
diff --git a/changelog.d/16578.bugfix b/changelog.d/16578.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16578.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/changelog.d/16580.bugfix b/changelog.d/16580.bugfix deleted file mode 100644 index 4f4a0380cd..0000000000 --- a/changelog.d/16580.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. diff --git a/debian/changelog b/debian/changelog index 2f9a7d3724..cbfcb8f44d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.96.0~rc1) stable; urgency=medium + + * New Synapse release 1.96.0rc1. + + -- Synapse Packaging team Tue, 31 Oct 2023 14:09:09 +0000 + matrix-synapse-py3 (1.95.1) stable; urgency=medium * New Synapse release 1.95.1. diff --git a/pyproject.toml b/pyproject.toml index f73726e008..23e0004395 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.95.1" +version = "1.96.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From c5b543938b08b0507a554f2e435054ed7a06b01a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 31 Oct 2023 14:17:27 +0000 Subject: [PATCH 124/142] Update changelog --- CHANGES.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 736cfbe235..2e7f199299 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,14 +2,14 @@ ### Features -- Allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) +- Add experimental support to allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432)) - Add a new module API for controller presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544)) - Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549)) - Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570)) ### Bugfixes -- Fixed a bug that prevents Grafana from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) +- Fixed a bug in the example Grafana dashboard that prevents it from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471)) - Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580)) - Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in a sync requests when there were missing remote events. 
([\#16485](https://github.com/matrix-org/synapse/issues/16485)) - Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504)) @@ -21,9 +21,9 @@ ### Improved Documentation - Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529)) -- Add a sentence to the opentracing docs on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531)) +- Add a sentence to the [opentracing docs](https://matrix-org.github.io/synapse/latest/opentracing.html) on how you can have jaeger in a different place than synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531)) - Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541)) -- Pin the recommended poetry version in contributors' guide. ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) +- Pin the recommended poetry version in [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16550](https://github.com/matrix-org/synapse/issues/16550)) - Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569)) ### Internal Changes @@ -38,7 +38,6 @@ - Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521)) - Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551)) - Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528)) -- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) - Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555)) - Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563)) - Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567)) @@ -52,6 +51,7 @@ * Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534)) * Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576)) * Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535)) +- Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539)) * Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577)) * Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574)) * Bump types-pillow from 10.0.0.3 to 10.1.0.0. 
([\#16536](https://github.com/matrix-org/synapse/issues/16536)) From 70b503f1449a99ee9735e9d30d6a2404e00901fa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Oct 2023 10:32:35 -0400 Subject: [PATCH 125/142] Fix import ordering issue introduced in 7a3a55ac98847d7adb0e200378abe07ef8d0c645. --- synapse/federation/federation_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8e3064c7e7..2bb2c64ebe 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -84,7 +84,7 @@ from synapse.replication.http.federation import ( from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary -from synapse.types import JsonDict, StateMap, get_domain_from_id, UserID +from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache From cfb6d38c47711b8dfaf0125353aec88d16708b97 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Oct 2023 13:13:28 -0400 Subject: [PATCH 126/142] Remove remaining usage of cursor_to_dict. (#16564) --- changelog.d/16564.misc | 1 + synapse/handlers/admin.py | 2 +- synapse/handlers/room_list.py | 43 +++++----- synapse/handlers/room_summary.py | 26 +++--- synapse/rest/admin/media.py | 6 +- synapse/rest/admin/registration_tokens.py | 13 ++- synapse/rest/admin/rooms.py | 11 ++- synapse/rest/admin/users.py | 10 ++- synapse/storage/background_updates.py | 14 ++-- synapse/storage/database.py | 15 ---- synapse/storage/databases/main/__init__.py | 52 +++++++++--- synapse/storage/databases/main/devices.py | 55 ++++++++----- .../databases/main/media_repository.py | 48 ++++++++--- .../storage/databases/main/registration.py | 42 ++++++---- synapse/storage/databases/main/room.py | 82 +++++++++++++++---- tests/handlers/test_register.py | 22 ++--- tests/storage/test_main.py | 4 +- tests/storage/test_room.py | 11 +-- 18 files changed, 300 insertions(+), 157 deletions(-) create mode 100644 changelog.d/16564.misc diff --git a/changelog.d/16564.misc b/changelog.d/16564.misc new file mode 100644 index 0000000000..93ceaeafc9 --- /dev/null +++ b/changelog.d/16564.misc @@ -0,0 +1 @@ +Improve type hints. 
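Editor's note on the cursor_to_dict removal in this commit: the refactor replaces the generic `cursor_to_dict` helper with plain tuple rows packed into frozen attrs classes, giving callers typed attribute access instead of dict lookups. A minimal, self-contained sketch of that pattern follows; it is not part of the patch, and the class and column names are illustrative rather than Synapse's actual schema.

import attr
from typing import List, Tuple


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ExampleMedia:
    media_id: str
    media_length: int
    safe_from_quarantine: bool


def rows_to_media(rows: List[Tuple[str, int, int]]) -> List[ExampleMedia]:
    # SQLite reports booleans as integers, hence the explicit bool() cast,
    # mirroring what the refactor does for columns such as safe_from_quarantine.
    return [
        ExampleMedia(media_id=r[0], media_length=r[1], safe_from_quarantine=bool(r[2]))
        for r in rows
    ]


print(rows_to_media([("abc123", 2048, 1)]))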
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 2c2baeac67..d06f8e3296 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -283,7 +283,7 @@ class AdminHandler: start, limit, user_id ) for media in media_ids: - writer.write_media_id(media["media_id"], media) + writer.write_media_id(media.media_id, attr.asdict(media)) logger.info( "[%s] Written %d media_ids of %s", diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 36e2db8975..2947e154be 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -33,6 +33,7 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) +from synapse.storage.databases.main.room import LargestRoomStats from synapse.types import JsonDict, JsonMapping, ThirdPartyInstanceID from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.caches.response_cache import ResponseCache @@ -170,26 +171,24 @@ class RoomListHandler: ignore_non_federatable=from_federation, ) - def build_room_entry(room: JsonDict) -> JsonDict: + def build_room_entry(room: LargestRoomStats) -> JsonDict: entry = { - "room_id": room["room_id"], - "name": room["name"], - "topic": room["topic"], - "canonical_alias": room["canonical_alias"], - "num_joined_members": room["joined_members"], - "avatar_url": room["avatar"], - "world_readable": room["history_visibility"] + "room_id": room.room_id, + "name": room.name, + "topic": room.topic, + "canonical_alias": room.canonical_alias, + "num_joined_members": room.joined_members, + "avatar_url": room.avatar, + "world_readable": room.history_visibility == HistoryVisibility.WORLD_READABLE, - "guest_can_join": room["guest_access"] == "can_join", - "join_rule": room["join_rules"], - "room_type": room["room_type"], + "guest_can_join": room.guest_access == "can_join", + "join_rule": room.join_rules, + "room_type": room.room_type, } # Filter out Nones – rather omit the field altogether return {k: v for k, v in entry.items() if v is not None} - results = [build_room_entry(r) for r in results] - response: JsonDict = {} num_results = len(results) if limit is not None: @@ -212,33 +211,33 @@ class RoomListHandler: # If there was a token given then we assume that there # must be previous results. 
response["prev_batch"] = RoomListNextBatch( - last_joined_members=initial_entry["num_joined_members"], - last_room_id=initial_entry["room_id"], + last_joined_members=initial_entry.joined_members, + last_room_id=initial_entry.room_id, direction_is_forward=False, ).to_token() if more_to_come: response["next_batch"] = RoomListNextBatch( - last_joined_members=final_entry["num_joined_members"], - last_room_id=final_entry["room_id"], + last_joined_members=final_entry.joined_members, + last_room_id=final_entry.room_id, direction_is_forward=True, ).to_token() else: if has_batch_token: response["next_batch"] = RoomListNextBatch( - last_joined_members=final_entry["num_joined_members"], - last_room_id=final_entry["room_id"], + last_joined_members=final_entry.joined_members, + last_room_id=final_entry.room_id, direction_is_forward=True, ).to_token() if more_to_come: response["prev_batch"] = RoomListNextBatch( - last_joined_members=initial_entry["num_joined_members"], - last_room_id=initial_entry["room_id"], + last_joined_members=initial_entry.joined_members, + last_room_id=initial_entry.room_id, direction_is_forward=False, ).to_token() - response["chunk"] = results + response["chunk"] = [build_room_entry(r) for r in results] response["total_room_count_estimate"] = await self.store.count_public_rooms( network_tuple, diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index dd559b4c45..1dfb12e065 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -703,24 +703,24 @@ class RoomSummaryHandler: # there should always be an entry assert stats is not None, "unable to retrieve stats for %s" % (room_id,) - entry = { - "room_id": stats["room_id"], - "name": stats["name"], - "topic": stats["topic"], - "canonical_alias": stats["canonical_alias"], - "num_joined_members": stats["joined_members"], - "avatar_url": stats["avatar"], - "join_rule": stats["join_rules"], + entry: JsonDict = { + "room_id": stats.room_id, + "name": stats.name, + "topic": stats.topic, + "canonical_alias": stats.canonical_alias, + "num_joined_members": stats.joined_members, + "avatar_url": stats.avatar, + "join_rule": stats.join_rules, "world_readable": ( - stats["history_visibility"] == HistoryVisibility.WORLD_READABLE + stats.history_visibility == HistoryVisibility.WORLD_READABLE ), - "guest_can_join": stats["guest_access"] == "can_join", - "room_type": stats["room_type"], + "guest_can_join": stats.guest_access == "can_join", + "room_type": stats.room_type, } if self._msc3266_enabled: - entry["im.nheko.summary.version"] = stats["version"] - entry["im.nheko.summary.encryption"] = stats["encryption"] + entry["im.nheko.summary.version"] = stats.version + entry["im.nheko.summary.encryption"] = stats.encryption # Federation requests need to provide additional information so the # requested server is able to filter the response appropriately. 
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index b7637dff0b..8cf5268854 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -17,6 +17,8 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple +import attr + from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -418,7 +420,7 @@ class UserMediaRestServlet(RestServlet): start, limit, user_id, order_by, direction ) - ret = {"media": media, "total": total} + ret = {"media": [attr.asdict(m) for m in media], "total": total} if (start + limit) < total: ret["next_token"] = start + len(media) @@ -477,7 +479,7 @@ class UserMediaRestServlet(RestServlet): ) deleted_media, total = await self.media_repository.delete_local_media_ids( - [row["media_id"] for row in media] + [m.media_id for m in media] ) return HTTPStatus.OK, {"deleted_media": deleted_media, "total": total} diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index ffce92d45e..f3e06d3da3 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -77,7 +77,18 @@ class ListRegistrationTokensRestServlet(RestServlet): await assert_requester_is_admin(self.auth, request) valid = parse_boolean(request, "valid") token_list = await self.store.get_registration_tokens(valid) - return HTTPStatus.OK, {"registration_tokens": token_list} + return HTTPStatus.OK, { + "registration_tokens": [ + { + "token": t[0], + "uses_allowed": t[1], + "pending": t[2], + "completed": t[3], + "expiry_time": t[4], + } + for t in token_list + ] + } class NewRegistrationTokenRestServlet(RestServlet): diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 0659f22a89..23a034522c 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -16,6 +16,8 @@ from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple, cast from urllib import parse as urlparse +import attr + from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter @@ -306,10 +308,13 @@ class RoomRestServlet(RestServlet): raise NotFoundError("Room not found") members = await self.store.get_users_in_room(room_id) - ret["joined_local_devices"] = await self.store.count_devices_by_users(members) - ret["forgotten"] = await self.store.is_locally_forgotten_room(room_id) + result = attr.asdict(ret) + result["joined_local_devices"] = await self.store.count_devices_by_users( + members + ) + result["forgotten"] = await self.store.is_locally_forgotten_room(room_id) - return HTTPStatus.OK, ret + return HTTPStatus.OK, result async def on_DELETE( self, request: SynapseRequest, room_id: str diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 7fe16130e7..73878dd99d 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -18,6 +18,8 @@ import secrets from http import HTTPStatus from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +import attr + from synapse.api.constants import Direction, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( @@ -161,11 +163,13 @@ class UsersRestServletV2(RestServlet): ) # If support for MSC3866 is not enabled, don't show the approval flag. 
+ filter = None if not self._msc3866_enabled: - for user in users: - del user["approved"] - ret = {"users": users, "total": total} + def _filter(a: attr.Attribute) -> bool: + return a.name != "approved" + + ret = {"users": [attr.asdict(u, filter=filter) for u in users], "total": total} if (start + limit) < total: ret["next_token"] = str(start + len(users)) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 12829d3d7d..7426dbcad6 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -28,6 +28,7 @@ from typing import ( Sequence, Tuple, Type, + cast, ) import attr @@ -488,14 +489,14 @@ class BackgroundUpdater: True if we have finished running all the background updates, otherwise False """ - def get_background_updates_txn(txn: Cursor) -> List[Dict[str, Any]]: + def get_background_updates_txn(txn: Cursor) -> List[Tuple[str, Optional[str]]]: txn.execute( """ SELECT update_name, depends_on FROM background_updates ORDER BY ordering, update_name """ ) - return self.db_pool.cursor_to_dict(txn) + return cast(List[Tuple[str, Optional[str]]], txn.fetchall()) if not self._current_background_update: all_pending_updates = await self.db_pool.runInteraction( @@ -507,14 +508,13 @@ class BackgroundUpdater: return True # find the first update which isn't dependent on another one in the queue. - pending = {update["update_name"] for update in all_pending_updates} - for upd in all_pending_updates: - depends_on = upd["depends_on"] + pending = {update_name for update_name, depends_on in all_pending_updates} + for update_name, depends_on in all_pending_updates: if not depends_on or depends_on not in pending: break logger.info( "Not starting on bg update %s until %s is done", - upd["update_name"], + update_name, depends_on, ) else: @@ -524,7 +524,7 @@ class BackgroundUpdater: "another: dependency cycle?" ) - self._current_background_update = upd["update_name"] + self._current_background_update = update_name # We have a background update to run, otherwise we would have returned # early. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a4e7048368..6d54bb0eb2 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -18,7 +18,6 @@ import logging import time import types from collections import defaultdict -from sys import intern from time import monotonic as monotonic_time from typing import ( TYPE_CHECKING, @@ -1042,20 +1041,6 @@ class DatabasePool: self._db_pool.runWithConnection(inner_func, *args, **kwargs) ) - @staticmethod - def cursor_to_dict(cursor: Cursor) -> List[Dict[str, Any]]: - """Converts a SQL cursor into an list of dicts. - - Args: - cursor: The DBAPI cursor which has executed a query. - Returns: - A list of dicts where the key is the column header. - """ - assert cursor.description is not None, "cursor.description was None" - col_headers = [intern(str(column[0])) for column in cursor.description] - results = [dict(zip(col_headers, row)) for row in cursor] - return results - async def execute(self, desc: str, query: str, *args: Any) -> List[Tuple[Any, ...]]: """Runs a single query for a result set. 
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 840d725114..89f4077351 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -17,6 +17,8 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast +import attr + from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig from synapse.storage._base import make_in_list_sql_clause @@ -28,7 +30,7 @@ from synapse.storage.database import ( from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor -from synapse.types import JsonDict, get_domain_from_id +from synapse.types import get_domain_from_id from .account_data import AccountDataStore from .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore @@ -82,6 +84,25 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class UserPaginateResponse: + """This is very similar to UserInfo, but not quite the same.""" + + name: str + user_type: Optional[str] + is_guest: bool + admin: bool + deactivated: bool + shadow_banned: bool + displayname: Optional[str] + avatar_url: Optional[str] + creation_ts: Optional[int] + approved: bool + erased: bool + last_seen_ts: int + locked: bool + + class DataStore( EventsBackgroundUpdatesStore, ExperimentalFeaturesStore, @@ -156,7 +177,7 @@ class DataStore( approved: bool = True, not_user_types: Optional[List[str]] = None, locked: bool = False, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[UserPaginateResponse], int]: """Function to retrieve a paginated list of users from users list. This will return a json list of users and the total number of users matching the filter criteria. @@ -182,7 +203,7 @@ class DataStore( def get_users_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[JsonDict], int]: + ) -> Tuple[List[UserPaginateResponse], int]: filters = [] args: list = [] @@ -282,13 +303,24 @@ class DataStore( """ args += [limit, start] txn.execute(sql, args) - users = self.db_pool.cursor_to_dict(txn) - - # some of those boolean values are returned as integers when we're on SQLite - columns_to_boolify = ["erased"] - for user in users: - for column in columns_to_boolify: - user[column] = bool(user[column]) + users = [ + UserPaginateResponse( + name=row[0], + user_type=row[1], + is_guest=bool(row[2]), + admin=bool(row[3]), + deactivated=bool(row[4]), + shadow_banned=bool(row[5]), + displayname=row[6], + avatar_url=row[7], + creation_ts=row[8], + approved=bool(row[9]), + erased=bool(row[10]), + last_seen_ts=row[11], + locked=bool(row[12]), + ) + for row in txn + ] return users, count diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 49edbb9e06..b0811a4cf1 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1620,7 +1620,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): # # For each duplicate, we delete all the existing rows and put one back. 
- KEY_COLS = ["stream_id", "destination", "user_id", "device_id"] last_row = progress.get( "last_row", {"stream_id": 0, "destination": "", "user_id": "", "device_id": ""}, @@ -1628,44 +1627,62 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): def _txn(txn: LoggingTransaction) -> int: clause, args = make_tuple_comparison_clause( - [(x, last_row[x]) for x in KEY_COLS] + [ + ("stream_id", last_row["stream_id"]), + ("destination", last_row["destination"]), + ("user_id", last_row["user_id"]), + ("device_id", last_row["device_id"]), + ] ) - sql = """ + sql = f""" SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts FROM device_lists_outbound_pokes - WHERE %s - GROUP BY %s + WHERE {clause} + GROUP BY stream_id, destination, user_id, device_id HAVING count(*) > 1 - ORDER BY %s + ORDER BY stream_id, destination, user_id, device_id LIMIT ? - """ % ( - clause, # WHERE - ",".join(KEY_COLS), # GROUP BY - ",".join(KEY_COLS), # ORDER BY - ) + """ txn.execute(sql, args + [batch_size]) - rows = self.db_pool.cursor_to_dict(txn) + rows = txn.fetchall() - row = None - for row in rows: + stream_id, destination, user_id, device_id = None, None, None, None + for stream_id, destination, user_id, device_id, _ in rows: self.db_pool.simple_delete_txn( txn, "device_lists_outbound_pokes", - {x: row[x] for x in KEY_COLS}, + { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + }, ) - row["sent"] = False self.db_pool.simple_insert_txn( txn, "device_lists_outbound_pokes", - row, + { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + "sent": False, + }, ) - if row: + if rows: self.db_pool.updates._background_update_progress_txn( txn, BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES, - {"last_row": row}, + { + "last_row": { + "stream_id": stream_id, + "destination": destination, + "user_id": user_id, + "device_id": device_id, + } + }, ) return len(rows) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index aeb3db596c..c8d7c9fd32 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -26,6 +26,8 @@ from typing import ( cast, ) +import attr + from synapse.api.constants import Direction from synapse.logging.opentracing import trace from synapse.media._base import ThumbnailInfo @@ -45,6 +47,18 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( ) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class LocalMedia: + media_id: str + media_type: str + media_length: int + upload_name: str + created_ts: int + last_access_ts: int + quarantined_by: Optional[str] + safe_from_quarantine: bool + + class MediaSortOrder(Enum): """ Enum to define the sorting method used when returning media with @@ -180,7 +194,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): user_id: str, order_by: str = MediaSortOrder.CREATED_TS.value, direction: Direction = Direction.FORWARDS, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> Tuple[List[LocalMedia], int]: """Get a paginated list of metadata for a local piece of media which an user_id has uploaded @@ -197,7 +211,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): def get_local_media_by_user_paginate_txn( txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: + ) -> Tuple[List[LocalMedia], int]: # Set ordering order_by_column = MediaSortOrder(order_by).value @@ -217,14 +231,14 @@ class 
MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): sql = """ SELECT - "media_id", - "media_type", - "media_length", - "upload_name", - "created_ts", - "last_access_ts", - "quarantined_by", - "safe_from_quarantine" + media_id, + media_type, + media_length, + upload_name, + created_ts, + last_access_ts, + quarantined_by, + safe_from_quarantine FROM local_media_repository WHERE user_id = ? ORDER BY {order_by_column} {order}, media_id ASC @@ -236,7 +250,19 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): args += [limit, start] txn.execute(sql, args) - media = self.db_pool.cursor_to_dict(txn) + media = [ + LocalMedia( + media_id=row[0], + media_type=row[1], + media_length=row[2], + upload_name=row[3], + created_ts=row[4], + last_access_ts=row[5], + quarantined_by=row[6], + safe_from_quarantine=bool(row[7]), + ) + for row in txn + ] return media, count return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index e09ab21593..933d76e905 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1517,7 +1517,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): async def get_registration_tokens( self, valid: Optional[bool] = None - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: """List all registration tokens. Used by the admin API. Args: @@ -1526,34 +1526,48 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): Default is None: return all tokens regardless of validity. Returns: - A list of dicts, each containing details of a token. + A list of tuples containing: + * The token + * The number of users allowed (or None) + * Whether it is pending + * Whether it has been completed + * An expiry time (or None if no expiry) """ def select_registration_tokens_txn( txn: LoggingTransaction, now: int, valid: Optional[bool] - ) -> List[Dict[str, Any]]: + ) -> List[Tuple[str, Optional[int], int, int, Optional[int]]]: if valid is None: # Return all tokens regardless of validity - txn.execute("SELECT * FROM registration_tokens") + txn.execute( + """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + """ + ) elif valid: # Select valid tokens only - sql = ( - "SELECT * FROM registration_tokens WHERE " - "(uses_allowed > pending + completed OR uses_allowed IS NULL) " - "AND (expiry_time > ? OR expiry_time IS NULL)" - ) + sql = """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + WHERE (uses_allowed > pending + completed OR uses_allowed IS NULL) + AND (expiry_time > ? OR expiry_time IS NULL) + """ txn.execute(sql, [now]) else: # Select invalid tokens only - sql = ( - "SELECT * FROM registration_tokens WHERE " - "uses_allowed <= pending + completed OR expiry_time <= ?" - ) + sql = """ + SELECT token, uses_allowed, pending, completed, expiry_time + FROM registration_tokens + WHERE uses_allowed <= pending + completed OR expiry_time <= ? 
+ """ txn.execute(sql, [now]) - return self.db_pool.cursor_to_dict(txn) + return cast( + List[Tuple[str, Optional[int], int, int, Optional[int]]], txn.fetchall() + ) return await self.db_pool.runInteraction( "select_registration_tokens", diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3e8fcf1975..6d4b9891e7 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -78,6 +78,31 @@ class RatelimitOverride: burst_count: int +@attr.s(slots=True, frozen=True, auto_attribs=True) +class LargestRoomStats: + room_id: str + name: Optional[str] + canonical_alias: Optional[str] + joined_members: int + join_rules: Optional[str] + guest_access: Optional[str] + history_visibility: Optional[str] + state_events: int + avatar: Optional[str] + topic: Optional[str] + room_type: Optional[str] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomStats(LargestRoomStats): + joined_local_members: int + version: Optional[str] + creator: Optional[str] + encryption: Optional[str] + federatable: bool + public: bool + + class RoomSortOrder(Enum): """ Enum to define the sorting method used when returning rooms with get_rooms_paginate @@ -204,7 +229,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): allow_none=True, ) - async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]: + async def get_room_with_stats(self, room_id: str) -> Optional[RoomStats]: """Retrieve room with statistics. Args: @@ -215,7 +240,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def get_room_with_stats_txn( txn: LoggingTransaction, room_id: str - ) -> Optional[Dict[str, Any]]: + ) -> Optional[RoomStats]: sql = """ SELECT room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room AS joined_local_members, rooms.room_version AS version, @@ -229,15 +254,28 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): WHERE room_id = ? """ txn.execute(sql, [room_id]) - # Catch error if sql returns empty result to return "None" instead of an error - try: - res = self.db_pool.cursor_to_dict(txn)[0] - except IndexError: + row = txn.fetchone() + if not row: return None - - res["federatable"] = bool(res["federatable"]) - res["public"] = bool(res["public"]) - return res + return RoomStats( + room_id=row[0], + name=row[1], + canonical_alias=row[2], + joined_members=row[3], + joined_local_members=row[4], + version=row[5], + creator=row[6], + encryption=row[7], + federatable=bool(row[8]), + public=bool(row[9]), + join_rules=row[10], + guest_access=row[11], + history_visibility=row[12], + state_events=row[13], + avatar=row[14], + topic=row[15], + room_type=row[16], + ) return await self.db_pool.runInteraction( "get_room_with_stats", get_room_with_stats_txn, room_id @@ -368,7 +406,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): bounds: Optional[Tuple[int, str]], forwards: bool, ignore_non_federatable: bool = False, - ) -> List[Dict[str, Any]]: + ) -> List[LargestRoomStats]: """Gets the largest public rooms (where largest is in terms of joined members, as tracked in the statistics table). 
@@ -505,20 +543,34 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): def _get_largest_public_rooms_txn( txn: LoggingTransaction, - ) -> List[Dict[str, Any]]: + ) -> List[LargestRoomStats]: txn.execute(sql, query_args) - results = self.db_pool.cursor_to_dict(txn) + results = [ + LargestRoomStats( + room_id=r[0], + name=r[1], + canonical_alias=r[3], + joined_members=r[4], + join_rules=r[8], + guest_access=r[7], + history_visibility=r[6], + state_events=0, + avatar=r[5], + topic=r[2], + room_type=r[9], + ) + for r in txn + ] if not forwards: results.reverse() return results - ret_val = await self.db_pool.runInteraction( + return await self.db_pool.runInteraction( "get_largest_public_rooms", _get_largest_public_rooms_txn ) - return ret_val @cached(max_entries=10000) async def is_room_blocked(self, room_id: str) -> Optional[bool]: diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index e9fbf32c7c..032b89d684 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -342,10 +342,10 @@ class RegistrationTestCase(unittest.HomeserverTestCase): # Ensure the room is properly not federated. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["federatable"]) - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "public") - self.assertIsNone(room["guest_access"]) + self.assertFalse(room.federatable) + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "public") + self.assertIsNone(room.guest_access) # The user should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -372,7 +372,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): # Ensure the room is properly a public room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertEqual(room["join_rules"], "public") + self.assertEqual(room.join_rules, "public") # Both users should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(inviter)) @@ -411,9 +411,9 @@ class RegistrationTestCase(unittest.HomeserverTestCase): # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "invite") - self.assertEqual(room["guest_access"], "can_join") + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "invite") + self.assertEqual(room.guest_access, "can_join") # Both users should be in the room. rooms = self.get_success(self.store.get_rooms_for_user(inviter)) @@ -455,9 +455,9 @@ class RegistrationTestCase(unittest.HomeserverTestCase): # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) assert room is not None - self.assertFalse(room["public"]) - self.assertEqual(room["join_rules"], "invite") - self.assertEqual(room["guest_access"], "can_join") + self.assertFalse(room.public) + self.assertEqual(room.join_rules, "invite") + self.assertEqual(room.guest_access, "can_join") # Both users should be in the room. 
rooms = self.get_success(self.store.get_rooms_for_user(inviter)) diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index b8823d6993..01c0e5e671 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -39,11 +39,11 @@ class DataStoreTestCase(unittest.HomeserverTestCase): ) self.assertEqual(1, total) - self.assertEqual(self.displayname, users.pop()["displayname"]) + self.assertEqual(self.displayname, users.pop().displayname) users, total = self.get_success( self.store.get_users_paginate(0, 10, name="BC", guests=False) ) self.assertEqual(1, total) - self.assertEqual(self.displayname, users.pop()["displayname"]) + self.assertEqual(self.displayname, users.pop().displayname) diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 1e27f2c275..ce34195a25 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -59,14 +59,9 @@ class RoomStoreTestCase(HomeserverTestCase): def test_get_room_with_stats(self) -> None: res = self.get_success(self.store.get_room_with_stats(self.room.to_string())) assert res is not None - self.assertLessEqual( - { - "room_id": self.room.to_string(), - "creator": self.u_creator.to_string(), - "public": True, - }.items(), - res.items(), - ) + self.assertEqual(res.room_id, self.room.to_string()) + self.assertEqual(res.creator, self.u_creator.to_string()) + self.assertTrue(res.public) def test_get_room_with_stats_unknown_room(self) -> None: self.assertIsNone( From ed1b8795766e71416d6955412f782f4960547ccc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Oct 2023 16:16:17 -0400 Subject: [PATCH 127/142] Do not call getfullargspec on every call. (#16589) getfullargspec is relatively expensive and the results will not change between calls, so precalculate it outside the wrapper. --- changelog.d/16589.misc | 1 + synapse/logging/opentracing.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/16589.misc diff --git a/changelog.d/16589.misc b/changelog.d/16589.misc new file mode 100644 index 0000000000..6e69368bbf --- /dev/null +++ b/changelog.d/16589.misc @@ -0,0 +1 @@ +Improve performance when using opentracing. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 4454fe29a5..e297fa9c8b 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -1019,11 +1019,14 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func + # getfullargspec is somewhat expensive, so ensure it is only called a single + # time (the function signature shouldn't change anyway). + argspec = inspect.getfullargspec(func) + @contextlib.contextmanager def _wrapping_logic( - func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> Generator[None, None, None]: - argspec = inspect.getfullargspec(func) # We use `[1:]` to skip the `self` object reference and `start=1` to # make the index line up with `argspec.args`. 
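Editor's note: the hunk above hoists the `inspect.getfullargspec` call out of the per-call wrapper so the introspection happens once at decoration time. A simplified, self-contained stand-in for that pattern follows; it is not part of the patch and is not the real `tag_args` implementation.

import functools
import inspect
from typing import Any, Callable, TypeVar, cast

F = TypeVar("F", bound=Callable[..., Any])


def tag_args_sketch(func: F) -> F:
    # The (relatively expensive) introspection runs exactly once, when the
    # function is decorated; its result cannot change between calls.
    argspec = inspect.getfullargspec(func)

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # The wrapper only reads the precomputed argspec on each call.
        for name, value in zip(argspec.args, args):
            print(f"arg {name}={value!r}")
        return func(*args, **kwargs)

    return cast(F, wrapper)


@tag_args_sketch
def add(a: int, b: int) -> int:
    return a + b


add(1, 2)  # prints arg a=1 and arg b=2, then returns 3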
# From c812f43bd74347da1ced0ddcfff9d199988add34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 10:23:13 +0000 Subject: [PATCH 128/142] Bump twisted from 23.8.0 to 23.10.0 (#16588) --- changelog.d/16588.misc | 1 + poetry.lock | 17 ++++++++--------- synapse/util/__init__.py | 2 +- tests/http/__init__.py | 2 +- tests/server.py | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 changelog.d/16588.misc diff --git a/changelog.d/16588.misc b/changelog.d/16588.misc new file mode 100644 index 0000000000..c12b6cfc28 --- /dev/null +++ b/changelog.d/16588.misc @@ -0,0 +1 @@ +Bump twisted from 23.8.0 to 23.10.0. diff --git a/poetry.lock b/poetry.lock index 00f5b4a20a..334005241e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2972,13 +2972,13 @@ urllib3 = ">=1.26.0" [[package]] name = "twisted" -version = "23.8.0" +version = "23.10.0" description = "An asynchronous networking framework written in Python" optional = false -python-versions = ">=3.7.1" +python-versions = ">=3.8.0" files = [ - {file = "twisted-23.8.0-py3-none-any.whl", hash = "sha256:b8bdba145de120ffb36c20e6e071cce984e89fba798611ed0704216fb7f884cd"}, - {file = "twisted-23.8.0.tar.gz", hash = "sha256:3c73360add17336a622c0d811c2a2ce29866b6e59b1125fd6509b17252098a24"}, + {file = "twisted-23.10.0-py3-none-any.whl", hash = "sha256:4ae8bce12999a35f7fe6443e7f1893e6fe09588c8d2bed9c35cdce8ff2d5b444"}, + {file = "twisted-23.10.0.tar.gz", hash = "sha256:987847a0790a2c597197613686e2784fd54167df3a55d0fb17c8412305d76ce5"}, ] [package.dependencies] @@ -2991,19 +2991,18 @@ incremental = ">=22.10.0" pyopenssl = {version = ">=21.0.0", optional = true, markers = "extra == \"tls\""} service-identity = {version = ">=18.1.0", optional = true, markers = "extra == \"tls\""} twisted-iocpsupport = {version = ">=1.0.2,<2", markers = "platform_system == \"Windows\""} -typing-extensions = ">=3.10.0" +typing-extensions = ">=4.2.0" zope-interface = ">=5" [package.extras] -all-non-platform = ["twisted[conch,contextvars,http2,serial,test,tls]", "twisted[conch,contextvars,http2,serial,test,tls]"] +all-non-platform = ["twisted[conch,http2,serial,test,tls]", "twisted[conch,http2,serial,test,tls]"] conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"] -contextvars = ["contextvars (>=2.4,<3)"] dev = ["coverage (>=6b1,<7)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "twisted[dev-release]", "twistedchecker (>=0.7,<1.0)"] -dev-release = ["pydoctor (>=23.4.0,<23.5.0)", "pydoctor (>=23.4.0,<23.5.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "readthedocs-sphinx-ext (>=2.2,<3.0)", "sphinx (>=5,<7)", "sphinx (>=5,<7)", "sphinx-rtd-theme (>=1.2,<2.0)", "sphinx-rtd-theme (>=1.2,<2.0)", "towncrier (>=22.12,<23.0)", "towncrier (>=22.12,<23.0)", "urllib3 (<2)", "urllib3 (<2)"] +dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"] gtk-platform = ["pygobject", "pygobject", "twisted[all-non-platform]", "twisted[all-non-platform]"] http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"] macos-platform = ["pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "twisted[all-non-platform]", "twisted[all-non-platform]"] -mypy = ["mypy (==0.981)", "mypy-extensions (==0.4.3)", "mypy-zope (==0.3.11)", 
"twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] +mypy = ["mypy (>=1.5.1,<1.6.0)", "mypy-zope (>=1.0.1,<1.1.0)", "twisted[all-non-platform,dev]", "types-pyopenssl", "types-setuptools"] osx-platform = ["twisted[macos-platform]", "twisted[macos-platform]"] serial = ["pyserial (>=3.0)", "pywin32 (!=226)"] test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"] diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9f3b8741c1..8d9df352b2 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -93,7 +93,7 @@ class Clock: _reactor: IReactorTime = attr.ib() - @defer.inlineCallbacks # type: ignore[arg-type] # Issue in Twisted's type annotations + @defer.inlineCallbacks def sleep(self, seconds: float) -> "Generator[Deferred[float], Any, Any]": d: defer.Deferred[float] = defer.Deferred() with context.PreserveLoggingContext(): diff --git a/tests/http/__init__.py b/tests/http/__init__.py index d5306e7ee0..9108a3007b 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -182,7 +182,7 @@ def wrap_server_factory_for_tls( ) else: return TLSMemoryBIOFactory( - connection_creator, isClient=False, wrappedFactory=factory, clock=clock # type: ignore[call-arg] + connection_creator, isClient=False, wrappedFactory=factory, clock=clock ) diff --git a/tests/server.py b/tests/server.py index cfb0fb823b..c8342db399 100644 --- a/tests/server.py +++ b/tests/server.py @@ -484,7 +484,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): if twisted.version > Version("Twisted", 23, 8, 0): from twisted.protocols import tls - tls._get_default_clock = lambda: self # type: ignore[attr-defined] + tls._get_default_clock = lambda: self self.nameResolver = SimpleResolverComplexifier(FakeResolver()) super().__init__() From 0afbef30cfb28fbee09989b0a089c86352126ad2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 2 Nov 2023 09:41:00 -0400 Subject: [PATCH 129/142] Use simple_select_many_txn in event persistance code. (#16585) Just to standardize on the normal helpers, it might also have a slight perf improvement on PostgreSQL which will now use `ANY (?)` instead of `IN (?, ?, ...)`. --- changelog.d/16585.misc | 1 + synapse/storage/databases/main/events.py | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 changelog.d/16585.misc diff --git a/changelog.d/16585.misc b/changelog.d/16585.misc new file mode 100644 index 0000000000..01f3ecc843 --- /dev/null +++ b/changelog.d/16585.misc @@ -0,0 +1 @@ +Use standard SQL helpers in persistence code. \ No newline at end of file diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 3c1492e3ad..b74ff1c498 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1350,13 +1350,19 @@ class PersistEventsStore: PartialStateConflictError: if attempting to persist a partial state event in a room that has been un-partial stated. 
""" - txn.execute( - "SELECT event_id, outlier FROM events WHERE event_id in (%s)" - % (",".join(["?"] * len(events_and_contexts)),), - [event.event_id for event, _ in events_and_contexts], + rows = cast( + List[Tuple[str, bool]], + self.db_pool.simple_select_many_txn( + txn, + "events", + "event_id", + [event.event_id for event, _ in events_and_contexts], + keyvalues={}, + retcols=("event_id", "outlier"), + ), ) - have_persisted = dict(cast(Iterable[Tuple[str, bool]], txn)) + have_persisted = dict(rows) logger.debug( "_update_outliers_txn: events=%s have_persisted=%s", From 92828a7f958b2cb1925e2a64ed08c2efb6293787 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Nov 2023 07:30:31 -0400 Subject: [PATCH 130/142] Simplify event persistence code (#16584) The event persistence code used to handle multiple rooms at a time, but was simplified to only ever be called with a single room at a time (different rooms are now handled in parallel). The code is still generic to multiple rooms causing a lot of work that is unnecessary (e.g. unnecessary loops, and partitioning data by room). This strips out the ability to handle multiple rooms at once, greatly simplifying the code. --- changelog.d/16584.misc | 1 + changelog.d/16586.misc | 1 + synapse/storage/controllers/persist_events.py | 252 ++++++------ synapse/storage/databases/main/events.py | 380 +++++++++--------- 4 files changed, 324 insertions(+), 310 deletions(-) create mode 100644 changelog.d/16584.misc create mode 100644 changelog.d/16586.misc diff --git a/changelog.d/16584.misc b/changelog.d/16584.misc new file mode 100644 index 0000000000..beec8f2301 --- /dev/null +++ b/changelog.d/16584.misc @@ -0,0 +1 @@ +Simplify persistance code to be per-room. diff --git a/changelog.d/16586.misc b/changelog.d/16586.misc new file mode 100644 index 0000000000..f02c4a2060 --- /dev/null +++ b/changelog.d/16586.misc @@ -0,0 +1 @@ +Avoid updating the stream cache unnecessarily. diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index f39ae2d635..1529c86cc5 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -542,13 +542,15 @@ class EventsPersistenceStorageController: return await res.get_state(self._state_controller, StateFilter.all()) async def _persist_event_batch( - self, _room_id: str, task: _PersistEventsTask + self, room_id: str, task: _PersistEventsTask ) -> Dict[str, str]: """Callback for the _event_persist_queue Calculates the change to current state and forward extremities, and persists the given events and with those updates. + Assumes that we are only persisting events for one room at a time. + Returns: A dictionary of event ID to event ID we didn't persist as we already had another event persisted with the same TXN ID. @@ -594,140 +596,23 @@ class EventsPersistenceStorageController: # We can't easily parallelize these since different chunks # might contain the same event. :( - # NB: Assumes that we are only persisting events for one room - # at a time. 
- - # map room_id->set[event_ids] giving the new forward - # extremities in each room - new_forward_extremities: Dict[str, Set[str]] = {} - - # map room_id->(to_delete, to_insert) where to_delete is a list - # of type/state keys to remove from current state, and to_insert - # is a map (type,key)->event_id giving the state delta in each - # room - state_delta_for_room: Dict[str, DeltaState] = {} + new_forward_extremities = None + state_delta_for_room = None if not backfilled: with Measure(self._clock, "_calculate_state_and_extrem"): - # Work out the new "current state" for each room. + # Work out the new "current state" for the room. # We do this by working out what the new extremities are and then # calculating the state from that. - events_by_room: Dict[str, List[Tuple[EventBase, EventContext]]] = {} - for event, context in chunk: - events_by_room.setdefault(event.room_id, []).append( - (event, context) - ) - - for room_id, ev_ctx_rm in events_by_room.items(): - latest_event_ids = ( - await self.main_store.get_latest_event_ids_in_room(room_id) - ) - new_latest_event_ids = await self._calculate_new_extremities( - room_id, ev_ctx_rm, latest_event_ids - ) - - if new_latest_event_ids == latest_event_ids: - # No change in extremities, so no change in state - continue - - # there should always be at least one forward extremity. - # (except during the initial persistence of the send_join - # results, in which case there will be no existing - # extremities, so we'll `continue` above and skip this bit.) - assert new_latest_event_ids, "No forward extremities left!" - - new_forward_extremities[room_id] = new_latest_event_ids - - len_1 = ( - len(latest_event_ids) == 1 - and len(new_latest_event_ids) == 1 - ) - if len_1: - all_single_prev_not_state = all( - len(event.prev_event_ids()) == 1 - and not event.is_state() - for event, ctx in ev_ctx_rm - ) - # Don't bother calculating state if they're just - # a long chain of single ancestor non-state events. - if all_single_prev_not_state: - continue - - state_delta_counter.inc() - if len(new_latest_event_ids) == 1: - state_delta_single_event_counter.inc() - - # This is a fairly handwavey check to see if we could - # have guessed what the delta would have been when - # processing one of these events. - # What we're interested in is if the latest extremities - # were the same when we created the event as they are - # now. When this server creates a new event (as opposed - # to receiving it over federation) it will use the - # forward extremities as the prev_events, so we can - # guess this by looking at the prev_events and checking - # if they match the current forward extremities. - for ev, _ in ev_ctx_rm: - prev_event_ids = set(ev.prev_event_ids()) - if latest_event_ids == prev_event_ids: - state_delta_reuse_delta_counter.inc() - break - - logger.debug("Calculating state delta for room %s", room_id) - with Measure( - self._clock, "persist_events.get_new_state_after_events" - ): - res = await self._get_new_state_after_events( - room_id, - ev_ctx_rm, - latest_event_ids, - new_latest_event_ids, - ) - current_state, delta_ids, new_latest_event_ids = res - - # there should always be at least one forward extremity. - # (except during the initial persistence of the send_join - # results, in which case there will be no existing - # extremities, so we'll `continue` above and skip this bit.) - assert new_latest_event_ids, "No forward extremities left!" 
- - new_forward_extremities[room_id] = new_latest_event_ids - - # If either are not None then there has been a change, - # and we need to work out the delta (or use that - # given) - delta = None - if delta_ids is not None: - # If there is a delta we know that we've - # only added or replaced state, never - # removed keys entirely. - delta = DeltaState([], delta_ids) - elif current_state is not None: - with Measure( - self._clock, "persist_events.calculate_state_delta" - ): - delta = await self._calculate_state_delta( - room_id, current_state - ) - - if delta: - # If we have a change of state then lets check - # whether we're actually still a member of the room, - # or if our last user left. If we're no longer in - # the room then we delete the current state and - # extremities. - is_still_joined = await self._is_server_still_joined( - room_id, - ev_ctx_rm, - delta, - ) - if not is_still_joined: - logger.info("Server no longer in room %s", room_id) - delta.no_longer_in_room = True - - state_delta_for_room[room_id] = delta + ( + new_forward_extremities, + state_delta_for_room, + ) = await self._calculate_new_forward_extremities_and_state_delta( + room_id, chunk + ) await self.persist_events_store._persist_events_and_state_updates( + room_id, chunk, state_delta_for_room=state_delta_for_room, new_forward_extremities=new_forward_extremities, @@ -737,6 +622,117 @@ class EventsPersistenceStorageController: return replaced_events + async def _calculate_new_forward_extremities_and_state_delta( + self, room_id: str, ev_ctx_rm: List[Tuple[EventBase, EventContext]] + ) -> Tuple[Optional[Set[str]], Optional[DeltaState]]: + """Calculates the new forward extremities and state delta for a room + given events to persist. + + Assumes that we are only persisting events for one room at a time. + + Returns: + A tuple of: + A set of str giving the new forward extremities the room + + The state delta for the room. + """ + + latest_event_ids = await self.main_store.get_latest_event_ids_in_room(room_id) + new_latest_event_ids = await self._calculate_new_extremities( + room_id, ev_ctx_rm, latest_event_ids + ) + + if new_latest_event_ids == latest_event_ids: + # No change in extremities, so no change in state + return (None, None) + + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + + new_forward_extremities = new_latest_event_ids + + len_1 = len(latest_event_ids) == 1 and len(new_latest_event_ids) == 1 + if len_1: + all_single_prev_not_state = all( + len(event.prev_event_ids()) == 1 and not event.is_state() + for event, ctx in ev_ctx_rm + ) + # Don't bother calculating state if they're just + # a long chain of single ancestor non-state events. + if all_single_prev_not_state: + return (new_forward_extremities, None) + + state_delta_counter.inc() + if len(new_latest_event_ids) == 1: + state_delta_single_event_counter.inc() + + # This is a fairly handwavey check to see if we could + # have guessed what the delta would have been when + # processing one of these events. + # What we're interested in is if the latest extremities + # were the same when we created the event as they are + # now. 
When this server creates a new event (as opposed + # to receiving it over federation) it will use the + # forward extremities as the prev_events, so we can + # guess this by looking at the prev_events and checking + # if they match the current forward extremities. + for ev, _ in ev_ctx_rm: + prev_event_ids = set(ev.prev_event_ids()) + if latest_event_ids == prev_event_ids: + state_delta_reuse_delta_counter.inc() + break + + logger.debug("Calculating state delta for room %s", room_id) + with Measure(self._clock, "persist_events.get_new_state_after_events"): + res = await self._get_new_state_after_events( + room_id, + ev_ctx_rm, + latest_event_ids, + new_latest_event_ids, + ) + current_state, delta_ids, new_latest_event_ids = res + + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + + new_forward_extremities = new_latest_event_ids + + # If either are not None then there has been a change, + # and we need to work out the delta (or use that + # given) + delta = None + if delta_ids is not None: + # If there is a delta we know that we've + # only added or replaced state, never + # removed keys entirely. + delta = DeltaState([], delta_ids) + elif current_state is not None: + with Measure(self._clock, "persist_events.calculate_state_delta"): + delta = await self._calculate_state_delta(room_id, current_state) + + if delta: + # If we have a change of state then lets check + # whether we're actually still a member of the room, + # or if our last user left. If we're no longer in + # the room then we delete the current state and + # extremities. + is_still_joined = await self._is_server_still_joined( + room_id, + ev_ctx_rm, + delta, + ) + if not is_still_joined: + logger.info("Server no longer in room %s", room_id) + delta.no_longer_in_room = True + + return (new_forward_extremities, delta) + async def _calculate_new_extremities( self, room_id: str, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b74ff1c498..647ba182f6 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -79,7 +79,7 @@ class DeltaState: Attributes: to_delete: List of type/state_keys to delete from current state to_insert: Map of state to upsert into current state - no_longer_in_room: The server is not longer in the room, so the room + no_longer_in_room: The server is no longer in the room, so the room should e.g. be removed from `current_state_events` table. """ @@ -131,22 +131,25 @@ class PersistEventsStore: @trace async def _persist_events_and_state_updates( self, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], *, - state_delta_for_room: Dict[str, DeltaState], - new_forward_extremities: Dict[str, Set[str]], + state_delta_for_room: Optional[DeltaState], + new_forward_extremities: Optional[Set[str]], use_negative_stream_ordering: bool = False, inhibit_local_membership_updates: bool = False, ) -> None: """Persist a set of events alongside updates to the current state and - forward extremities tables. + forward extremities tables. + + Assumes that we are only persisting events for one room at a time. 
Args: + room_id: events_and_contexts: - state_delta_for_room: Map from room_id to the delta to apply to - room state - new_forward_extremities: Map from room_id to set of event IDs - that are the new forward extremities of the room. + state_delta_for_room: The delta to apply to the room state + new_forward_extremities: A set of event IDs that are the new forward + extremities of the room. use_negative_stream_ordering: Whether to start stream_ordering on the negative side and decrement. This should be set as True for backfilled events because backfilled events get a negative @@ -196,6 +199,7 @@ class PersistEventsStore: await self.db_pool.runInteraction( "persist_events", self._persist_events_txn, + room_id=room_id, events_and_contexts=events_and_contexts, inhibit_local_membership_updates=inhibit_local_membership_updates, state_delta_for_room=state_delta_for_room, @@ -221,9 +225,9 @@ class PersistEventsStore: event_counter.labels(event.type, origin_type, origin_entity).inc() - for room_id, latest_event_ids in new_forward_extremities.items(): + if new_forward_extremities: self.store.get_latest_event_ids_in_room.prefill( - (room_id,), frozenset(latest_event_ids) + (room_id,), frozenset(new_forward_extremities) ) async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]: @@ -336,10 +340,11 @@ class PersistEventsStore: self, txn: LoggingTransaction, *, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], inhibit_local_membership_updates: bool, - state_delta_for_room: Dict[str, DeltaState], - new_forward_extremities: Dict[str, Set[str]], + state_delta_for_room: Optional[DeltaState], + new_forward_extremities: Optional[Set[str]], ) -> None: """Insert some number of room events into the necessary database tables. @@ -347,8 +352,11 @@ class PersistEventsStore: and the rejections table. Things reading from those table will need to check whether the event was rejected. + Assumes that we are only persisting events for one room at a time. + Args: txn + room_id: The room the events are from events_and_contexts: events to persist inhibit_local_membership_updates: Stop the local_current_membership from being updated by these events. This should be set to True @@ -357,10 +365,9 @@ class PersistEventsStore: delete_existing True to purge existing table rows for the events from the database. This is useful when retrying due to IntegrityError. - state_delta_for_room: The current-state delta for each room. - new_forward_extremities: The new forward extremities for each room. - For each room, a list of the event ids which are the forward - extremities. + state_delta_for_room: The current-state delta for the room. + new_forward_extremities: The new forward extremities for the room: + a set of the event ids which are the forward extremities. Raises: PartialStateConflictError: if attempting to persist a partial state event in @@ -376,14 +383,13 @@ class PersistEventsStore: # # Annoyingly SQLite doesn't support row level locking. if isinstance(self.database_engine, PostgresEngine): - for room_id in {e.room_id for e, _ in events_and_contexts}: - txn.execute( - "SELECT room_version FROM rooms WHERE room_id = ? FOR SHARE", - (room_id,), - ) - row = txn.fetchone() - if row is None: - raise Exception(f"Room does not exist {room_id}") + txn.execute( + "SELECT room_version FROM rooms WHERE room_id = ? 
FOR SHARE", + (room_id,), + ) + row = txn.fetchone() + if row is None: + raise Exception(f"Room does not exist {room_id}") # stream orderings should have been assigned by now assert min_stream_order @@ -419,7 +425,9 @@ class PersistEventsStore: events_and_contexts ) - self._update_room_depths_txn(txn, events_and_contexts=events_and_contexts) + self._update_room_depths_txn( + txn, room_id, events_and_contexts=events_and_contexts + ) # _update_outliers_txn filters out any events which have already been # persisted, and returns the filtered list. @@ -432,11 +440,13 @@ class PersistEventsStore: self._store_event_txn(txn, events_and_contexts=events_and_contexts) - self._update_forward_extremities_txn( - txn, - new_forward_extremities=new_forward_extremities, - max_stream_order=max_stream_order, - ) + if new_forward_extremities: + self._update_forward_extremities_txn( + txn, + room_id, + new_forward_extremities=new_forward_extremities, + max_stream_order=max_stream_order, + ) self._persist_transaction_ids_txn(txn, events_and_contexts) @@ -464,7 +474,10 @@ class PersistEventsStore: # We call this last as it assumes we've inserted the events into # room_memberships, where applicable. # NB: This function invalidates all state related caches - self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) + if state_delta_for_room: + self._update_current_state_txn( + txn, room_id, state_delta_for_room, min_stream_order + ) def _persist_event_auth_chain_txn( self, @@ -1026,74 +1039,75 @@ class PersistEventsStore: await self.db_pool.runInteraction( "update_current_state", self._update_current_state_txn, - state_delta_by_room={room_id: state_delta}, + room_id, + delta_state=state_delta, stream_id=stream_ordering, ) def _update_current_state_txn( self, txn: LoggingTransaction, - state_delta_by_room: Dict[str, DeltaState], + room_id: str, + delta_state: DeltaState, stream_id: int, ) -> None: - for room_id, delta_state in state_delta_by_room.items(): - to_delete = delta_state.to_delete - to_insert = delta_state.to_insert + to_delete = delta_state.to_delete + to_insert = delta_state.to_insert - # Figure out the changes of membership to invalidate the - # `get_rooms_for_user` cache. - # We find out which membership events we may have deleted - # and which we have added, then we invalidate the caches for all - # those users. - members_changed = { - state_key - for ev_type, state_key in itertools.chain(to_delete, to_insert) - if ev_type == EventTypes.Member - } + # Figure out the changes of membership to invalidate the + # `get_rooms_for_user` cache. + # We find out which membership events we may have deleted + # and which we have added, then we invalidate the caches for all + # those users. + members_changed = { + state_key + for ev_type, state_key in itertools.chain(to_delete, to_insert) + if ev_type == EventTypes.Member + } - if delta_state.no_longer_in_room: - # Server is no longer in the room so we delete the room from - # current_state_events, being careful we've already updated the - # rooms.room_version column (which gets populated in a - # background task). - self._upsert_room_version_txn(txn, room_id) + if delta_state.no_longer_in_room: + # Server is no longer in the room so we delete the room from + # current_state_events, being careful we've already updated the + # rooms.room_version column (which gets populated in a + # background task). 
+ self._upsert_room_version_txn(txn, room_id) - # Before deleting we populate the current_state_delta_stream - # so that async background tasks get told what happened. - sql = """ + # Before deleting we populate the current_state_delta_stream + # so that async background tasks get told what happened. + sql = """ INSERT INTO current_state_delta_stream (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) SELECT ?, ?, room_id, type, state_key, null, event_id FROM current_state_events WHERE room_id = ? """ - txn.execute(sql, (stream_id, self._instance_name, room_id)) + txn.execute(sql, (stream_id, self._instance_name, room_id)) - # We also want to invalidate the membership caches for users - # that were in the room. - users_in_room = self.store.get_users_in_room_txn(txn, room_id) - members_changed.update(users_in_room) + # We also want to invalidate the membership caches for users + # that were in the room. + users_in_room = self.store.get_users_in_room_txn(txn, room_id) + members_changed.update(users_in_room) - self.db_pool.simple_delete_txn( - txn, - table="current_state_events", - keyvalues={"room_id": room_id}, - ) - else: - # We're still in the room, so we update the current state as normal. + self.db_pool.simple_delete_txn( + txn, + table="current_state_events", + keyvalues={"room_id": room_id}, + ) + else: + # We're still in the room, so we update the current state as normal. - # First we add entries to the current_state_delta_stream. We - # do this before updating the current_state_events table so - # that we can use it to calculate the `prev_event_id`. (This - # allows us to not have to pull out the existing state - # unnecessarily). - # - # The stream_id for the update is chosen to be the minimum of the stream_ids - # for the batch of the events that we are persisting; that means we do not - # end up in a situation where workers see events before the - # current_state_delta updates. - # - sql = """ + # First we add entries to the current_state_delta_stream. We + # do this before updating the current_state_events table so + # that we can use it to calculate the `prev_event_id`. (This + # allows us to not have to pull out the existing state + # unnecessarily). + # + # The stream_id for the update is chosen to be the minimum of the stream_ids + # for the batch of the events that we are persisting; that means we do not + # end up in a situation where workers see events before the + # current_state_delta updates. + # + sql = """ INSERT INTO current_state_delta_stream (stream_id, instance_name, room_id, type, state_key, event_id, prev_event_id) SELECT ?, ?, ?, ?, ?, ?, ( @@ -1101,39 +1115,39 @@ class PersistEventsStore: WHERE room_id = ? AND type = ? AND state_key = ? ) """ - txn.execute_batch( - sql, + txn.execute_batch( + sql, + ( ( - ( - stream_id, - self._instance_name, - room_id, - etype, - state_key, - to_insert.get((etype, state_key)), - room_id, - etype, - state_key, - ) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), - ) - # Now we actually update the current_state_events table + stream_id, + self._instance_name, + room_id, + etype, + state_key, + to_insert.get((etype, state_key)), + room_id, + etype, + state_key, + ) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) + # Now we actually update the current_state_events table - txn.execute_batch( - "DELETE FROM current_state_events" - " WHERE room_id = ? AND type = ? 
AND state_key = ?", - ( - (room_id, etype, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - ), - ) + txn.execute_batch( + "DELETE FROM current_state_events" + " WHERE room_id = ? AND type = ? AND state_key = ?", + ( + (room_id, etype, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + ), + ) - # We include the membership in the current state table, hence we do - # a lookup when we insert. This assumes that all events have already - # been inserted into room_memberships. - txn.execute_batch( - """INSERT INTO current_state_events + # We include the membership in the current state table, hence we do + # a lookup when we insert. This assumes that all events have already + # been inserted into room_memberships. + txn.execute_batch( + """INSERT INTO current_state_events (room_id, type, state_key, event_id, membership, event_stream_ordering) VALUES ( ?, ?, ?, ?, @@ -1141,34 +1155,34 @@ class PersistEventsStore: (SELECT stream_ordering FROM events WHERE event_id = ?) ) """, - [ - (room_id, key[0], key[1], ev_id, ev_id, ev_id) - for key, ev_id in to_insert.items() - ], - ) + [ + (room_id, key[0], key[1], ev_id, ev_id, ev_id) + for key, ev_id in to_insert.items() + ], + ) - # We now update `local_current_membership`. We do this regardless - # of whether we're still in the room or not to handle the case where - # e.g. we just got banned (where we need to record that fact here). + # We now update `local_current_membership`. We do this regardless + # of whether we're still in the room or not to handle the case where + # e.g. we just got banned (where we need to record that fact here). - # Note: Do we really want to delete rows here (that we do not - # subsequently reinsert below)? While technically correct it means - # we have no record of the fact the user *was* a member of the - # room but got, say, state reset out of it. - if to_delete or to_insert: - txn.execute_batch( - "DELETE FROM local_current_membership" - " WHERE room_id = ? AND user_id = ?", - ( - (room_id, state_key) - for etype, state_key in itertools.chain(to_delete, to_insert) - if etype == EventTypes.Member and self.is_mine_id(state_key) - ), - ) + # Note: Do we really want to delete rows here (that we do not + # subsequently reinsert below)? While technically correct it means + # we have no record of the fact the user *was* a member of the + # room but got, say, state reset out of it. + if to_delete or to_insert: + txn.execute_batch( + "DELETE FROM local_current_membership" + " WHERE room_id = ? AND user_id = ?", + ( + (room_id, state_key) + for etype, state_key in itertools.chain(to_delete, to_insert) + if etype == EventTypes.Member and self.is_mine_id(state_key) + ), + ) - if to_insert: - txn.execute_batch( - """INSERT INTO local_current_membership + if to_insert: + txn.execute_batch( + """INSERT INTO local_current_membership (room_id, user_id, event_id, membership, event_stream_ordering) VALUES ( ?, ?, ?, @@ -1176,29 +1190,27 @@ class PersistEventsStore: (SELECT stream_ordering FROM events WHERE event_id = ?) 
) """, - [ - (room_id, key[1], ev_id, ev_id, ev_id) - for key, ev_id in to_insert.items() - if key[0] == EventTypes.Member and self.is_mine_id(key[1]) - ], - ) - - txn.call_after( - self.store._curr_state_delta_stream_cache.entity_has_changed, - room_id, - stream_id, + [ + (room_id, key[1], ev_id, ev_id, ev_id) + for key, ev_id in to_insert.items() + if key[0] == EventTypes.Member and self.is_mine_id(key[1]) + ], ) - # Invalidate the various caches - self.store._invalidate_state_caches_and_stream( - txn, room_id, members_changed - ) + txn.call_after( + self.store._curr_state_delta_stream_cache.entity_has_changed, + room_id, + stream_id, + ) - # Check if any of the remote membership changes requires us to - # unsubscribe from their device lists. - self.store.handle_potentially_left_users_txn( - txn, {m for m in members_changed if not self.hs.is_mine_id(m)} - ) + # Invalidate the various caches + self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed) + + # Check if any of the remote membership changes requires us to + # unsubscribe from their device lists. + self.store.handle_potentially_left_users_txn( + txn, {m for m in members_changed if not self.hs.is_mine_id(m)} + ) def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None: """Update the room version in the database based off current state @@ -1232,23 +1244,19 @@ class PersistEventsStore: def _update_forward_extremities_txn( self, txn: LoggingTransaction, - new_forward_extremities: Dict[str, Set[str]], + room_id: str, + new_forward_extremities: Set[str], max_stream_order: int, ) -> None: - for room_id in new_forward_extremities.keys(): - self.db_pool.simple_delete_txn( - txn, table="event_forward_extremities", keyvalues={"room_id": room_id} - ) + self.db_pool.simple_delete_txn( + txn, table="event_forward_extremities", keyvalues={"room_id": room_id} + ) self.db_pool.simple_insert_many_txn( txn, table="event_forward_extremities", keys=("event_id", "room_id"), - values=[ - (ev_id, room_id) - for room_id, new_extrem in new_forward_extremities.items() - for ev_id in new_extrem - ], + values=[(ev_id, room_id) for ev_id in new_forward_extremities], ) # We now insert into stream_ordering_to_exterm a mapping from room_id, # new stream_ordering to new forward extremeties in the room. @@ -1260,8 +1268,7 @@ class PersistEventsStore: keys=("room_id", "event_id", "stream_ordering"), values=[ (room_id, event_id, max_stream_order) - for room_id, new_extrem in new_forward_extremities.items() - for event_id in new_extrem + for event_id in new_forward_extremities ], ) @@ -1298,36 +1305,45 @@ class PersistEventsStore: def _update_room_depths_txn( self, txn: LoggingTransaction, + room_id: str, events_and_contexts: List[Tuple[EventBase, EventContext]], ) -> None: """Update min_depth for each room Args: txn: db connection + room_id: The room ID events_and_contexts: events we are persisting """ - depth_updates: Dict[str, int] = {} + stream_ordering: Optional[int] = None + depth_update = 0 for event, context in events_and_contexts: - # Then update the `stream_ordering` position to mark the latest - # event as the front of the room. This should not be done for - # backfilled events because backfilled events have negative - # stream_ordering and happened in the past so we know that we don't - # need to update the stream_ordering tip/front for the room. 
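# The _update_room_depths_txn hunk here is a small, concrete instance of the
# #16584 simplification described in the commit message: once the caller
# guarantees a single room per batch, per-room partitioning (dicts keyed by
# room_id) collapses into plain scalars, and the stream change cache is poked
# once with the maximum stream ordering (#16586) instead of once per event.
# A schematic before/after sketch with illustrative names, not Synapse code:

from typing import Dict, Iterable, Tuple


def max_depths_multi_room(events: Iterable[Tuple[str, int]]) -> Dict[str, int]:
    # Old shape: a batch could span rooms, so track a running maximum per room.
    depths: Dict[str, int] = {}
    for room_id, depth in events:
        depths[room_id] = max(depth, depths.get(room_id, depth))
    return depths


def max_depth_single_room(depths: Iterable[int]) -> int:
    # New shape: one room per batch, so a single running maximum is enough.
    return max(depths, default=0)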
+ # Don't update the stream ordering for backfilled events because + # backfilled events have negative stream_ordering and happened in the + # past, so we know that we don't need to update the stream_ordering + # tip/front for the room. assert event.internal_metadata.stream_ordering is not None if event.internal_metadata.stream_ordering >= 0: - txn.call_after( - self.store._events_stream_cache.entity_has_changed, - event.room_id, - event.internal_metadata.stream_ordering, - ) + if stream_ordering is None: + stream_ordering = event.internal_metadata.stream_ordering + else: + stream_ordering = max( + stream_ordering, event.internal_metadata.stream_ordering + ) if not event.internal_metadata.is_outlier() and not context.rejected: - depth_updates[event.room_id] = max( - event.depth, depth_updates.get(event.room_id, event.depth) - ) + depth_update = max(event.depth, depth_update) - for room_id, depth in depth_updates.items(): - self._update_min_depth_for_room_txn(txn, room_id, depth) + # Then update the `stream_ordering` position to mark the latest event as + # the front of the room. + if stream_ordering is not None: + txn.call_after( + self.store._events_stream_cache.entity_has_changed, + room_id, + stream_ordering, + ) + + self._update_min_depth_for_room_txn(txn, room_id, depth_update) def _update_outliers_txn( self, From 2f689a63263170068e0ee666422d5e0f61d21cfb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:11:15 -0500 Subject: [PATCH 131/142] Bump ruff from 0.0.292 to 0.1.4 (#16600) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 334005241e..0312ffe890 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2439,28 +2439,28 @@ files = [ [[package]] name = "ruff" -version = "0.0.292" -description = "An extremely fast Python linter, written in Rust." +version = "0.1.4" +description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"}, - {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"}, - {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"}, - {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"}, - {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"}, - {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"}, - {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"}, - {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"}, + {file = "ruff-0.1.4-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:864958706b669cce31d629902175138ad8a069d99ca53514611521f532d91495"}, + {file = "ruff-0.1.4-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:9fdd61883bb34317c788af87f4cd75dfee3a73f5ded714b77ba928e418d6e39e"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4eaca8c9cc39aa7f0f0d7b8fe24ecb51232d1bb620fc4441a61161be4a17539"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a9a1301dc43cbf633fb603242bccd0aaa34834750a14a4c1817e2e5c8d60de17"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e8db8ab6f100f02e28b3d713270c857d370b8d61871d5c7d1702ae411df683"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:80fea754eaae06335784b8ea053d6eb8e9aac75359ebddd6fee0858e87c8d510"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bc02a480d4bfffd163a723698da15d1a9aec2fced4c06f2a753f87f4ce6969c"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9862811b403063765b03e716dac0fda8fdbe78b675cd947ed5873506448acea4"}, + {file = "ruff-0.1.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58826efb8b3efbb59bb306f4b19640b7e366967a31c049d49311d9eb3a4c60cb"}, + {file = "ruff-0.1.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:fdfd453fc91d9d86d6aaa33b1bafa69d114cf7421057868f0b79104079d3e66e"}, + {file = "ruff-0.1.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e8791482d508bd0b36c76481ad3117987301b86072158bdb69d796503e1c84a8"}, + {file = "ruff-0.1.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:01206e361021426e3c1b7fba06ddcb20dbc5037d64f6841e5f2b21084dc51800"}, + {file = "ruff-0.1.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:645591a613a42cb7e5c2b667cbefd3877b21e0252b59272ba7212c3d35a5819f"}, + {file = "ruff-0.1.4-py3-none-win32.whl", hash = "sha256:99908ca2b3b85bffe7e1414275d004917d1e0dfc99d497ccd2ecd19ad115fd0d"}, + {file = "ruff-0.1.4-py3-none-win_amd64.whl", hash = "sha256:1dfd6bf8f6ad0a4ac99333f437e0ec168989adc5d837ecd38ddb2cc4a2e3db8a"}, + {file = "ruff-0.1.4-py3-none-win_arm64.whl", hash = "sha256:d98ae9ebf56444e18a3e3652b3383204748f73e247dea6caaf8b52d37e6b32da"}, + {file = "ruff-0.1.4.tar.gz", hash = "sha256:21520ecca4cc555162068d87c747b8f95e1e95f8ecfcbbe59e8dd00710586315"}, ] [[package]] @@ -3447,4 +3447,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "a08543c65f18cc7e9dea648e89c18ab88fc1747aa2e029aa208f777fc3db06dd" +content-hash = "369455d6a67753a6bcfbad3cd86801b1dd02896d0180080e2ba9501e007353ec" diff --git a/pyproject.toml b/pyproject.toml index 23e0004395..e5c0158a7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -321,7 +321,7 @@ all = [ # This helps prevents merge conflicts when running a batch of dependabot updates. 
isort = ">=5.10.1" black = ">=22.7.0" -ruff = "0.0.292" +ruff = "0.1.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From a28339b8676c8a3abef82293c6df31ed572e798f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:11:31 -0500 Subject: [PATCH 132/142] Bump types-jsonschema from 4.19.0.3 to 4.19.0.4 (#16599) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0312ffe890..f54b8b8491 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3069,13 +3069,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.19.0.3" +version = "4.19.0.4" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.19.0.3.tar.gz", hash = "sha256:e0fc0f5d51fd0988bf193be42174a5376b0096820ff79505d9c1b66de23f0581"}, - {file = "types_jsonschema-4.19.0.3-py3-none-any.whl", hash = "sha256:5cedbb661e5ca88d95b94b79902423e3f97a389c245e5fe0ab384122f27d56b9"}, + {file = "types-jsonschema-4.19.0.4.tar.gz", hash = "sha256:994feb6632818259c4b5dbd733867824cb475029a6abc2c2b5201a2268b6e7d2"}, + {file = "types_jsonschema-4.19.0.4-py3-none-any.whl", hash = "sha256:b73c3f4ba3cd8108602d1198a438e2698d5eb6b9db206ed89a33e24729b0abe7"}, ] [package.dependencies] From 5cf29886949a92c84dbb33b8feb356ff403ebf7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:12:01 -0500 Subject: [PATCH 133/142] Bump types-pyyaml from 6.0.12.11 to 6.0.12.12 (#16602) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index f54b8b8491..6fd48358d7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3141,13 +3141,13 @@ cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.11" +version = "6.0.12.12" description = "Typing stubs for PyYAML" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"}, - {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, + {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"}, + {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"}, ] [[package]] From 1a9b22a3d12316e1c483c8066ed8383bf520e785 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:12:18 -0500 Subject: [PATCH 134/142] Bump setuptools-rust from 1.8.0 to 1.8.1 (#16601) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6fd48358d7..41556635d3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2580,13 +2580,13 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( [[package]] name = "setuptools-rust" -version = "1.8.0" +version = "1.8.1" description = "Setuptools Rust extension plugin" optional = false 
python-versions = ">=3.8" files = [ - {file = "setuptools-rust-1.8.0.tar.gz", hash = "sha256:5e02b7a80058853bf64127314f6b97d0efed11e08b94c88ca639a20976f6adc4"}, - {file = "setuptools_rust-1.8.0-py3-none-any.whl", hash = "sha256:95ec67edee2ca73233c9e75250e9d23a302aa23b4c8413dfd19c14c30d08f703"}, + {file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"}, + {file = "setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"}, ] [package.dependencies] From cc4fe68adff0fb5660b94f92bd40978e7e292098 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Nov 2023 08:31:22 -0500 Subject: [PATCH 135/142] Support reactor timing metric on more reactors. (#16532) Previously only Twisted's EPollReactor was compatible with the reactor timing metric, notably not working when asyncio was used. After this change, the following configurations support the reactor timing metric: * poll, epoll, or select reactors * asyncio reactor with a poll, epoll, select, /dev/poll, or kqueue event loop. --- changelog.d/16532.misc | 1 + mypy.ini | 4 +- synapse/metrics/_reactor_metrics.py | 128 ++++++++++++++++++++++------ 3 files changed, 105 insertions(+), 28 deletions(-) create mode 100644 changelog.d/16532.misc diff --git a/changelog.d/16532.misc b/changelog.d/16532.misc new file mode 100644 index 0000000000..437e00210b --- /dev/null +++ b/changelog.d/16532.misc @@ -0,0 +1 @@ +Support reactor tick timings on more types of event loops. diff --git a/mypy.ini b/mypy.ini index fdfe9432fc..1a2b9ea410 100644 --- a/mypy.ini +++ b/mypy.ini @@ -37,8 +37,8 @@ files = build_rust.py [mypy-synapse.metrics._reactor_metrics] -# This module imports select.epoll. That exists on Linux, but doesn't on macOS. -# See https://github.com/matrix-org/synapse/pull/11771. +# This module pokes at the internals of OS-specific classes, to appease mypy +# on different systems we add additional ignores. warn_unused_ignores = False [mypy-synapse.util.caches.treecache] diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py index a2c6e6842d..dd486dd3e2 100644 --- a/synapse/metrics/_reactor_metrics.py +++ b/synapse/metrics/_reactor_metrics.py @@ -12,17 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import select +import logging import time -from typing import Any, Iterable, List, Tuple +from selectors import SelectSelector, _PollLikeSelector # type: ignore[attr-defined] +from typing import Any, Callable, Iterable from prometheus_client import Histogram, Metric from prometheus_client.core import REGISTRY, GaugeMetricFamily -from twisted.internet import reactor +from twisted.internet import reactor, selectreactor +from twisted.internet.asyncioreactor import AsyncioSelectorReactor from synapse.metrics._types import Collector +try: + from selectors import KqueueSelector +except ImportError: + + class KqueueSelector: # type: ignore[no-redef] + pass + + +try: + from twisted.internet.epollreactor import EPollReactor +except ImportError: + + class EPollReactor: # type: ignore[no-redef] + pass + + +try: + from twisted.internet.pollreactor import PollReactor +except ImportError: + + class PollReactor: # type: ignore[no-redef] + pass + + +logger = logging.getLogger(__name__) + # # Twisted reactor metrics # @@ -34,52 +62,100 @@ tick_time = Histogram( ) -class EpollWrapper: - """a wrapper for an epoll object which records the time between polls""" +class CallWrapper: + """A wrapper for a callable which records the time between calls""" - def __init__(self, poller: "select.epoll"): # type: ignore[name-defined] + def __init__(self, wrapped: Callable[..., Any]): self.last_polled = time.time() - self._poller = poller + self._wrapped = wrapped - def poll(self, *args, **kwargs) -> List[Tuple[int, int]]: # type: ignore[no-untyped-def] - # record the time since poll() was last called. This gives a good proxy for + def __call__(self, *args, **kwargs) -> Any: # type: ignore[no-untyped-def] + # record the time since this was last called. This gives a good proxy for # how long it takes to run everything in the reactor - ie, how long anything # waiting for the next tick will have to wait. tick_time.observe(time.time() - self.last_polled) - ret = self._poller.poll(*args, **kwargs) + ret = self._wrapped(*args, **kwargs) self.last_polled = time.time() return ret + +class ObjWrapper: + """A wrapper for an object which wraps a specified method in CallWrapper. + + Other methods/attributes are passed to the original object. + + This is necessary when the wrapped object does not allow the attribute to be + overwritten. + """ + + def __init__(self, wrapped: Any, method_name: str): + self._wrapped = wrapped + self._method_name = method_name + self._wrapped_method = CallWrapper(getattr(wrapped, method_name)) + def __getattr__(self, item: str) -> Any: - return getattr(self._poller, item) + if item == self._method_name: + return self._wrapped_method + + return getattr(self._wrapped, item) class ReactorLastSeenMetric(Collector): - def __init__(self, epoll_wrapper: EpollWrapper): - self._epoll_wrapper = epoll_wrapper + def __init__(self, call_wrapper: CallWrapper): + self._call_wrapper = call_wrapper def collect(self) -> Iterable[Metric]: cm = GaugeMetricFamily( "python_twisted_reactor_last_seen", "Seconds since the Twisted reactor was last seen", ) - cm.add_metric([], time.time() - self._epoll_wrapper.last_polled) + cm.add_metric([], time.time() - self._call_wrapper.last_polled) yield cm +# Twisted has already select a reasonable reactor for us, so assumptions can be +# made about the shape. 
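# A self-contained sketch (not Synapse code) of the measurement idea that
# CallWrapper/ObjWrapper implement above: wrap the blocking "wait for I/O" call
# and record how long it has been since that call last returned, which is a
# good proxy for how long the loop spent running callbacks in between (the
# "reactor tick"). Only the standard library is used; the class name is
# illustrative.

import time
from selectors import DefaultSelector, SelectorKey
from typing import List, Optional, Tuple


class TimingSelector(DefaultSelector):
    """A selector that tracks the largest gap between select() calls."""

    def __init__(self) -> None:
        super().__init__()
        self.last_polled = time.time()
        self.max_tick_seconds = 0.0

    def select(
        self, timeout: Optional[float] = None
    ) -> List[Tuple[SelectorKey, int]]:
        # Time elapsed since the previous select() returned is (roughly) the
        # time spent servicing events before polling again.
        self.max_tick_seconds = max(
            self.max_tick_seconds, time.time() - self.last_polled
        )
        ret = super().select(timeout)
        self.last_polled = time.time()
        return ret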
+wrapper = None try: - # if the reactor has a `_poller` attribute, which is an `epoll` object - # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will - # measure the time between ticks - from select import epoll # type: ignore[attr-defined] + if isinstance(reactor, (PollReactor, EPollReactor)): + reactor._poller = ObjWrapper(reactor._poller, "poll") # type: ignore[attr-defined] + wrapper = reactor._poller._wrapped_method # type: ignore[attr-defined] - poller = reactor._poller # type: ignore[attr-defined] -except (AttributeError, ImportError): - pass -else: - if isinstance(poller, epoll): - poller = EpollWrapper(poller) - reactor._poller = poller # type: ignore[attr-defined] - REGISTRY.register(ReactorLastSeenMetric(poller)) + elif isinstance(reactor, selectreactor.SelectReactor): + # Twisted uses a module-level _select function. + wrapper = selectreactor._select = CallWrapper(selectreactor._select) + + elif isinstance(reactor, AsyncioSelectorReactor): + # For asyncio look at the underlying asyncio event loop. + asyncio_loop = reactor._asyncioEventloop # A sub-class of BaseEventLoop, + + # A sub-class of BaseSelector. + selector = asyncio_loop._selector # type: ignore[attr-defined] + + if isinstance(selector, SelectSelector): + wrapper = selector._select = CallWrapper(selector._select) # type: ignore[attr-defined] + + # poll, epoll, and /dev/poll. + elif isinstance(selector, _PollLikeSelector): + selector._selector = ObjWrapper(selector._selector, "poll") # type: ignore[attr-defined] + wrapper = selector._selector._wrapped_method # type: ignore[attr-defined] + + elif isinstance(selector, KqueueSelector): + selector._selector = ObjWrapper(selector._selector, "control") # type: ignore[attr-defined] + wrapper = selector._selector._wrapped_method # type: ignore[attr-defined] + + else: + # E.g. this does not support the (Windows-only) ProactorEventLoop. + logger.warning( + "Skipping configuring ReactorLastSeenMetric: unexpected asyncio loop selector: %r via %r", + selector, + asyncio_loop, + ) +except Exception as e: + logger.warning("Configuring ReactorLastSeenMetric failed: %r", e) + + +if wrapper: + REGISTRY.register(ReactorLastSeenMetric(wrapper)) From 1dd3074629883aaf8c6510d15ceaa3d567a91262 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Nov 2023 09:13:53 -0500 Subject: [PATCH 136/142] Bump setuptools_rust to match pinned version. (#16605) --- changelog.d/16605.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/16605.misc diff --git a/changelog.d/16605.misc b/changelog.d/16605.misc new file mode 100644 index 0000000000..2db7da5692 --- /dev/null +++ b/changelog.d/16605.misc @@ -0,0 +1 @@ +Bump setuptools-rust from 1.8.0 to 1.8.1. diff --git a/pyproject.toml b/pyproject.toml index e5c0158a7b..df132c0236 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -381,7 +381,7 @@ furo = ">=2022.12.7,<2024.0.0" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.0"] +requires = ["poetry-core>=1.1.0,<=1.7.0", "setuptools_rust>=1.3,<=1.8.1"] build-backend = "poetry.core.masonry.api" From 7e5d3b06fa8b6ce3676eb1178d7db0e252d48679 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Nov 2023 15:41:57 -0500 Subject: [PATCH 137/142] Collect information for PushRuleEvaluator in parallel. (#16590) Fetch information needed for push rule evaluation in parallel. 
Ideally this would use query pipelining, but this is not available in psycopg2. Due to the database thread pool this may result in little to no parallelization. --- changelog.d/16590.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 56 +++++++++++++++------ synapse/storage/databases/main/push_rule.py | 50 +++++++++++------- synapse/util/async_helpers.py | 14 ++++++ 4 files changed, 87 insertions(+), 34 deletions(-) create mode 100644 changelog.d/16590.misc diff --git a/changelog.d/16590.misc b/changelog.d/16590.misc new file mode 100644 index 0000000000..6db04b0c98 --- /dev/null +++ b/changelog.d/16590.misc @@ -0,0 +1 @@ +Run push rule evaluator setup in parallel. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 14784312dc..5934b1ef34 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -25,10 +25,13 @@ from typing import ( Sequence, Tuple, Union, + cast, ) from prometheus_client import Counter +from twisted.internet.defer import Deferred + from synapse.api.constants import ( MAIN_TIMELINE, EventContentFields, @@ -40,11 +43,15 @@ from synapse.api.room_versions import PushRuleRoomFlag from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership +from synapse.storage.roommember import ProfileInfo from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator from synapse.types import JsonValue from synapse.types.state import StateFilter +from synapse.util import unwrapFirstError +from synapse.util.async_helpers import gather_results from synapse.util.caches import register_cache from synapse.util.metrics import measure_func from synapse.visibility import filter_event_for_clients_with_state @@ -342,15 +349,41 @@ class BulkPushRuleEvaluator: rules_by_user = await self._get_rules_for_event(event) actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {} - room_member_count = await self.store.get_number_joined_users_in_room( - event.room_id - ) - + # Gather a bunch of info in parallel. + # + # This has a lot of ignored types and casting due to the use of @cached + # decorated functions passed into run_in_background. + # + # See https://github.com/matrix-org/synapse/issues/16606 ( - power_levels, - sender_power_level, - ) = await self._get_power_levels_and_sender_level( - event, context, event_id_to_event + room_member_count, + (power_levels, sender_power_level), + related_events, + profiles, + ) = await make_deferred_yieldable( + cast( + "Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", + gather_results( + ( + run_in_background( # type: ignore[call-arg] + self.store.get_number_joined_users_in_room, event.room_id # type: ignore[arg-type] + ), + run_in_background( + self._get_power_levels_and_sender_level, + event, + context, + event_id_to_event, + ), + run_in_background(self._related_events, event), + run_in_background( # type: ignore[call-arg] + self.store.get_subset_users_in_room_with_profiles, + event.room_id, # type: ignore[arg-type] + rules_by_user.keys(), # type: ignore[arg-type] + ), + ), + consumeErrors=True, + ).addErrback(unwrapFirstError), + ) ) # Find the event's thread ID. 
@@ -366,8 +399,6 @@ class BulkPushRuleEvaluator: # the parent is part of a thread. thread_id = await self.store.get_thread_id(relation.parent_id) - related_events = await self._related_events(event) - # It's possible that old room versions have non-integer power levels (floats or # strings; even the occasional `null`). For old rooms, we interpret these as if # they were integers. Do this here for the `@room` power level threshold. @@ -400,11 +431,6 @@ class BulkPushRuleEvaluator: self.hs.config.experimental.msc1767_enabled, # MSC3931 flag ) - users = rules_by_user.keys() - profiles = await self.store.get_subset_users_in_room_with_profiles( - event.room_id, users - ) - for uid, rules in rules_by_user.items(): if event.sender == uid: continue diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 22025eca56..37135d431d 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -28,8 +28,11 @@ from typing import ( cast, ) +from twisted.internet import defer + from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.replication.tcp.streams import PushRulesStream from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -51,7 +54,8 @@ from synapse.storage.util.id_generators import ( ) from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules from synapse.types import JsonDict -from synapse.util import json_encoder +from synapse.util import json_encoder, unwrapFirstError +from synapse.util.async_helpers import gather_results from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -249,23 +253,33 @@ class PushRulesWorkerStore( user_id: [] for user_id in user_ids } - rows = cast( - List[Tuple[str, str, int, int, str, str]], - await self.db_pool.simple_select_many_batch( - table="push_rules", - column="user_name", - iterable=user_ids, - retcols=( - "user_name", - "rule_id", - "priority_class", - "priority", - "conditions", - "actions", + # gatherResults loses all type information. + rows, enabled_map_by_user = await make_deferred_yieldable( + gather_results( + ( + cast( + "defer.Deferred[List[Tuple[str, str, int, int, str, str]]]", + run_in_background( + self.db_pool.simple_select_many_batch, + table="push_rules", + column="user_name", + iterable=user_ids, + retcols=( + "user_name", + "rule_id", + "priority_class", + "priority", + "conditions", + "actions", + ), + desc="bulk_get_push_rules", + batch_size=1000, + ), + ), + run_in_background(self.bulk_get_push_rules_enabled, user_ids), ), - desc="bulk_get_push_rules", - batch_size=1000, - ), + consumeErrors=True, + ).addErrback(unwrapFirstError) ) # Sort by highest priority_class, then highest priority. 
@@ -276,8 +290,6 @@ class PushRulesWorkerStore( (rule_id, priority_class, conditions, actions) ) - enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) - results: Dict[str, FilteredPushRules] = {} for user_id, rules in raw_rules.items(): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 0cbeb0c365..8a55e4e41d 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -345,6 +345,7 @@ async def yieldable_gather_results_delaying_cancellation( T1 = TypeVar("T1") T2 = TypeVar("T2") T3 = TypeVar("T3") +T4 = TypeVar("T4") @overload @@ -380,6 +381,19 @@ def gather_results( ... +@overload +def gather_results( + deferredList: Tuple[ + "defer.Deferred[T1]", + "defer.Deferred[T2]", + "defer.Deferred[T3]", + "defer.Deferred[T4]", + ], + consumeErrors: bool = ..., +) -> "defer.Deferred[Tuple[T1, T2, T3, T4]]": + ... + + def gather_results( # type: ignore[misc] deferredList: Tuple["defer.Deferred[T1]", ...], consumeErrors: bool = False, From ec9ff389f4c64d31da46b904381087aef0c86796 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Nov 2023 09:34:23 -0500 Subject: [PATCH 138/142] More tests for the simple_* methods. (#16596) Expand tests for the simple_* database methods, additionally test against both PostgreSQL and SQLite variants. --- changelog.d/16596.misc | 1 + synapse/storage/database.py | 13 +- tests/storage/test_base.py | 646 +++++++++++++++++++++++++++++++++++- 3 files changed, 633 insertions(+), 27 deletions(-) create mode 100644 changelog.d/16596.misc diff --git a/changelog.d/16596.misc b/changelog.d/16596.misc new file mode 100644 index 0000000000..fa457b12e5 --- /dev/null +++ b/changelog.d/16596.misc @@ -0,0 +1 @@ +Improve tests of the SQL generator. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 6d54bb0eb2..abc7d8a5d2 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1401,12 +1401,12 @@ class DatabasePool: allvalues.update(values) latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values) - sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % ( + sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %sDO %s" % ( table, ", ".join(k for k in allvalues), ", ".join("?" for _ in allvalues), ", ".join(k for k in keyvalues), - f"WHERE {where_clause}" if where_clause else "", + f"WHERE {where_clause} " if where_clause else "", latter, ) txn.execute(sql, list(allvalues.values())) @@ -2062,9 +2062,7 @@ class DatabasePool: where_clause = "" # UPDATE mytable SET col1 = ?, col2 = ? WHERE col3 = ? AND col4 = ? - sql = f""" - UPDATE {table} SET {set_clause} {where_clause} - """ + sql = f"UPDATE {table} SET {set_clause} {where_clause}" txn.execute_batch(sql, args) @@ -2283,8 +2281,6 @@ class DatabasePool: if not values: return 0 - sql = "DELETE FROM %s" % table - clause, values = make_in_list_sql_clause(txn.database_engine, column, values) clauses = [clause] @@ -2292,8 +2288,7 @@ class DatabasePool: clauses.append("%s = ?" 
% (key,)) values.append(value) - if clauses: - sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) + sql = "DELETE FROM %s WHERE %s" % (table, " AND ".join(clauses)) txn.execute(sql, values) return txn.rowcount diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index e4a52c301e..b4c490b568 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -14,7 +14,7 @@ from collections import OrderedDict from typing import Generator -from unittest.mock import Mock +from unittest.mock import Mock, call, patch from twisted.internet import defer @@ -24,43 +24,90 @@ from synapse.storage.engines import create_engine from tests import unittest from tests.server import TestHomeServer -from tests.utils import default_config +from tests.utils import USE_POSTGRES_FOR_TESTS, default_config class SQLBaseStoreTestCase(unittest.TestCase): """Test the "simple" SQL generating methods in SQLBaseStore.""" def setUp(self) -> None: - self.db_pool = Mock(spec=["runInteraction"]) + # This is the Twisted connection pool. + conn_pool = Mock(spec=["runInteraction", "runWithConnection"]) self.mock_txn = Mock() - self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"]) + if USE_POSTGRES_FOR_TESTS: + # To avoid testing psycopg2 itself, patch execute_batch/execute_values + # to assert how it is called. + from psycopg2 import extras + + self.mock_execute_batch = Mock() + self.execute_batch_patcher = patch.object( + extras, "execute_batch", new=self.mock_execute_batch + ) + self.execute_batch_patcher.start() + self.mock_execute_values = Mock() + self.execute_values_patcher = patch.object( + extras, "execute_values", new=self.mock_execute_values + ) + self.execute_values_patcher.start() + + self.mock_conn = Mock( + spec_set=[ + "cursor", + "rollback", + "commit", + "closed", + "reconnect", + "set_session", + "encoding", + ] + ) + self.mock_conn.encoding = "UNICODE" + else: + self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"]) self.mock_conn.cursor.return_value = self.mock_txn + self.mock_txn.connection = self.mock_conn self.mock_conn.rollback.return_value = None # Our fake runInteraction just runs synchronously inline def runInteraction(func, *args, **kwargs) -> defer.Deferred: # type: ignore[no-untyped-def] return defer.succeed(func(self.mock_txn, *args, **kwargs)) - self.db_pool.runInteraction = runInteraction + conn_pool.runInteraction = runInteraction def runWithConnection(func, *args, **kwargs): # type: ignore[no-untyped-def] return defer.succeed(func(self.mock_conn, *args, **kwargs)) - self.db_pool.runWithConnection = runWithConnection + conn_pool.runWithConnection = runWithConnection config = default_config(name="test", parse=True) hs = TestHomeServer("test", config=config) - sqlite_config = {"name": "sqlite3"} - engine = create_engine(sqlite_config) + if USE_POSTGRES_FOR_TESTS: + db_config = {"name": "psycopg2", "args": {}} + else: + db_config = {"name": "sqlite3"} + engine = create_engine(db_config) + fake_engine = Mock(wraps=engine) fake_engine.in_transaction.return_value = False + fake_engine.module.OperationalError = engine.module.OperationalError + fake_engine.module.DatabaseError = engine.module.DatabaseError + fake_engine.module.IntegrityError = engine.module.IntegrityError + # Don't convert param style to make assertions easier. + fake_engine.convert_param_style = lambda sql: sql + # To fix isinstance(...) checks. 
+ fake_engine.__class__ = engine.__class__ # type: ignore[assignment] - db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) - db._db_pool = self.db_pool + db = DatabasePool(Mock(), Mock(config=db_config), fake_engine) + db._db_pool = conn_pool self.datastore = SQLBaseStore(db, None, hs) # type: ignore[arg-type] + def tearDown(self) -> None: + if USE_POSTGRES_FOR_TESTS: + self.execute_batch_patcher.stop() + self.execute_values_patcher.stop() + @defer.inlineCallbacks def test_insert_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -71,7 +118,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "INSERT INTO tablename (columname) VALUES(?)", ("Value",) ) @@ -87,10 +134,73 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)", (1, 2, 3) ) + @defer.inlineCallbacks + def test_insert_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_insert_many( + table="tablename", + keys=( + "col1", + "col2", + ), + values=[ + ( + "val1", + "val2", + ), + ("val3", "val4"), + ], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (col1, col2) VALUES ?", + [("val1", "val2"), ("val3", "val4")], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (col1, col2) VALUES(?, ?)", + [("val1", "val2"), ("val3", "val4")], + ) + + @defer.inlineCallbacks + def test_insert_many_no_iterable( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_insert_many( + table="tablename", + keys=( + "col1", + "col2", + ), + values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (col1, col2) VALUES ?", + [], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (col1, col2) VALUES(?, ?)", [] + ) + @defer.inlineCallbacks def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -103,7 +213,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) self.assertEqual("Value", value) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -121,7 +231,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) self.assertEqual({"colA": 1, "colB": 2, "colC": 3}, ret) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -156,10 +266,58 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) self.assertEqual([(1,), (2,), (3,)], ret) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "SELECT colA FROM tablename WHERE keycol = ?", ["A set"] ) + @defer.inlineCallbacks + def test_select_many_batch( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 3 + self.mock_txn.fetchall.side_effect = [[(1,), (2,)], [(3,)]] + + ret = yield 
defer.ensureDeferred( + self.datastore.db_pool.simple_select_many_batch( + table="tablename", + column="col1", + iterable=("val1", "val2", "val3"), + retcols=("col2",), + keyvalues={"col3": "val4"}, + batch_size=2, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?", + [["val1", "val2"], "val4"], + ), + call( + "SELECT col2 FROM tablename WHERE col1 = ANY(?) AND col3 = ?", + [["val3"], "val4"], + ), + ], + ) + self.assertEqual([(1,), (2,), (3,)], ret) + + def test_select_many_no_iterable(self) -> None: + self.mock_txn.rowcount = 3 + self.mock_txn.fetchall.side_effect = [(1,), (2,)] + + ret = self.datastore.db_pool.simple_select_many_txn( + self.mock_txn, + table="tablename", + column="col1", + iterable=(), + retcols=("col2",), + keyvalues={"col3": "val4"}, + ) + + self.mock_txn.execute.assert_not_called() + self.assertEqual([], ret) + @defer.inlineCallbacks def test_update_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -172,7 +330,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "UPDATE tablename SET columnname = ? WHERE keycol = ?", ["New Value", "TheKey"], ) @@ -191,11 +349,76 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?", [3, 4, 1, 2], ) + @defer.inlineCallbacks + def test_update_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[("val1", "val2")], + value_names=("col3",), + value_values=[("val3",)], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_batch.assert_called_once_with( + self.mock_txn, + "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", + [("val3", "val1", "val2"), ("val3", "val1", "val2")], + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", + [("val3", "val1", "val2"), ("val3", "val1", "val2")], + ) + + # key_values and value_values must be the same length. + with self.assertRaises(ValueError): + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[("val1", "val2")], + value_names=("col3",), + value_values=[], + desc="", + ) + ) + + @defer.inlineCallbacks + def test_update_many_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_update_many( + table="tablename", + key_names=("col1", "col2"), + key_values=[], + value_names=("col3",), + value_values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_batch.assert_called_once_with( + self.mock_txn, + "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", + [], + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "UPDATE tablename SET col3 = ? WHERE col1 = ? 
AND col2 = ?", + [], + ) + @defer.inlineCallbacks def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: self.mock_txn.rowcount = 1 @@ -206,6 +429,393 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) ) - self.mock_txn.execute.assert_called_with( + self.mock_txn.execute.assert_called_once_with( "DELETE FROM tablename WHERE keycol = ?", ["Go away"] ) + + @defer.inlineCallbacks + def test_delete_many(self) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 2 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=("val1", "val2"), + keyvalues={"col2": "val3"}, + desc="", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "DELETE FROM tablename WHERE col1 = ANY(?) AND col2 = ?", + [["val1", "val2"], "val3"], + ) + self.assertEqual(result, 2) + + @defer.inlineCallbacks + def test_delete_many_no_iterable( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=(), + keyvalues={"col2": "val3"}, + desc="", + ) + ) + + self.mock_txn.execute.assert_not_called() + self.assertEqual(result, 0) + + @defer.inlineCallbacks + def test_delete_many_no_keyvalues( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 2 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_delete_many( + table="tablename", + column="col1", + iterable=("val1", "val2"), + keyvalues={}, + desc="", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "DELETE FROM tablename WHERE col1 = ANY(?)", [["val1", "val2"]] + ) + self.assertEqual(result, 2) + + @defer.inlineCallbacks + def test_upsert(self) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname) VALUES (?) ON CONFLICT (columnname) DO NOTHING", + ["value"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_with_insertion( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, thirdcol, othercol) VALUES (?, ?, ?) 
ON CONFLICT (columnname) DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "insertionval", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_with_where( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + where_clause="thirdcol IS NULL", + ) + ) + + self.mock_txn.execute.assert_called_once_with( + "INSERT INTO tablename (columnname, othercol) VALUES (?, ?) ON CONFLICT (columnname) WHERE thirdcol IS NULL DO UPDATE SET othercol=EXCLUDED.othercol", + ["oldvalue", "newvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_many(self) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert_many( + table="tablename", + key_names=["keycol1", "keycol2"], + key_values=[["keyval1", "keyval2"], ["keyval3", "keyval4"]], + value_names=["valuecol3"], + value_values=[["val5"], ["val6"]], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES ? ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3", + [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (keycol1, keycol2, valuecol3) VALUES (?, ?, ?) ON CONFLICT (keycol1, keycol2) DO UPDATE SET valuecol3=EXCLUDED.valuecol3", + [("keyval1", "keyval2", "val5"), ("keyval3", "keyval4", "val6")], + ) + + @defer.inlineCallbacks + def test_upsert_many_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert_many( + table="tablename", + key_names=["columnname"], + key_values=[["oldvalue"]], + value_names=[], + value_values=[], + desc="", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_execute_values.assert_called_once_with( + self.mock_txn, + "INSERT INTO tablename (columnname) VALUES ? ON CONFLICT (columnname) DO NOTHING", + [("oldvalue",)], + template=None, + fetch=False, + ) + else: + self.mock_txn.executemany.assert_called_once_with( + "INSERT INTO tablename (columnname) VALUES (?) 
ON CONFLICT (columnname) DO NOTHING", + [("oldvalue",)], + ) + + @defer.inlineCallbacks + def test_upsert_emulated_no_values_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.fetchall.return_value = [(1,)] + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call("SELECT 1 FROM tablename WHERE columnname = ?", ["value"]), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "SELECT 1 FROM tablename WHERE columnname = ?", ["value"] + ) + self.assertFalse(result) + + @defer.inlineCallbacks + def test_upsert_emulated_no_values_not_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.fetchall.return_value = [] + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "value"}, + values={}, + insertion_values={"columnname": "value"}, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "SELECT 1 FROM tablename WHERE columnname = ?", + ["value"], + ), + call("INSERT INTO tablename (columnname) VALUES (?)", ["value"]), + ], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_insertion_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "UPDATE tablename SET othercol = ? WHERE columnname = ?", + ["newvalue", "oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "UPDATE tablename SET othercol = ? WHERE columnname = ?", + ["newvalue", "oldvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_insertion_not_exists( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 0 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + insertion_values={"thirdcol": "insertionval"}, + ) + ) + + self.mock_txn.execute.assert_has_calls( + [ + call( + "UPDATE tablename SET othercol = ? 
WHERE columnname = ?", + ["newvalue", "oldvalue"], + ), + call( + "INSERT INTO tablename (columnname, othercol, thirdcol) VALUES (?, ?, ?)", + ["oldvalue", "newvalue", "insertionval"], + ), + ] + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_where( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={"othercol": "newvalue"}, + where_clause="thirdcol IS NULL", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL", + ["newvalue", "oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "UPDATE tablename SET othercol = ? WHERE columnname = ? AND thirdcol IS NULL", + ["newvalue", "oldvalue"], + ) + self.assertTrue(result) + + @defer.inlineCallbacks + def test_upsert_emulated_with_where_no_values( + self, + ) -> Generator["defer.Deferred[object]", object, None]: + self.datastore.db_pool._unsafe_to_upsert_tables.add("tablename") + + self.mock_txn.rowcount = 1 + + result = yield defer.ensureDeferred( + self.datastore.db_pool.simple_upsert( + table="tablename", + keyvalues={"columnname": "oldvalue"}, + values={}, + where_clause="thirdcol IS NULL", + ) + ) + + if USE_POSTGRES_FOR_TESTS: + self.mock_txn.execute.assert_has_calls( + [ + call("LOCK TABLE tablename in EXCLUSIVE MODE", ()), + call( + "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL", + ["oldvalue"], + ), + ] + ) + else: + self.mock_txn.execute.assert_called_once_with( + "SELECT 1 FROM tablename WHERE columnname = ? AND thirdcol IS NULL", + ["oldvalue"], + ) + self.assertFalse(result) From 9738b1c4975b293a1bc25ee27b5527724038baa1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Nov 2023 14:00:25 -0500 Subject: [PATCH 139/142] Avoid executing no-op queries. (#16583) If simple_{insert,upsert,update}_many_txn is called without any data to modify then return instead of executing the query. This matches the behavior of simple_{select,delete}_many_txn. --- changelog.d/16583.misc | 1 + synapse/storage/database.py | 32 ++++++++++++++++------- synapse/storage/databases/main/devices.py | 2 +- synapse/storage/databases/main/events.py | 12 ++++----- synapse/storage/databases/main/room.py | 2 +- synapse/storage/databases/main/search.py | 4 +-- tests/storage/test_base.py | 25 ++++-------------- 7 files changed, 39 insertions(+), 39 deletions(-) create mode 100644 changelog.d/16583.misc diff --git a/changelog.d/16583.misc b/changelog.d/16583.misc new file mode 100644 index 0000000000..df5b27b112 --- /dev/null +++ b/changelog.d/16583.misc @@ -0,0 +1 @@ +Avoid executing no-op queries. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index abc7d8a5d2..792f2e7cdf 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1117,7 +1117,7 @@ class DatabasePool: txn: LoggingTransaction, table: str, keys: Collection[str], - values: Iterable[Iterable[Any]], + values: Collection[Iterable[Any]], ) -> None: """Executes an INSERT query on the named table. 
@@ -1130,6 +1130,9 @@ class DatabasePool: keys: list of column names values: for each row, a list of values in the same order as `keys` """ + # If there's nothing to insert, then skip executing the query. + if not values: + return if isinstance(txn.database_engine, PostgresEngine): # We use `execute_values` as it can be a lot faster than `execute_batch`, @@ -1455,7 +1458,7 @@ class DatabasePool: key_names: Collection[str], key_values: Collection[Iterable[Any]], value_names: Collection[str], - value_values: Iterable[Iterable[Any]], + value_values: Collection[Iterable[Any]], ) -> None: """ Upsert, many times. @@ -1468,6 +1471,19 @@ class DatabasePool: value_values: A list of each row's value column values. Ignored if value_names is empty. """ + # If there's nothing to upsert, then skip executing the query. + if not key_values: + return + + # No value columns, therefore make a blank list so that the following + # zip() works correctly. + if not value_names: + value_values = [() for x in range(len(key_values))] + elif len(value_values) != len(key_values): + raise ValueError( + f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number." + ) + if table not in self._unsafe_to_upsert_tables: return self.simple_upsert_many_txn_native_upsert( txn, table, key_names, key_values, value_names, value_values @@ -1502,10 +1518,6 @@ class DatabasePool: value_values: A list of each row's value column values. Ignored if value_names is empty. """ - # No value columns, therefore make a blank list so that the following - # zip() works correctly. - if not value_names: - value_values = [() for x in range(len(key_values))] # Lock the table just once, to prevent it being done once per row. # Note that, according to Postgres' documentation, once obtained, @@ -1543,10 +1555,7 @@ class DatabasePool: allnames.extend(value_names) if not value_names: - # No value columns, therefore make a blank list so that the - # following zip() works correctly. latter = "NOTHING" - value_values = [() for x in range(len(key_values))] else: latter = "UPDATE SET " + ", ".join( k + "=EXCLUDED." + k for k in value_names @@ -1910,6 +1919,7 @@ class DatabasePool: Returns: The results as a list of tuples. """ + # If there's nothing to select, then skip executing the query. if not iterable: return [] @@ -2044,6 +2054,9 @@ class DatabasePool: raise ValueError( f"{len(key_values)} key rows and {len(value_values)} value rows: should be the same number." ) + # If there is nothing to update, then skip executing the query. + if not key_values: + return # List of tuples of (value values, then key values) # (This matches the order needed for the query) @@ -2278,6 +2291,7 @@ class DatabasePool: Returns: Number rows deleted """ + # If there's nothing to delete, then skip executing the query. 
if not values: return 0 diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index b0811a4cf1..04d12a876c 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -703,7 +703,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): key_names=("destination", "user_id"), key_values=[(destination, user_id) for user_id, _ in rows], value_names=("stream_id",), - value_values=((stream_id,) for _, stream_id in rows), + value_values=[(stream_id,) for _, stream_id in rows], ) # Delete all sent outbound pokes diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 647ba182f6..7c34bde3e5 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1476,7 +1476,7 @@ class PersistEventsStore: txn, table="event_json", keys=("event_id", "room_id", "internal_metadata", "json", "format_version"), - values=( + values=[ ( event.event_id, event.room_id, @@ -1485,7 +1485,7 @@ class PersistEventsStore: event.format_version, ) for event, _ in events_and_contexts - ), + ], ) self.db_pool.simple_insert_many_txn( @@ -1508,7 +1508,7 @@ class PersistEventsStore: "state_key", "rejection_reason", ), - values=( + values=[ ( self._instance_name, event.internal_metadata.stream_ordering, @@ -1527,7 +1527,7 @@ class PersistEventsStore: context.rejected, ) for event, context in events_and_contexts - ), + ], ) # If we're persisting an unredacted event we go and ensure @@ -1550,11 +1550,11 @@ class PersistEventsStore: txn, table="state_events", keys=("event_id", "room_id", "type", "state_key"), - values=( + values=[ (event.event_id, event.room_id, event.type, event.state_key) for event, _ in events_and_contexts if event.is_state() - ), + ], ) def _store_rejected_events_txn( diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 6d4b9891e7..afb880532e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2268,7 +2268,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): txn, table="partial_state_rooms_servers", keys=("room_id", "server_name"), - values=((room_id, s) for s in servers), + values=[(room_id, s) for s in servers], ) self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) self._invalidate_cache_and_stream( diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index dbde9130c6..f4bef4c99b 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -106,7 +106,7 @@ class SearchWorkerStore(SQLBaseStore): txn, table="event_search", keys=("event_id", "room_id", "key", "value"), - values=( + values=[ ( entry.event_id, entry.room_id, @@ -114,7 +114,7 @@ class SearchWorkerStore(SQLBaseStore): _clean_value_for_search(entry.value), ) for entry in entries - ), + ], ) else: diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index b4c490b568..de4fcfe026 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -189,17 +189,9 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) if USE_POSTGRES_FOR_TESTS: - self.mock_execute_values.assert_called_once_with( - self.mock_txn, - "INSERT INTO tablename (col1, col2) VALUES ?", - [], - template=None, - fetch=False, - ) + self.mock_execute_values.assert_not_called() else: - self.mock_txn.executemany.assert_called_once_with( - "INSERT INTO 
tablename (col1, col2) VALUES(?, ?)", [] - ) + self.mock_txn.executemany.assert_not_called() @defer.inlineCallbacks def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]: @@ -393,7 +385,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) @defer.inlineCallbacks - def test_update_many_no_values( + def test_update_many_no_iterable( self, ) -> Generator["defer.Deferred[object]", object, None]: yield defer.ensureDeferred( @@ -408,16 +400,9 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) if USE_POSTGRES_FOR_TESTS: - self.mock_execute_batch.assert_called_once_with( - self.mock_txn, - "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", - [], - ) + self.mock_execute_batch.assert_not_called() else: - self.mock_txn.executemany.assert_called_once_with( - "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", - [], - ) + self.mock_txn.executemany.assert_not_called() @defer.inlineCallbacks def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]: From 455ef041871fe944a0f3b7b1f5073663f20a99be Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Nov 2023 14:02:09 -0500 Subject: [PATCH 140/142] Avoid updating the same rows multiple times with simple_update_many_txn. (#16609) simple_update_many_txn had a bug in it which would cause each update to be applied twice. --- changelog.d/16609.bugfix | 1 + synapse/storage/database.py | 5 +---- tests/storage/test_base.py | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) create mode 100644 changelog.d/16609.bugfix diff --git a/changelog.d/16609.bugfix b/changelog.d/16609.bugfix new file mode 100644 index 0000000000..a52d395cd3 --- /dev/null +++ b/changelog.d/16609.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 792f2e7cdf..f50a4ce2fc 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2060,10 +2060,7 @@ class DatabasePool: # List of tuples of (value values, then key values) # (This matches the order needed for the query) - args = [tuple(x) + tuple(y) for x, y in zip(value_values, key_values)] - - for ks, vs in zip(key_values, value_values): - args.append(tuple(vs) + tuple(ks)) + args = [tuple(vv) + tuple(kv) for vv, kv in zip(value_values, key_values)] # 'col1 = ?, col2 = ?, ...' set_clause = ", ".join(f"{n} = ?" for n in value_names) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index de4fcfe026..f34b6b2dcf 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -363,12 +363,12 @@ class SQLBaseStoreTestCase(unittest.TestCase): self.mock_execute_batch.assert_called_once_with( self.mock_txn, "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", - [("val3", "val1", "val2"), ("val3", "val1", "val2")], + [("val3", "val1", "val2")], ) else: self.mock_txn.executemany.assert_called_once_with( "UPDATE tablename SET col3 = ? WHERE col1 = ? AND col2 = ?", - [("val3", "val1", "val2"), ("val3", "val1", "val2")], + [("val3", "val1", "val2")], ) # key_values and value_values must be the same length. 
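
To see the effect of the duplicate-update fix above in isolation, here is a minimal standalone sketch; the column names and values are hypothetical and only mirror the shape of simple_update_many_txn's arguments, they are not taken from Synapse itself.

    # Hypothetical per-row inputs: WHERE col1 = ? AND col2 = ?, SET col3 = ?
    key_values = [("row1", "a"), ("row2", "b")]
    value_values = [("new1",), ("new2",)]

    # Old construction: the comprehension and the loop each appended one tuple
    # per row, so executemany() received every parameter tuple twice and ran
    # each UPDATE twice.
    args_old = [tuple(x) + tuple(y) for x, y in zip(value_values, key_values)]
    for ks, vs in zip(key_values, value_values):
        args_old.append(tuple(vs) + tuple(ks))
    assert args_old == [
        ("new1", "row1", "a"),
        ("new2", "row2", "b"),
        ("new1", "row1", "a"),
        ("new2", "row2", "b"),
    ]

    # Fixed construction: exactly one (value..., key...) tuple per row.
    args_new = [tuple(vv) + tuple(kv) for vv, kv in zip(value_values, key_values)]
    assert args_new == [("new1", "row1", "a"), ("new2", "row2", "b")]
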
From 2a336cd2fc7e043c3d24f501341be2c06c401755 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:27:51 -0500 Subject: [PATCH 141/142] Bump serde_json from 1.0.107 to 1.0.108 (#16604) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3f7e66909b..8fd9a634b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", From f6aa047aa259aba87881bcefe97aff07ddcbb4e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:28:51 -0500 Subject: [PATCH 142/142] Bump pyicu from 2.11 to 2.12 (#16603) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/poetry.lock b/poetry.lock index 41556635d3..77643b0569 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2012,12 +2012,12 @@ plugins = ["importlib-metadata"] [[package]] name = "pyicu" -version = "2.11" +version = "2.12" description = "Python extension wrapping the ICU C++ API" optional = true python-versions = "*" files = [ - {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"}, + {file = "PyICU-2.12.tar.gz", hash = "sha256:bd7ab5efa93ad692e6daa29cd249364e521218329221726a113ca3cb281c8611"}, ] [[package]]