Merge remote-tracking branch 'origin/release-v1.62' into matrix-org-hotfixes

pull/13598/head
Erik Johnston 2022-06-28 13:15:03 +01:00
commit 1bfb991453
51 changed files with 786 additions and 303 deletions


@ -27,9 +27,10 @@ export VIRTUALENV_NO_DOWNLOAD=1
# Patch the project definitions in-place:
# - Replace all lower and tilde bounds with exact bounds
# - Make the pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Replace all caret bounds---but not the one that defines the supported Python version!
# - Delete all lines referring to psycopg2 --- so no testing of postgres support.
# - Use pyopenssl 17.0, which is the oldest version that works with
# a `cryptography` compiled against OpenSSL 1.1.
# - Omit systemd: we're not logging to journal here.
# TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints
@ -40,6 +41,7 @@ export VIRTUALENV_NO_DOWNLOAD=1
sed -i \
-e "s/[~>]=/==/g" \
-e '/^python = "^/!s/\^/==/g' \
-e "/psycopg2/d" \
-e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \
-e '/systemd/d' \
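For illustration, here is a rough Python re-implementation of what that sed rewrite does, applied to a few hypothetical dependency lines (both the sample lines and the re-implementation are a sketch, not part of the change):

```python
import re

# Hypothetical pyproject.toml dependency lines.
lines = [
    'python = "^3.7.1"',         # caret bound kept: defines supported Pythons
    'matrix-common = "^1.2.1"',  # caret bound pinned exactly
    'canonicaljson = ">=1.4.0"', # lower bound pinned exactly
]
for line in lines:
    line = re.sub(r"[~>]=", "==", line)   # lower/tilde bounds -> exact pins
    if not line.startswith('python = "^'):
        line = line.replace("^", "==")    # caret bounds -> exact pins
    print(line)
# python = "^3.7.1"
# matrix-common = "==1.2.1"
# canonicaljson = "==1.4.0"
```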

changelog.d/12939.bugfix (new file)

@ -0,0 +1 @@
Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`.

changelog.d/12944.misc (new file)

@ -0,0 +1 @@
Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints.


@ -1 +1 @@
Add type annotations to `tests.state.test_v2`.
Add type hints to tests.


@ -0,0 +1 @@
Speed up fetching of device list changes in `/sync` and `/keys/changes`.

changelog.d/13054.misc (new file)

@ -0,0 +1 @@
Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks.

changelog.d/13062.misc (new file)

@ -0,0 +1 @@
Allow MSC3030 'timestamp_to_event' calls from anyone on world-readable rooms.

changelog.d/13076.doc (new file)

@ -0,0 +1 @@
Update OpenTracing docs to reference the configuration manual rather than the configuration file.

changelog.d/13082.misc (new file)

@ -0,0 +1 @@
Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds.

changelog.d/13087.bugfix (new file)

@ -0,0 +1 @@
Fix some inconsistencies in the event authentication code.

changelog.d/13088.bugfix (new file)

@ -0,0 +1 @@
Fix some inconsistencies in the event authentication code.

changelog.d/13093.misc (new file)

@ -0,0 +1 @@
Simplify the alias deletion logic as an application service.

changelog.d/13095.doc (new file)

@ -0,0 +1 @@
Update information on downstream Debian packages.

changelog.d/13096.misc (new file)

@ -0,0 +1 @@
Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room.


@ -0,0 +1 @@
Speed up fetching of device list changes in `/sync` and `/keys/changes`.

changelog.d/13099.misc (new file)

@ -0,0 +1 @@
Add type hints to tests.

changelog.d/13106.bugfix (new file)

@ -0,0 +1 @@
Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias.

changelog.d/13112.doc (new file)

@ -0,0 +1 @@
Remove documentation for the Delete Group Admin API which no longer exists.

changelog.d/13118.misc (new file)

@ -0,0 +1 @@
Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room.


@ -0,0 +1 @@
Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`.

changelog.d/13124.misc (new file)

@ -0,0 +1 @@
Add type annotations to `tests.test_server`.


@ -31,17 +31,3 @@ autorestart=true
# Redis can be disabled if the image is being used without workers
autostart={{ enable_redis }}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
# Additional process blocks
{{ worker_config }}


@ -0,0 +1,30 @@
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
priority=10
# Log startup failures to supervisord's stdout/err
# Regular synapse logs will still go in the configured data directory
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
--config-path="{{ main_config_path }}"
--config-path=/conf/workers/shared.yaml
--config-path=/conf/workers/{{ worker.name }}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
{% endfor %}
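For illustration, rendering this template with Jinja2 produces one `[program:synapse_<name>]` block per worker descriptor. A sketch with hypothetical descriptors, assuming the `/conf` template path used in the Docker image:

```python
from jinja2 import Environment, FileSystemLoader

# Hypothetical worker descriptors; the real ones are assembled by the
# configuration script from its WORKERS_CONFIG table.
workers = [
    {"name": "generic_worker1", "app": "synapse.app.generic_worker"},
    {"name": "federation_sender1", "app": "synapse.app.generic_worker"},
]

env = Environment(loader=FileSystemLoader("/conf"))
template = env.get_template("synapse.supervisord.conf.j2")
print(
    template.render(
        main_config_path="/data/homeserver.yaml",
        workers=workers,
    )
)
```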


@ -176,21 +176,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
}
# Templates for sections that may be inserted multiple times in config files
SUPERVISORD_PROCESS_CONFIG_BLOCK = """
[program:synapse_{name}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \
--config-path="{config_path}" \
--config-path=/conf/workers/shared.yaml \
--config-path=/conf/workers/{name}.yaml
autorestart=unexpected
priority=500
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
"""
NGINX_LOCATION_CONFIG_BLOCK = """
location ~* {endpoint} {{
proxy_pass {upstream};
@ -353,13 +338,10 @@ def generate_worker_files(
# This config file will be passed to all workers, including Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
# List of dicts that describe workers.
# We pass this to the Supervisor template later to generate the appropriate
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
@ -437,7 +419,7 @@ def generate_worker_files(
)
# Enable the worker in supervisord
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
worker_descriptors.append(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
@ -535,10 +517,16 @@ def generate_worker_files(
"/conf/supervisord.conf.j2",
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
enable_redis=workers_in_use,
)
convert(
"/conf/synapse.supervisord.conf.j2",
"/etc/supervisor/conf.d/synapse.conf",
workers=worker_descriptors,
main_config_path=config_path,
)
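The `convert` helper used above is, in essence, a load-render-write wrapper around a Jinja2 template. A minimal sketch under that assumption (the real helper lives in this script):

```python
from jinja2 import Template

def convert(src: str, dst: str, **template_vars: object) -> None:
    """Render the Jinja2 template at `src` with `template_vars` and write
    the result to `dst`. A sketch, assuming a plain render-and-write helper."""
    with open(src) as infile:
        template = Template(infile.read())
    with open(dst, "w") as outfile:
        outfile.write(template.render(**template_vars))
```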
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",


@ -55,7 +55,6 @@
- [Admin API](usage/administration/admin_api/README.md)
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Delete Group](admin_api/delete_group.md)
- [Event Reports](admin_api/event_reports.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)


@ -1,14 +0,0 @@
# Delete a local group
This API lets a server admin delete a local group. Doing so will kick all
users out of the group so that their clients will correctly handle the group
being deleted.
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).
The API is:
```
POST /_synapse/admin/v1/delete_group/<group_id>
```


@ -57,8 +57,9 @@ https://www.jaegertracing.io/docs/latest/getting-started.
## Enable OpenTracing in Synapse
OpenTracing is not enabled by default. It must be enabled in the
homeserver config by uncommenting the config options under `opentracing`
as shown in the [sample config](./sample_config.yaml). For example:
homeserver config by adding the `opentracing` option to your config file. You can find
documentation about how to do this in the [config manual under the header 'Opentracing'](usage/configuration/config_documentation.md#opentracing).
See below for an example Opentracing configuration:
```yaml
opentracing:


@ -84,20 +84,19 @@ file when you upgrade the Debian package to a later version.
##### Downstream Debian packages
We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.
If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:
Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
For `bookworm` and `sid`, it can be installed simply with:
```sh
sudo apt install matrix-synapse
```
Synapse is also available in `bullseye-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.
`matrix-synapse` is no longer maintained for `buster` and older.
##### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository


@ -113,6 +113,9 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
[mypy-tests.test_server]
disallow_untyped_defs = True
[mypy-tests.state.test_profile]
disallow_untyped_defs = True

poetry.lock (generated)

@ -1563,7 +1563,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "73882e279e0379482f2fc7414cb71addfd408ca48ad508ff8a02b0cb544762af"
content-hash = "e96625923122e29b6ea5964379828e321b6cede2b020fc32c6f86c09d86d1ae8"
[metadata.files]
attrs = [


@ -110,9 +110,9 @@ jsonschema = ">=3.0.0"
frozendict = ">=1,!=2.1.2"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
canonicaljson = ">=1.4.0"
canonicaljson = "^1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = ">=1.1.0"
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured
@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1"
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "~=1.2.1"
matrix-common = "^1.2.1"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`


@ -141,7 +141,16 @@ async def check_state_independent_auth_rules(
Raises:
AuthError if the checks fail
"""
# Check the auth events.
# Implementation of https://spec.matrix.org/v1.2/rooms/v9/#authorization-rules
# 1. If type is m.room.create:
if event.type == EventTypes.Create:
_check_create(event)
# 1.5 Otherwise, allow
return
# 2. Reject if event has auth_events that: ...
auth_events = await store.get_events(
event.auth_event_ids(),
redact_behaviour=EventRedactBehaviour.as_is,
@ -149,6 +158,7 @@ async def check_state_independent_auth_rules(
)
room_id = event.room_id
auth_dict: MutableStateMap[str] = {}
expected_auth_types = auth_types_for_event(event.room_version, event)
for auth_event_id in event.auth_event_ids():
auth_event = auth_events.get(auth_event_id)
@ -170,6 +180,24 @@ async def check_state_independent_auth_rules(
% (event.event_id, room_id, auth_event_id, auth_event.room_id),
)
k = (auth_event.type, auth_event.state_key)
# 2.1 ... have duplicate entries for a given type and state_key pair
if k in auth_dict:
raise AuthError(
403,
f"Event {event.event_id} has duplicate auth_events for {k}: {auth_dict[k]} and {auth_event_id}",
)
# 2.2 ... have entries whose type and state_key don't match those specified by
# the auth events selection algorithm described in the server
# specification.
if k not in expected_auth_types:
raise AuthError(
403,
f"Event {event.event_id} has unexpected auth_event for {k}: {auth_event_id}",
)
# We also need to check that the auth event itself is not rejected.
if auth_event.rejected_reason:
raise AuthError(
@ -178,30 +206,7 @@ async def check_state_independent_auth_rules(
% (event.event_id, auth_event.event_id),
)
auth_dict[(auth_event.type, auth_event.state_key)] = auth_event_id
# Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
#
# 1. If type is m.room.create:
if event.type == EventTypes.Create:
# 1b. If the domain of the room_id does not match the domain of the sender,
# reject.
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(
403, "Creation event's room_id domain does not match sender's"
)
# 1c. If content.room_version is present and is not a recognised version, reject
room_version_prop = event.content.get("room_version", "1")
if room_version_prop not in KNOWN_ROOM_VERSIONS:
raise AuthError(
403,
"room appears to have unsupported version %s" % (room_version_prop,),
)
return
auth_dict[k] = auth_event_id
# 3. If event does not have a m.room.create in its auth_events, reject.
creation_event = auth_dict.get((EventTypes.Create, ""), None)
@ -324,6 +329,41 @@ def _check_size_limits(event: "EventBase") -> None:
raise EventSizeError("event too large")
def _check_create(event: "EventBase") -> None:
"""Implementation of the auth rules for m.room.create events
Args:
event: The `m.room.create` event to be checked
Raises:
AuthError if the event does not pass the auth rules
"""
assert event.type == EventTypes.Create
# 1.1 If it has any previous events, reject.
if event.prev_event_ids():
raise AuthError(403, "Create event has prev events")
# 1.2 If the domain of the room_id does not match the domain of the sender,
# reject.
sender_domain = get_domain_from_id(event.sender)
room_id_domain = get_domain_from_id(event.room_id)
if room_id_domain != sender_domain:
raise AuthError(403, "Creation event's room_id domain does not match sender's")
# 1.3 If content.room_version is present and is not a recognised version, reject
room_version_prop = event.content.get("room_version", "1")
if room_version_prop not in KNOWN_ROOM_VERSIONS:
raise AuthError(
403,
"room appears to have unsupported version %s" % (room_version_prop,),
)
# 1.4 If content has no creator field, reject.
if EventContentFields.ROOM_CREATOR not in event.content:
raise AuthError(403, "Create event lacks a 'creator' property")
def _can_federate(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool:
creation_event = auth_events.get((EventTypes.Create, ""))
# There should always be a creation event, but if not don't federate.
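Mirroring the tests later in this diff, a hedged sketch of exercising the new `_check_create` rules directly with a well-formed create event:

```python
from synapse import event_auth
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict

# A well-formed create event: no prev_events, the sender's domain matches
# the room's, a recognised room_version, and a "creator" field in content.
create_event = make_event_from_dict(
    {
        "room_id": "!room:example.com",
        "type": "m.room.create",
        "state_key": "",
        "sender": "@creator:example.com",
        "content": {
            "creator": "@creator:example.com",
            "room_version": RoomVersions.V9.identifier,
        },
        "auth_events": [],
        "prev_events": [],
    },
    room_version=RoomVersions.V9,
)

# Passes silently; violating any of rules 1.1-1.4 raises a 403 AuthError.
event_auth._check_create(create_event)
```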


@ -123,6 +123,43 @@ class DeviceWorkerHandler:
return device
async def get_device_changes_in_shared_rooms(
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
) -> Collection[str]:
"""Get the set of users whose devices have changed who share a room with
the given user.
"""
changed_users = await self.store.get_device_list_changes_in_rooms(
room_ids, from_token.device_list_key
)
if changed_users is not None:
# We also check if the given user has changed their device. If
# they're in no rooms then the above query won't include them.
changed = await self.store.get_users_whose_devices_changed(
from_token.device_list_key, [user_id]
)
changed_users.update(changed)
return changed_users
# If the DB returned None then the `from_token` is too old, so we fall
# back on looking for device updates for all users.
users_who_share_room = await self.store.get_users_who_share_room_with_user(
user_id
)
tracked_users = set(users_who_share_room)
# Always tell the user about their own devices
tracked_users.add(user_id)
changed = await self.store.get_users_whose_devices_changed(
from_token.device_list_key, tracked_users
)
return changed
@trace
@measure_func("device.get_user_ids_changed")
async def get_user_ids_changed(
@ -138,19 +175,8 @@ class DeviceWorkerHandler:
room_ids = await self.store.get_rooms_for_user(user_id)
# First we check if any devices have changed for users that we share
# rooms with.
users_who_share_room = await self.store.get_users_who_share_room_with_user(
user_id
)
tracked_users = set(users_who_share_room)
# Always tell the user about their own devices
tracked_users.add(user_id)
changed = await self.store.get_users_whose_devices_changed(
from_token.device_list_key, tracked_users
changed = await self.get_device_changes_in_shared_rooms(
user_id, room_ids, from_token
)
# Then work out if any users have since joined
@ -237,10 +263,19 @@ class DeviceWorkerHandler:
break
if possibly_changed or possibly_left:
# Take the intersection of the users whose devices may have changed
# and those that actually still share a room with the user
possibly_joined = possibly_changed & users_who_share_room
possibly_left = (possibly_changed | possibly_left) - users_who_share_room
possibly_joined = possibly_changed
possibly_left = possibly_changed | possibly_left
# Double check if we still share rooms with the given user.
users_rooms = await self.store.get_rooms_for_users_with_stream_ordering(
possibly_left
)
for changed_user_id, entries in users_rooms.items():
if any(e.room_id in room_ids for e in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
else:
possibly_joined = set()
possibly_left = set()


@ -240,6 +240,7 @@ class SyncHandler:
self.auth_blocking = hs.get_auth_blocking()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._device_handler = hs.get_device_handler()
# TODO: flush cache entries on subsequent sync request.
# Once we get the next /sync request (ie, one with the same access token
@ -1268,21 +1269,11 @@ class SyncHandler:
):
users_that_have_changed.add(changed_user_id)
else:
users_who_share_room = (
await self.store.get_users_who_share_room_with_user(user_id)
)
# Always tell the user about their own devices. We check as the user
# ID is almost certainly already included (unless they're not in any
# rooms) and taking a copy of the set is relatively expensive.
if user_id not in users_who_share_room:
users_who_share_room = set(users_who_share_room)
users_who_share_room.add(user_id)
tracked_users = users_who_share_room
users_that_have_changed = (
await self.store.get_users_whose_devices_changed(
since_token.device_list_key, tracked_users
await self._device_handler.get_device_changes_in_shared_rooms(
user_id,
sync_result_builder.joined_room_ids,
from_token=since_token,
)
)


@ -928,6 +928,17 @@ def set_cors_headers(request: Request) -> None:
)
def set_corp_headers(request: Request) -> None:
"""Set the CORP headers so that javascript running in a web browsers can
embed the resource returned from this request when their client requires
the `Cross-Origin-Embedder-Policy: require-corp` header.
Args:
request: The http request to add the CORP header to.
"""
request.setHeader(b"Cross-Origin-Resource-Policy", b"cross-origin")
def respond_with_html(request: Request, code: int, html: str) -> None:
"""
Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes.


@ -290,7 +290,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
"key": "type",
"pattern": "m.room.server_acl",
"_cache_key": "_room_server_acl",
}
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "",
"_cache_key": "_room_server_acl_state_key",
},
],
"actions": [],
},
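A toy illustration (not Synapse's push-rule evaluator, which glob-matches patterns) of why the added condition matters: both `event_match` conditions must now hold, so only the real server ACL state event, whose `state_key` is the empty string, has its notifications suppressed.

```python
def event_match(event: dict, key: str, pattern: str) -> bool:
    # Simplified to exact equality; real push rules support globs.
    return event.get(key) == pattern

conditions = [("type", "m.room.server_acl"), ("state_key", "")]

state_event = {"type": "m.room.server_acl", "state_key": ""}
spoofed = {"type": "m.room.server_acl"}  # no state_key: not a state event

print(all(event_match(state_event, k, p) for k, p in conditions))  # True
print(all(event_match(spoofed, k, p) for k, p in conditions))      # False
```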


@ -17,13 +17,7 @@ from typing import TYPE_CHECKING, Tuple
from twisted.web.server import Request
from synapse.api.errors import (
AuthError,
Codes,
InvalidClientCredentialsError,
NotFoundError,
SynapseError,
)
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@ -52,6 +46,8 @@ class ClientDirectoryServer(RestServlet):
self.auth = hs.get_auth()
async def on_GET(self, request: Request, room_alias: str) -> Tuple[int, JsonDict]:
if not RoomAlias.is_valid(room_alias):
raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM)
room_alias_obj = RoomAlias.from_string(room_alias)
res = await self.directory_handler.get_association(room_alias_obj)
@ -61,6 +57,8 @@ class ClientDirectoryServer(RestServlet):
async def on_PUT(
self, request: SynapseRequest, room_alias: str
) -> Tuple[int, JsonDict]:
if not RoomAlias.is_valid(room_alias):
raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM)
room_alias_obj = RoomAlias.from_string(room_alias)
content = parse_json_object_from_request(request)
@ -95,31 +93,30 @@ class ClientDirectoryServer(RestServlet):
async def on_DELETE(
self, request: SynapseRequest, room_alias: str
) -> Tuple[int, JsonDict]:
if not RoomAlias.is_valid(room_alias):
raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM)
room_alias_obj = RoomAlias.from_string(room_alias)
requester = await self.auth.get_user_by_req(request)
try:
service = self.auth.get_appservice_by_req(request)
if requester.app_service:
await self.directory_handler.delete_appservice_association(
service, room_alias_obj
requester.app_service, room_alias_obj
)
logger.info(
"Application service at %s deleted alias %s",
service.url,
requester.app_service.url,
room_alias_obj.to_string(),
)
return 200, {}
except InvalidClientCredentialsError:
# fallback to default user behaviour if they aren't an AS
pass
requester = await self.auth.get_user_by_req(request)
user = requester.user
else:
await self.directory_handler.delete_association(requester, room_alias_obj)
await self.directory_handler.delete_association(requester, room_alias_obj)
logger.info(
"User %s deleted alias %s", user.to_string(), room_alias_obj.to_string()
)
logger.info(
"User %s deleted alias %s",
requester.user.to_string(),
room_alias_obj.to_string(),
)
return 200, {}
@ -154,17 +151,6 @@ class ClientDirectoryListServer(RestServlet):
return 200, {}
async def on_DELETE(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await self.directory_handler.edit_published_room_list(
requester, room_id, "private"
)
return 200, {}
class ClientAppserviceDirectoryListServer(RestServlet):
PATTERNS = client_patterns(


@ -1177,7 +1177,9 @@ class TimestampLookupRestServlet(RestServlet):
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
requester = await self._auth.get_user_by_req(request)
await self._auth.check_user_in_room(room_id, requester.user.to_string())
await self._auth.check_user_in_room_or_world_readable(
room_id, requester.user.to_string()
)
timestamp = parse_integer(request, "ts", required=True)
direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])


@ -15,7 +15,11 @@
import logging
from typing import TYPE_CHECKING
from synapse.http.server import DirectServeJsonResource, set_cors_headers
from synapse.http.server import (
DirectServeJsonResource,
set_corp_headers,
set_cors_headers,
)
from synapse.http.servlet import parse_boolean
from synapse.http.site import SynapseRequest
@ -38,6 +42,7 @@ class DownloadResource(DirectServeJsonResource):
async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
set_corp_headers(request)
request.setHeader(
b"Content-Security-Policy",
b"sandbox;"


@ -18,7 +18,11 @@ import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.http.server import DirectServeJsonResource, set_cors_headers
from synapse.http.server import (
DirectServeJsonResource,
set_corp_headers,
set_cors_headers,
)
from synapse.http.servlet import parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.media.v1.media_storage import MediaStorage
@ -58,6 +62,7 @@ class ThumbnailResource(DirectServeJsonResource):
async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
set_corp_headers(request)
server_name, media_id, _ = parse_media_id(request)
width = parse_integer(request, "width", required=True)
height = parse_integer(request, "height", required=True)


@ -1208,6 +1208,65 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
return devices
@cached()
async def _get_min_device_lists_changes_in_room(self) -> int:
"""Returns the minimum stream ID that we have entries for
`device_lists_changes_in_room`
"""
return await self.db_pool.simple_select_one_onecol(
table="device_lists_changes_in_room",
keyvalues={},
retcol="COALESCE(MIN(stream_id), 0)",
desc="get_min_device_lists_changes_in_room",
)
async def get_device_list_changes_in_rooms(
self, room_ids: Collection[str], from_id: int
) -> Optional[Set[str]]:
"""Return the set of users whose devices have changed in the given rooms
since the given stream ID.
Returns None if the given stream ID is too old.
"""
if not room_ids:
return set()
min_stream_id = await self._get_min_device_lists_changes_in_room()
if min_stream_id > from_id:
return None
sql = """
SELECT DISTINCT user_id FROM device_lists_changes_in_room
WHERE {clause} AND stream_id >= ?
"""
def _get_device_list_changes_in_rooms_txn(
txn: LoggingTransaction,
clause: str,
args: List[Any],
) -> Set[str]:
txn.execute(sql.format(clause=clause), args)
return {user_id for user_id, in txn}
changes = set()
for chunk in batch_iter(room_ids, 1000):
clause, args = make_in_list_sql_clause(
self.database_engine, "room_id", chunk
)
args.append(from_id)
changes |= await self.db_pool.runInteraction(
"get_device_list_changes_in_rooms",
_get_device_list_changes_in_rooms_txn,
clause,
args,
)
return changes
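The chunking above, via the Synapse helpers `batch_iter` and `make_in_list_sql_clause`, bounds the size of each SQL `IN (...)` list. A generic sqlite3 sketch of the same pattern, with made-up table contents:

```python
import sqlite3
from itertools import islice

def batch_iter(iterable, size):
    # Yield fixed-size chunks, like Synapse's batch_iter helper.
    it = iter(iterable)
    while chunk := tuple(islice(it, size)):
        yield chunk

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE device_lists_changes_in_room"
    " (room_id TEXT, user_id TEXT, stream_id INTEGER)"
)
conn.executemany(
    "INSERT INTO device_lists_changes_in_room VALUES (?, ?, ?)",
    [("!a:x", "@u1:x", 5), ("!b:x", "@u2:x", 7), ("!c:x", "@u3:x", 2)],
)

from_id = 4
changes = set()
for chunk in batch_iter(["!a:x", "!b:x", "!c:x"], 1000):
    placeholders = ", ".join("?" for _ in chunk)
    rows = conn.execute(
        "SELECT DISTINCT user_id FROM device_lists_changes_in_room"
        f" WHERE room_id IN ({placeholders}) AND stream_id >= ?",
        (*chunk, from_id),
    )
    changes |= {user_id for (user_id,) in rows}
print(changes)  # {'@u1:x', '@u2:x'} (order may vary)
```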
class DeviceBackgroundUpdateStore(SQLBaseStore):
def __init__(


@ -148,7 +148,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
self._doing_notif_rotation = False
if hs.config.worker.run_background_tasks:
self._rotate_notif_loop = self._clock.looping_call(
self._rotate_notifs, 30 * 60 * 1000
self._rotate_notifs, 30 * 1000
)
self.db_pool.updates.register_background_index_update(
@ -233,14 +233,30 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
counts = NotifCounts()
# First we pull the counts from the summary table
# First we pull the counts from the summary table.
#
# We check that `last_receipt_stream_ordering` matches the stream
# ordering given. If it doesn't match then a new read receipt has arrived and
# we haven't yet updated the counts in `event_push_summary` to reflect
# that; in that case we simply ignore `event_push_summary` counts
# and do a manual count of all of the rows in the `event_push_actions` table
# for this user/room.
#
# If `last_receipt_stream_ordering` is null then that means it's up to
# date (as the row was written by an older version of Synapse that
# updated `event_push_summary` synchronously when persisting a new read
# receipt).
txn.execute(
"""
SELECT stream_ordering, notif_count, COALESCE(unread_count, 0)
FROM event_push_summary
WHERE room_id = ? AND user_id = ? AND stream_ordering > ?
WHERE room_id = ? AND user_id = ?
AND (
(last_receipt_stream_ordering IS NULL AND stream_ordering > ?)
OR last_receipt_stream_ordering = ?
)
""",
(room_id, user_id, stream_ordering),
(room_id, user_id, stream_ordering, stream_ordering),
)
row = txn.fetchone()
@ -263,9 +279,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
if row:
counts.highlight_count += row[0]
# Finally we need to count push actions that haven't been summarized
# yet.
# We only want to pull out push actions that we haven't summarized yet.
# Finally we need to count push actions that aren't included in the
# summary returned above, e.g. recent events that haven't been
# summarized yet, or the summary is empty due to a recent read receipt.
stream_ordering = max(stream_ordering, summary_stream_ordering)
notify_count, unread_count = self._get_notif_unread_count_for_user_room(
txn, room_id, user_id, stream_ordering
@ -800,6 +816,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
self._doing_notif_rotation = True
try:
# First we recalculate push summaries and delete stale push actions
# for rooms/users with new receipts.
while True:
logger.debug("Handling new receipts")
caught_up = await self.db_pool.runInteraction(
"_handle_new_receipts_for_notifs_txn",
self._handle_new_receipts_for_notifs_txn,
)
if caught_up:
break
# Then we update the event push summaries for any new events
while True:
logger.info("Rotating notifications")
@ -810,10 +839,110 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
break
await self.hs.get_clock().sleep(self._rotate_delay)
# Finally we clear out old event push actions.
await self._remove_old_push_actions_that_have_rotated()
finally:
self._doing_notif_rotation = False
def _handle_new_receipts_for_notifs_txn(self, txn: LoggingTransaction) -> bool:
"""Check for new read receipts and delete from event push actions.
Any push actions which predate the user's most recent read receipt are
now redundant, so we can remove them from `event_push_actions` and
update `event_push_summary`.
"""
limit = 100
min_stream_id = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_push_summary_last_receipt_stream_id",
keyvalues={},
retcol="stream_id",
)
sql = """
SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
FROM receipts_linearized AS r
INNER JOIN events AS e USING (event_id)
WHERE r.stream_id > ? AND user_id LIKE ?
ORDER BY r.stream_id ASC
LIMIT ?
"""
# We only want local users, so we add a dodgy filter to the above query
# and recheck it below.
user_filter = "%:" + self.hs.hostname
txn.execute(
sql,
(
min_stream_id,
user_filter,
limit,
),
)
rows = txn.fetchall()
# For each new read receipt we delete push actions from before it and
# recalculate the summary.
for _, room_id, user_id, stream_ordering in rows:
# Only handle our own read receipts.
if not self.hs.is_mine_id(user_id):
continue
txn.execute(
"""
DELETE FROM event_push_actions
WHERE room_id = ?
AND user_id = ?
AND stream_ordering <= ?
AND highlight = 0
""",
(room_id, user_id, stream_ordering),
)
old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_push_summary_stream_ordering",
keyvalues={},
retcol="stream_ordering",
)
notif_count, unread_count = self._get_notif_unread_count_for_user_room(
txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
)
self.db_pool.simple_upsert_txn(
txn,
table="event_push_summary",
keyvalues={"room_id": room_id, "user_id": user_id},
values={
"notif_count": notif_count,
"unread_count": unread_count,
"stream_ordering": old_rotate_stream_ordering,
"last_receipt_stream_ordering": stream_ordering,
},
)
# We always update `event_push_summary_last_receipt_stream_id` to
# ensure that we don't rescan the same receipts for remote users.
#
# This requires repeatable read to be safe, as we need the
# `MAX(stream_id)` to not include any new rows that have been committed
# since the start of the transaction (since those rows won't have been
# returned by the query above). Alternatively we could query the max
# stream ID at the start of the transaction and bound everything by
# that.
txn.execute(
"""
UPDATE event_push_summary_last_receipt_stream_id
SET stream_id = (SELECT COALESCE(MAX(stream_id), 0) FROM receipts_linearized)
"""
)
return len(rows) < limit
def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool:
"""Archives older notifications into event_push_summary. Returns whether
the archiving process has caught up or not.
@ -1033,66 +1162,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas
if done:
break
def _remove_old_push_actions_before_txn(
self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int
) -> None:
"""
Purges old push actions for a user and room before a given
stream_ordering.
We however keep a month's worth of highlighted notifications, so that
users can still get a list of recent highlights.
Args:
txn: The transaction
room_id: Room ID to delete from
user_id: user ID to delete for
stream_ordering: The lowest stream ordering which will
not be deleted.
"""
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate,
(room_id, user_id),
)
# We need to join on the events table to get the received_ts for
# event_push_actions and sqlite won't let us use a join in a delete so
# we can't just delete where received_ts < x. Furthermore we can
# only identify event_push_actions by a tuple of room_id, event_id
# so we can't use a subquery.
# Instead, we look up the stream ordering for the last event in that
# room received before the threshold time and delete event_push_actions
# in the room with a stream_ordering before that.
txn.execute(
"DELETE FROM event_push_actions "
" WHERE user_id = ? AND room_id = ? AND "
" stream_ordering <= ?"
" AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)",
(user_id, room_id, stream_ordering, self.stream_ordering_month_ago),
)
old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
txn,
table="event_push_summary_stream_ordering",
keyvalues={},
retcol="stream_ordering",
)
notif_count, unread_count = self._get_notif_unread_count_for_user_room(
txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
)
self.db_pool.simple_upsert_txn(
txn,
table="event_push_summary",
keyvalues={"room_id": room_id, "user_id": user_id},
values={
"notif_count": notif_count,
"unread_count": unread_count,
"stream_ordering": old_rotate_stream_ordering,
},
)
class EventPushActionsStore(EventPushActionsWorkerStore):
EPA_HIGHLIGHT_INDEX = "epa_highlight_index"


@ -26,7 +26,7 @@ from typing import (
cast,
)
from synapse.api.constants import EduTypes, ReceiptTypes
from synapse.api.constants import EduTypes
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import ReceiptsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
@ -682,17 +682,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
lock=False,
)
# When updating a local users read receipt, remove any push actions
# which resulted from the receipt's event and all earlier events.
if (
self.hs.is_mine_id(user_id)
and receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE)
and stream_ordering is not None
):
self._remove_old_push_actions_before_txn( # type: ignore[attr-defined]
txn, room_id=room_id, user_id=user_id, stream_ordering=stream_ordering
)
return rx_ts
def _graph_to_linear(


@ -0,0 +1,35 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- Add a column that records the position of the read receipt for the user at
-- the time we summarised the push actions. This is used to check if the counts
-- are up to date after a new read receipt has been sent.
--
-- Null means that we can skip that check, as the row was written by an older
-- version of Synapse that updated `event_push_summary` synchronously when
-- persisting a new read receipt
ALTER TABLE event_push_summary ADD COLUMN last_receipt_stream_ordering BIGINT;
-- Tracks which new receipts we've handled
CREATE TABLE event_push_summary_last_receipt_stream_id (
Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
stream_id BIGINT NOT NULL,
CHECK (Lock='X')
);
INSERT INTO event_push_summary_last_receipt_stream_id (stream_id)
SELECT COALESCE(MAX(stream_id), 0)
FROM receipts_linearized;
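The `Lock CHAR(1) ... UNIQUE` plus `CHECK (Lock='X')` idiom guarantees the table can never hold more than one row; a quick sqlite3 demonstration:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE event_push_summary_last_receipt_stream_id (
        Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,
        stream_id BIGINT NOT NULL,
        CHECK (Lock = 'X')
    )
    """
)
conn.execute(
    "INSERT INTO event_push_summary_last_receipt_stream_id (stream_id) VALUES (0)"
)
try:
    # A second row would duplicate Lock='X', violating the UNIQUE constraint.
    conn.execute(
        "INSERT INTO event_push_summary_last_receipt_stream_id (stream_id) VALUES (1)"
    )
except sqlite3.IntegrityError as err:
    print("second row rejected:", err)
# The single row is simply updated in place thereafter.
conn.execute("UPDATE event_push_summary_last_receipt_stream_id SET stream_id = 42")
```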


@ -225,9 +225,10 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
# we need a user on the remote server to be a member, so that we can send
# extremity-causing events.
remote_server_user_id = f"@user:{self.OTHER_SERVER_NAME}"
self.get_success(
event_injection.inject_member_event(
self.hs, room_id, f"@user:{self.OTHER_SERVER_NAME}", "join"
self.hs, room_id, remote_server_user_id, "join"
)
)
@ -247,6 +248,12 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
# create more than is 5 which corresponds to the number of backward
# extremities we slice off in `_maybe_backfill_inner`
federation_event_handler = self.hs.get_federation_event_handler()
auth_events = [
ev
for ev in current_state
if (ev.type, ev.state_key)
in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
]
for _ in range(0, 8):
event = make_event_from_dict(
self.add_hashes_and_signatures(
@ -258,15 +265,14 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
"body": "message connected to fake event",
},
"room_id": room_id,
"sender": f"@user:{self.OTHER_SERVER_NAME}",
"sender": remote_server_user_id,
"prev_events": [
ev1.event_id,
# We're creating a backward extremity each time thanks
# to this fake event
generate_fake_event_id(),
],
# lazy: *everything* is an auth event
"auth_events": [ev.event_id for ev in current_state],
"auth_events": [ev.event_id for ev in auth_events],
"depth": ev1.depth + 1,
},
room_version,


@ -98,7 +98,6 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
auth_event_ids = [
initial_state_map[("m.room.create", "")],
initial_state_map[("m.room.power_levels", "")],
initial_state_map[("m.room.join_rules", "")],
member_event.event_id,
]


@ -16,6 +16,7 @@ from http import HTTPStatus
from twisted.test.proto_helpers import MemoryReactor
from synapse.appservice import ApplicationService
from synapse.rest import admin
from synapse.rest.client import directory, login, room
from synapse.server import HomeServer
@ -129,6 +130,39 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
def test_deleting_alias_via_directory_appservice(self) -> None:
user_id = "@as:test"
as_token = "i_am_an_app_service"
appservice = ApplicationService(
as_token,
id="1234",
namespaces={"aliases": [{"regex": "#asns-*", "exclusive": True}]},
sender=user_id,
)
self.hs.get_datastores().main.services_cache.append(appservice)
# Add an alias for the room, as the appservice
alias = RoomAlias(f"asns-{random_string(5)}", self.hs.hostname).to_string()
data = {"room_id": self.room_id}
request_data = json.dumps(data)
channel = self.make_request(
"PUT",
f"/_matrix/client/r0/directory/room/{alias}",
request_data,
access_token=as_token,
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
# Then try to remove the alias, as the appservice
channel = self.make_request(
"DELETE",
f"/_matrix/client/r0/directory/room/{alias}",
access_token=as_token,
)
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
def test_deleting_nonexistant_alias(self) -> None:
# Check that no alias exists
alias = "#potato:test"
@ -181,6 +215,19 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, expected_code, channel.result)
return alias
def test_invalid_alias(self) -> None:
alias = "#potato"
channel = self.make_request(
"GET",
f"/_matrix/client/r0/directory/room/{alias}",
access_token=self.user_tok,
)
self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)
self.assertIn("error", channel.json_body, channel.json_body)
self.assertEqual(
channel.json_body["errcode"], "M_INVALID_PARAM", channel.json_body
)
def random_alias(self, length: int) -> str:
return RoomAlias(random_string(length), self.hs.hostname).to_string()


@ -481,6 +481,12 @@ class MediaRepoTests(unittest.HomeserverTestCase):
if expected_found:
self.assertEqual(channel.code, 200)
self.assertEqual(
channel.headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
[b"cross-origin"],
)
if expected_body is not None:
self.assertEqual(
channel.result["body"], expected_body, channel.result["body"]
@ -549,6 +555,20 @@ class MediaRepoTests(unittest.HomeserverTestCase):
[b"noindex, nofollow, noarchive, noimageindex"],
)
def test_cross_origin_resource_policy_header(self) -> None:
"""
Test that the Cross-Origin-Resource-Policy header is set to "cross-origin"
allowing web clients to embed media from the downloads API.
"""
channel = self._req(b"inline; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
headers.getRawHeaders(b"Cross-Origin-Resource-Policy"),
[b"cross-origin"],
)
class TestSpamChecker:
"""A spam checker module that rejects all media that includes the bytes


@ -14,7 +14,11 @@
from unittest.mock import Mock
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
from synapse.storage.databases.main.event_push_actions import NotifCounts
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@ -29,31 +33,33 @@ HIGHLIGHT = [
class EventPushActionsStoreTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.persist_events_store = hs.get_datastores().persist_events
persist_events_store = hs.get_datastores().persist_events
assert persist_events_store is not None
self.persist_events_store = persist_events_store
def test_get_unread_push_actions_for_user_in_range_for_http(self):
def test_get_unread_push_actions_for_user_in_range_for_http(self) -> None:
self.get_success(
self.store.get_unread_push_actions_for_user_in_range_for_http(
USER_ID, 0, 1000, 20
)
)
def test_get_unread_push_actions_for_user_in_range_for_email(self):
def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None:
self.get_success(
self.store.get_unread_push_actions_for_user_in_range_for_email(
USER_ID, 0, 1000, 20
)
)
def test_count_aggregation(self):
def test_count_aggregation(self) -> None:
room_id = "!foo:example.com"
user_id = "@user1235:example.com"
user_id = "@user1235:test"
last_read_stream_ordering = [0]
def _assert_counts(notif_count, highlight_count):
def _assert_counts(notif_count: int, highlight_count: int) -> None:
counts = self.get_success(
self.store.db_pool.runInteraction(
"",
@ -72,14 +78,29 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
),
)
def _inject_actions(stream, action):
def _inject_actions(stream: int, action: list) -> None:
event = Mock()
event.room_id = room_id
event.event_id = "$test:example.com"
event.event_id = f"$test{stream}:example.com"
event.internal_metadata.stream_ordering = stream
event.internal_metadata.is_outlier.return_value = False
event.depth = stream
self.get_success(
self.store.db_pool.simple_insert(
table="events",
values={
"stream_ordering": stream,
"topological_ordering": stream,
"type": "m.room.message",
"room_id": room_id,
"processed": True,
"outlier": False,
"event_id": event.event_id,
},
)
)
self.get_success(
self.store.add_push_actions_to_staging(
event.event_id,
@ -96,21 +117,31 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
)
def _rotate(stream):
def _rotate(stream: int) -> None:
self.get_success(
self.store.db_pool.runInteraction(
"", self.store._rotate_notifs_before_txn, stream
"rotate-receipts", self.store._handle_new_receipts_for_notifs_txn
)
)
def _mark_read(stream, depth):
self.get_success(
self.store.db_pool.runInteraction(
"rotate-notifs", self.store._rotate_notifs_before_txn, stream
)
)
def _mark_read(stream: int, depth: int) -> None:
last_read_stream_ordering[0] = stream
self.get_success(
self.store.db_pool.runInteraction(
"",
self.store._remove_old_push_actions_before_txn,
self.store._insert_linearized_receipt_txn,
room_id,
"m.read",
user_id,
f"$test{stream}:example.com",
{},
stream,
)
)
@ -144,7 +175,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
_assert_counts(1, 0)
_mark_read(7, 7)
_mark_read(6, 6)
_assert_counts(0, 0)
_inject_actions(8, HIGHLIGHT)
@ -165,8 +196,8 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
_mark_read(10, 10)
_assert_counts(0, 0)
def test_find_first_stream_ordering_after_ts(self):
def add_event(so, ts):
def test_find_first_stream_ordering_after_ts(self) -> None:
def add_event(so: int, ts: int) -> None:
self.get_success(
self.store.db_pool.simple_insert(
"events",


@ -109,6 +109,133 @@ class EventAuthTestCase(unittest.TestCase):
)
)
def test_create_event_with_prev_events(self):
"""A create event with prev_events should be rejected
https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules
1: If type is m.room.create:
1. If it has any previous events, reject.
"""
creator = f"@creator:{TEST_DOMAIN}"
# we make both a good event and a bad event, to check that we are rejecting
# the bad event for the reason we think we are.
good_event = make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"type": "m.room.create",
"state_key": "",
"sender": creator,
"content": {
"creator": creator,
"room_version": RoomVersions.V9.identifier,
},
"auth_events": [],
"prev_events": [],
},
room_version=RoomVersions.V9,
)
bad_event = make_event_from_dict(
{**good_event.get_dict(), "prev_events": ["$fakeevent"]},
room_version=RoomVersions.V9,
)
event_store = _StubEventSourceStore()
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, good_event)
)
with self.assertRaises(AuthError):
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, bad_event)
)
def test_duplicate_auth_events(self):
"""Events with duplicate auth_events should be rejected
https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules
2. Reject if event has auth_events that:
1. have duplicate entries for a given type and state_key pair
"""
creator = "@creator:example.com"
create_event = _create_event(RoomVersions.V9, creator)
join_event1 = _join_event(RoomVersions.V9, creator)
pl_event = _power_levels_event(
RoomVersions.V9,
creator,
{"state_default": 30, "users": {"creator": 100}},
)
# create a second join event, so that we can make a duplicate
join_event2 = _join_event(RoomVersions.V9, creator)
event_store = _StubEventSourceStore()
event_store.add_events([create_event, join_event1, join_event2, pl_event])
good_event = _random_state_event(
RoomVersions.V9, creator, [create_event, join_event2, pl_event]
)
bad_event = _random_state_event(
RoomVersions.V9, creator, [create_event, join_event1, join_event2, pl_event]
)
# a variation: two instances of the *same* event
bad_event2 = _random_state_event(
RoomVersions.V9, creator, [create_event, join_event2, join_event2, pl_event]
)
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, good_event)
)
with self.assertRaises(AuthError):
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, bad_event)
)
with self.assertRaises(AuthError):
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, bad_event2)
)
def test_unexpected_auth_events(self):
"""Events with excess auth_events should be rejected
https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules
2. Reject if event has auth_events that:
2. have entries whose type and state_key don't match those specified by the
auth events selection algorithm described in the server specification.
"""
creator = "@creator:example.com"
create_event = _create_event(RoomVersions.V9, creator)
join_event = _join_event(RoomVersions.V9, creator)
pl_event = _power_levels_event(
RoomVersions.V9,
creator,
{"state_default": 30, "users": {"creator": 100}},
)
join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public")
event_store = _StubEventSourceStore()
event_store.add_events([create_event, join_event, pl_event, join_rules_event])
good_event = _random_state_event(
RoomVersions.V9, creator, [create_event, join_event, pl_event]
)
# join rules should *not* be included in the auth events.
bad_event = _random_state_event(
RoomVersions.V9,
creator,
[create_event, join_event, pl_event, join_rules_event],
)
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, good_event)
)
with self.assertRaises(AuthError):
get_awaitable_result(
event_auth.check_state_independent_auth_rules(event_store, bad_event)
)
def test_random_users_cannot_send_state_before_first_pl(self):
"""
Check that, before the first PL lands, the creator is the only user
@ -564,8 +691,8 @@ class EventAuthTestCase(unittest.TestCase):
# helpers for making events
TEST_ROOM_ID = "!test:room"
TEST_DOMAIN = "example.com"
TEST_ROOM_ID = f"!test_room:{TEST_DOMAIN}"
def _create_event(


@ -14,7 +14,7 @@
import re
from http import HTTPStatus
from typing import Tuple
from typing import Awaitable, Callable, Dict, NoReturn, Optional, Tuple
from twisted.internet.defer import Deferred
from twisted.web.resource import Resource
@ -36,6 +36,7 @@ from synapse.util import Clock
from tests import unittest
from tests.http.server._base import test_disconnect
from tests.server import (
FakeChannel,
FakeSite,
ThreadedMemoryReactorClock,
make_request,
@ -44,7 +45,7 @@ from tests.server import (
class JsonResourceTests(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.reactor = ThreadedMemoryReactorClock()
self.hs_clock = Clock(self.reactor)
self.homeserver = setup_test_homeserver(
@ -54,7 +55,7 @@ class JsonResourceTests(unittest.TestCase):
reactor=self.reactor,
)
def test_handler_for_request(self):
def test_handler_for_request(self) -> None:
"""
JsonResource.handler_for_request gives correctly decoded URL args to
the callback, while Twisted will give the raw bytes of URL query
@ -62,7 +63,9 @@ class JsonResourceTests(unittest.TestCase):
"""
got_kwargs = {}
def _callback(request, **kwargs):
def _callback(
request: SynapseRequest, **kwargs: object
) -> Tuple[int, Dict[str, object]]:
got_kwargs.update(kwargs)
return 200, kwargs
@ -83,13 +86,13 @@ class JsonResourceTests(unittest.TestCase):
self.assertEqual(got_kwargs, {"room_id": "\N{SNOWMAN}"})
def test_callback_direct_exception(self):
def test_callback_direct_exception(self) -> None:
"""
If the web callback raises an uncaught exception, it will be translated
into a 500.
"""
def _callback(request, **kwargs):
def _callback(request: SynapseRequest, **kwargs: object) -> NoReturn:
raise Exception("boo")
res = JsonResource(self.homeserver)
@ -103,17 +106,17 @@ class JsonResourceTests(unittest.TestCase):
self.assertEqual(channel.result["code"], b"500")
def test_callback_indirect_exception(self):
def test_callback_indirect_exception(self) -> None:
"""
If the web callback raises an uncaught exception in a Deferred, it will
be translated into a 500.
"""
def _throw(*args):
def _throw(*args: object) -> NoReturn:
raise Exception("boo")
def _callback(request, **kwargs):
d = Deferred()
def _callback(request: SynapseRequest, **kwargs: object) -> "Deferred[None]":
d: "Deferred[None]" = Deferred()
d.addCallback(_throw)
self.reactor.callLater(0.5, d.callback, True)
return make_deferred_yieldable(d)
@ -129,13 +132,13 @@ class JsonResourceTests(unittest.TestCase):
self.assertEqual(channel.result["code"], b"500")
def test_callback_synapseerror(self):
def test_callback_synapseerror(self) -> None:
"""
If the web callback raises a SynapseError, it returns the appropriate
status code and message set in it.
"""
def _callback(request, **kwargs):
def _callback(request: SynapseRequest, **kwargs: object) -> NoReturn:
raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)
res = JsonResource(self.homeserver)
@ -151,12 +154,12 @@ class JsonResourceTests(unittest.TestCase):
self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
def test_no_handler(self):
def test_no_handler(self) -> None:
"""
If there is no handler to process the request, Synapse will return 400.
"""
def _callback(request, **kwargs):
def _callback(request: SynapseRequest, **kwargs: object) -> None:
"""
Not ever actually called!
"""
@ -175,14 +178,16 @@ class JsonResourceTests(unittest.TestCase):
self.assertEqual(channel.json_body["error"], "Unrecognized request")
self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
def test_head_request(self):
def test_head_request(self) -> None:
"""
JsonResource.handler_for_request gives correctly decoded URL args to
the callback, while Twisted will give the raw bytes of URL query
arguments.
"""
def _callback(request, **kwargs):
def _callback(
request: SynapseRequest, **kwargs: object
) -> Tuple[int, Dict[str, object]]:
return 200, {"result": True}
res = JsonResource(self.homeserver)
@ -203,20 +208,21 @@ class JsonResourceTests(unittest.TestCase):
class OptionsResourceTests(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.reactor = ThreadedMemoryReactorClock()
class DummyResource(Resource):
isLeaf = True
def render(self, request):
return request.path
def render(self, request: SynapseRequest) -> bytes:
# Type-ignore: mypy thinks request.path is Optional[Any], not bytes.
return request.path # type: ignore[return-value]
# Setup a resource with some children.
self.resource = OptionsResource()
self.resource.putChild(b"res", DummyResource())
def _make_request(self, method, path):
def _make_request(self, method: bytes, path: bytes) -> FakeChannel:
"""Create a request from the method/path and return a channel with the response."""
# Create a site and query for the resource.
site = SynapseSite(
@ -233,7 +239,7 @@ class OptionsResourceTests(unittest.TestCase):
channel = make_request(self.reactor, site, method, path, shorthand=False)
return channel
def test_unknown_options_request(self):
def test_unknown_options_request(self) -> None:
"""An OPTIONS requests to an unknown URL still returns 204 No Content."""
channel = self._make_request(b"OPTIONS", b"/foo/")
self.assertEqual(channel.result["code"], b"204")
@ -253,7 +259,7 @@ class OptionsResourceTests(unittest.TestCase):
"has CORS Headers header",
)
def test_known_options_request(self):
def test_known_options_request(self) -> None:
"""An OPTIONS requests to an known URL still returns 204 No Content."""
channel = self._make_request(b"OPTIONS", b"/res/")
self.assertEqual(channel.result["code"], b"204")
@ -273,12 +279,12 @@ class OptionsResourceTests(unittest.TestCase):
"has CORS Headers header",
)
def test_unknown_request(self):
def test_unknown_request(self) -> None:
"""A non-OPTIONS request to an unknown URL should 404."""
channel = self._make_request(b"GET", b"/foo/")
self.assertEqual(channel.result["code"], b"404")
def test_known_request(self):
def test_known_request(self) -> None:
"""A non-OPTIONS request to an known URL should query the proper resource."""
channel = self._make_request(b"GET", b"/res/")
self.assertEqual(channel.result["code"], b"200")
@ -287,16 +293,17 @@ class OptionsResourceTests(unittest.TestCase):
class WrapHtmlRequestHandlerTests(unittest.TestCase):
class TestResource(DirectServeHtmlResource):
callback = None
callback: Optional[Callable[..., Awaitable[None]]]
async def _async_render_GET(self, request):
async def _async_render_GET(self, request: SynapseRequest) -> None:
assert self.callback is not None
await self.callback(request)
def setUp(self):
def setUp(self) -> None:
self.reactor = ThreadedMemoryReactorClock()
def test_good_response(self):
async def callback(request):
def test_good_response(self) -> None:
async def callback(request: SynapseRequest) -> None:
request.write(b"response")
request.finish()
@ -311,13 +318,13 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
body = channel.result["body"]
self.assertEqual(body, b"response")
def test_redirect_exception(self):
def test_redirect_exception(self) -> None:
"""
If the callback raises a RedirectException, it is turned into a 30x
with the right location.
"""
async def callback(request, **kwargs):
async def callback(request: SynapseRequest, **kwargs: object) -> None:
raise RedirectException(b"/look/an/eagle", 301)
res = WrapHtmlRequestHandlerTests.TestResource()
@ -332,13 +339,13 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
location_headers = [v for k, v in headers if k == b"Location"]
self.assertEqual(location_headers, [b"/look/an/eagle"])
def test_redirect_exception_with_cookie(self):
def test_redirect_exception_with_cookie(self) -> None:
"""
If the callback raises a RedirectException which sets a cookie, that is
returned too
"""
async def callback(request, **kwargs):
async def callback(request: SynapseRequest, **kwargs: object) -> NoReturn:
e = RedirectException(b"/no/over/there", 304)
e.cookies.append(b"session=yespls")
raise e
@ -357,10 +364,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
cookies_headers = [v for k, v in headers if k == b"Set-Cookie"]
self.assertEqual(cookies_headers, [b"session=yespls"])
def test_head_request(self):
def test_head_request(self) -> None:
"""A head request should work by being turned into a GET request."""
async def callback(request):
async def callback(request: SynapseRequest) -> None:
request.write(b"response")
request.finish()
@ -410,7 +417,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource):
class DirectServeJsonResourceCancellationTests(unittest.TestCase):
"""Tests for `DirectServeJsonResource` cancellation."""
def setUp(self):
def setUp(self) -> None:
self.reactor = ThreadedMemoryReactorClock()
self.clock = Clock(self.reactor)
self.resource = CancellableDirectServeJsonResource(self.clock)
@ -444,7 +451,7 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase):
class DirectServeHtmlResourceCancellationTests(unittest.TestCase):
"""Tests for `DirectServeHtmlResource` cancellation."""
def setUp(self):
def setUp(self) -> None:
self.reactor = ThreadedMemoryReactorClock()
self.clock = Clock(self.reactor)
self.resource = CancellableDirectServeHtmlResource(self.clock)