Merge remote-tracking branch 'origin/develop' into clokep/ip-blacklists
commit d5ba7a927f

UPGRADE.rst
@@ -105,6 +105,28 @@ shown below:

     return {"localpart": localpart}

+Removal of historical Synapse Admin API
+----------------------------------------
+
+Historically, the Synapse Admin API has been accessible under:
+
+* ``/_matrix/client/api/v1/admin``
+* ``/_matrix/client/unstable/admin``
+* ``/_matrix/client/r0/admin``
+* ``/_synapse/admin/v1``
+
+The endpoints with ``/_matrix/client/*`` prefixes have been removed as of v1.24.0.
+The Admin API is now only accessible under:
+
+* ``/_synapse/admin/v1``
+
+The only exception is the ``/admin/whois`` endpoint, which is
+`also available via the client-server API <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_.
+
+The deprecation of the old endpoints was announced with Synapse 1.20.0 (released
+on 2020-09-22) and makes it easier for homeserver admins to lock down external
+access to the Admin API endpoints.
+
 Upgrading to v1.23.0
 ====================
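Scripts that still target the removed `/_matrix/client/*` admin prefixes need updating to the surviving prefix. A minimal sketch using `requests` — the homeserver URL and access token are placeholders, and `server_version` is just a convenient admin endpoint to test against:

```python
import requests

# Hypothetical values -- substitute your own homeserver and admin token.
HOMESERVER = "https://homeserver.example.com"
ACCESS_TOKEN = "syt_..."

# The Admin API now lives only under /_synapse/admin/v1.
resp = requests.get(
    f"{HOMESERVER}/_synapse/admin/v1/server_version",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
resp.raise_for_status()
print(resp.json())
```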
changelog.d (new files)

@@ -0,0 +1 @@
+Simplify the way the `HomeServer` object caches its internal attributes.

@@ -0,0 +1 @@
+Remove old `/_matrix/client/*/admin` endpoints which were deprecated since Synapse 1.20.0.

@@ -0,0 +1 @@
+Allow per-room profiles to be used for the server notice user.

@@ -0,0 +1 @@
+Remove unnecessary function arguments and add typing to several membership replication classes.

@@ -0,0 +1 @@
+Optimise the lookup for an invite from another homeserver when trying to reject it.

@@ -0,0 +1 @@
+Fix a bug where logging could break after a call to SIGHUP.

@@ -0,0 +1 @@
+Update the formatting of the `push` section of the homeserver config file to better align with the [code style guidelines](https://github.com/matrix-org/synapse/blob/develop/docs/code_style.md#configuration-file-format).

@@ -0,0 +1 @@
+Add a config option, `push.group_unread_count_by_room`, which controls whether unread message counts in push notifications are defined as "the number of rooms with unread messages" or "total unread messages".

@@ -0,0 +1 @@
+Improve the documentation on how to configure Prometheus for workers.

@@ -0,0 +1 @@
+Fix `register_new_matrix_user` failing with "Bad Request" when a trailing slash is included in the server URL. Contributed by @angdraug.

@@ -0,0 +1 @@
+Update the example Prometheus console.

@@ -0,0 +1 @@
+Disable pretty printing JSON responses for curl. Users who want pretty-printed output should use [jq](https://stedolan.github.io/jq/) in combination with curl. Contributed by @tulir.

@@ -0,0 +1 @@
+Add a `force_purge` option to the delete-room admin API.

@@ -0,0 +1 @@
+Drop a redundant database index on `event_json`.

@@ -0,0 +1 @@
+Simplify the `uk.half-shot.msc2778.login.application_service` login handler.
contrib/prometheus/README.md

@@ -20,6 +20,7 @@ Add a new job to the main prometheus.conf file:
 ```

 ### for Prometheus v2

 Add a new job to the main prometheus.yml file:

 ```yaml

@@ -29,14 +30,17 @@ Add a new job to the main prometheus.yml file:
     scheme: "https"

    static_configs:
-      - targets: ['SERVER.LOCATION:PORT']
+      - targets: ["my.server.here:port"]
 ```

+An example of a Prometheus configuration with workers can be found in
+[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
+
 To use `synapse.rules` add

 ```yaml
 rule_files:
   - "/PATH/TO/synapse-v2.rules"
 ```

 Metrics are disabled by default when running synapse; they must be enabled
contrib/prometheus/consoles/synapse.html

@@ -9,7 +9,7 @@
 new PromConsole.Graph({
   node: document.querySelector("#process_resource_utime"),
   expr: "rate(process_cpu_seconds_total[2m]) * 100",
-  name: "[[job]]",
+  name: "[[job]]-[[index]]",
   min: 0,
   max: 100,
   renderer: "line",

@@ -22,12 +22,12 @@ new PromConsole.Graph({
 </script>

 <h3>Memory</h3>
-<div id="process_resource_maxrss"></div>
+<div id="process_resident_memory_bytes"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#process_resource_maxrss"),
-  expr: "process_psutil_rss:max",
-  name: "Maxrss",
+  node: document.querySelector("#process_resident_memory_bytes"),
+  expr: "process_resident_memory_bytes",
+  name: "[[job]]-[[index]]",
   min: 0,
   renderer: "line",
   height: 150,

@@ -43,8 +43,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#process_fds"),
-  expr: "process_open_fds{job='synapse'}",
-  name: "FDs",
+  expr: "process_open_fds",
+  name: "[[job]]-[[index]]",
   min: 0,
   renderer: "line",
   height: 150,

@@ -62,8 +62,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#reactor_total_time"),
-  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
-  name: "time",
+  expr: "rate(python_twisted_reactor_tick_time_sum[2m])",
+  name: "[[job]]-[[index]]",
   max: 1,
   min: 0,
   renderer: "area",

@@ -80,8 +80,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#reactor_average_time"),
-  expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
-  name: "time",
+  expr: "rate(python_twisted_reactor_tick_time_sum[2m]) / rate(python_twisted_reactor_tick_time_count[2m])",
+  name: "[[job]]-[[index]]",
   min: 0,
   renderer: "line",
   height: 150,

@@ -97,14 +97,14 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#reactor_pending_calls"),
-  expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
-  name: "calls",
+  expr: "rate(python_twisted_reactor_pending_calls_sum[30s]) / rate(python_twisted_reactor_pending_calls_count[30s])",
+  name: "[[job]]-[[index]]",
   min: 0,
   renderer: "line",
   height: 150,
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yTitle: "Pending Cals"
+  yTitle: "Pending Calls"
 })
 </script>

@@ -115,7 +115,7 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_storage_query_time"),
-  expr: "rate(synapse_storage_query_time:count[2m])",
+  expr: "sum(rate(synapse_storage_query_time_count[2m])) by (verb)",
   name: "[[verb]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,

@@ -129,8 +129,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_storage_transaction_time"),
-  expr: "rate(synapse_storage_transaction_time:count[2m])",
-  name: "[[desc]]",
+  expr: "topk(10, rate(synapse_storage_transaction_time_count[2m]))",
+  name: "[[job]]-[[index]] [[desc]]",
   min: 0,
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,

@@ -140,12 +140,12 @@ new PromConsole.Graph({
 </script>

 <h3>Transaction execution time</h3>
-<div id="synapse_storage_transactions_time_msec"></div>
+<div id="synapse_storage_transactions_time_sec"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_transactions_time_msec"),
-  expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
-  name: "[[desc]]",
+  node: document.querySelector("#synapse_storage_transactions_time_sec"),
+  expr: "rate(synapse_storage_transaction_time_sum[2m])",
+  name: "[[job]]-[[index]] [[desc]]",
   min: 0,
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -154,34 +154,33 @@ new PromConsole.Graph({
 })
 </script>

-<h3>Database scheduling latency</h3>
-<div id="synapse_storage_schedule_time"></div>
+<h3>Average time waiting for database connection</h3>
+<div id="synapse_storage_avg_waiting_time"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#synapse_storage_schedule_time"),
-  expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
-  name: "Total latency",
+  node: document.querySelector("#synapse_storage_avg_waiting_time"),
+  expr: "rate(synapse_storage_schedule_time_sum[2m]) / rate(synapse_storage_schedule_time_count[2m])",
+  name: "[[job]]-[[index]]",
   min: 0,
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
-  yUnits: "s/s",
-  yTitle: "Usage"
+  yUnits: "s",
+  yTitle: "Time"
 })
 </script>

-<h3>Cache hit ratio</h3>
-<div id="synapse_cache_ratio"></div>
+<h3>Cache request rate</h3>
+<div id="synapse_cache_request_rate"></div>
 <script>
 new PromConsole.Graph({
-  node: document.querySelector("#synapse_cache_ratio"),
-  expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
-  name: "[[name]]",
+  node: document.querySelector("#synapse_cache_request_rate"),
+  expr: "rate(synapse_util_caches_cache:total[2m])",
+  name: "[[job]]-[[index]] [[name]]",
   min: 0,
-  max: 100,
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
-  yUnits: "%",
-  yTitle: "Percentage"
+  yUnits: "rps",
+  yTitle: "Cache request rate"
 })
 </script>

@@ -191,7 +190,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_cache_size"),
   expr: "synapse_util_caches_cache:size",
-  name: "[[name]]",
+  name: "[[job]]-[[index]] [[name]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yUnits: "",

@@ -206,8 +205,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_request_count_servlet"),
-  expr: "rate(synapse_http_server_request_count:servlet[2m])",
-  name: "[[servlet]]",
+  expr: "rate(synapse_http_server_in_flight_requests_count[2m])",
+  name: "[[job]]-[[index]] [[method]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "req/s",

@@ -219,8 +218,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
-  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
-  name: "[[servlet]]",
+  expr: "rate(synapse_http_server_in_flight_requests_count{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
+  name: "[[job]]-[[index]] [[method]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "req/s",

@@ -233,8 +232,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_time_avg"),
-  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
-  name: "[[servlet]]",
+  expr: "rate(synapse_http_server_response_time_seconds_sum[2m]) / rate(synapse_http_server_response_count[2m])",
+  name: "[[job]]-[[index]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "s/req",

@@ -277,7 +276,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_ru_utime"),
   expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
-  name: "[[servlet]]",
+  name: "[[job]]-[[index]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "s/s",

@@ -292,7 +291,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
   expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
-  name: "[[servlet]]",
+  name: "[[job]]-[[index]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "s/s",

@@ -306,8 +305,8 @@ new PromConsole.Graph({
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#synapse_http_server_send_time_avg"),
-  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
-  name: "[[servlet]]",
+  expr: "rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m])",
+  name: "[[job]]-[[index]] [[servlet]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "s/req",

@@ -323,7 +322,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_federation_client_sent"),
   expr: "rate(synapse_federation_client_sent[2m])",
-  name: "[[type]]",
+  name: "[[job]]-[[index]] [[type]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "req/s",

@@ -337,7 +336,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_federation_server_received"),
   expr: "rate(synapse_federation_server_received[2m])",
-  name: "[[type]]",
+  name: "[[job]]-[[index]] [[type]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "req/s",

@@ -367,7 +366,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_notifier_listeners"),
   expr: "synapse_notifier_listeners",
-  name: "listeners",
+  name: "[[job]]-[[index]]",
   min: 0,
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,

@@ -382,7 +381,7 @@ new PromConsole.Graph({
 new PromConsole.Graph({
   node: document.querySelector("#synapse_notifier_notified_events"),
   expr: "rate(synapse_notifier_notified_events[2m])",
-  name: "events",
+  name: "[[job]]-[[index]]",
   yAxisFormatter: PromConsole.NumberFormatter.humanize,
   yHoverFormatter: PromConsole.NumberFormatter.humanize,
   yUnits: "events/s",
docs/admin_api/rooms.md

@@ -382,7 +382,7 @@ the new room. Users on other servers will be unaffected.

 The API is:

-```json
+```
 POST /_synapse/admin/v1/rooms/<room_id>/delete
 ```

@@ -439,6 +439,10 @@ The following JSON body parameters are available:
   future attempts to join the room. Defaults to `false`.
 * `purge` - Optional. If set to `true`, it will remove all traces of the room from your database.
   Defaults to `true`.
+* `force_purge` - Optional, and ignored unless `purge` is `true`. If set to `true`, it
+  will force a purge to go ahead even if there are local users still in the room. Do not
+  use this unless a regular `purge` operation fails, as it could leave those users'
+  clients in a confused state.

 The JSON body must not be empty. The body must be at least `{}`.
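For completeness, a hedged sketch of calling the endpoint above with the new `force_purge` parameter from Python — the homeserver URL, token, and room ID are placeholders:

```python
import requests

# Hypothetical values -- substitute your homeserver, admin token, and room ID.
HOMESERVER = "https://homeserver.example.com"
ACCESS_TOKEN = "syt_..."
ROOM_ID = "!roomid:example.com"

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/rooms/{ROOM_ID}/delete",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    # `purge` removes the room from the database; `force_purge` lets the purge
    # proceed even if local users are still joined (use with care).
    json={"purge": True, "force_purge": True},
)
resp.raise_for_status()
print(resp.json())
```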
docs/admin_api/user_admin_api.rst

@@ -176,6 +176,13 @@ The api is::

    GET /_synapse/admin/v1/whois/<user_id>

+and::
+
+   GET /_matrix/client/r0/admin/whois/<userId>
+
+See also: `Client Server API Whois
+<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
+
 To use it, you will need to authenticate by providing an ``access_token`` for a
 server admin: see `README.rst <README.rst>`_.
docs/metrics-howto.md

@@ -13,10 +13,12 @@
    can be enabled by adding the "metrics" resource to the existing
    listener as such:

-      resources:
-        - names:
-          - client
-          - metrics
+   ```yaml
+   resources:
+     - names:
+       - client
+       - metrics
+   ```

    This provides a simple way of adding metrics to your Synapse
    installation, and serves under `/_synapse/metrics`. If you do not

@@ -31,11 +33,13 @@
    Add a new listener to homeserver.yaml:

-      listeners:
-        - type: metrics
-          port: 9000
-          bind_addresses:
-            - '0.0.0.0'
+   ```yaml
+   listeners:
+     - type: metrics
+       port: 9000
+       bind_addresses:
+         - '0.0.0.0'
+   ```

    For both options, you will need to ensure that `enable_metrics` is
    set to `True`.

@@ -47,10 +51,13 @@
    It needs to set the `metrics_path` to a non-default value (under
    `scrape_configs`):

-      - job_name: "synapse"
-        metrics_path: "/_synapse/metrics"
-        static_configs:
-          - targets: ["my.server.here:port"]
+   ```yaml
+   - job_name: "synapse"
+     scrape_interval: 15s
+     metrics_path: "/_synapse/metrics"
+     static_configs:
+       - targets: ["my.server.here:port"]
+   ```

    where `my.server.here` is the IP address of Synapse, and `port` is
    the listener port configured with the `metrics` resource.

@@ -60,7 +67,8 @@

 1. Restart Prometheus.

-1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/) and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)
+1. Consider using the [grafana dashboard](https://github.com/matrix-org/synapse/tree/master/contrib/grafana/)
+   and required [recording rules](https://github.com/matrix-org/synapse/tree/master/contrib/prometheus/)

 ## Monitoring workers

@@ -76,9 +84,9 @@ To allow collecting metrics from a worker, you need to add a
 under `worker_listeners`:

 ```yaml
 - type: metrics
   bind_address: ''
   port: 9101
 ```

 The `bind_address` and `port` parameters should be set so that

@@ -87,6 +95,38 @@ don't clash with an existing worker.
 With this example, the worker's metrics would then be available
 on `http://127.0.0.1:9101`.

+Example Prometheus target for Synapse with workers:
+
+```yaml
+  - job_name: "synapse"
+    scrape_interval: 15s
+    metrics_path: "/_synapse/metrics"
+    static_configs:
+      - targets: ["my.server.here:port"]
+        labels:
+          instance: "my.server"
+          job: "master"
+          index: 1
+      - targets: ["my.workerserver.here:port"]
+        labels:
+          instance: "my.server"
+          job: "generic_worker"
+          index: 1
+      - targets: ["my.workerserver.here:port"]
+        labels:
+          instance: "my.server"
+          job: "generic_worker"
+          index: 2
+      - targets: ["my.workerserver.here:port"]
+        labels:
+          instance: "my.server"
+          job: "media_repository"
+          index: 1
+```
+
+Labels (`instance`, `job`, `index`) can be defined as anything.
+The labels are used to group graphs in Grafana.

 ## Renaming of metrics & deprecation of old names in 1.2

 Synapse 1.2 updates the Prometheus metrics to match the naming
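Once a worker's `metrics` listener is up, it is easy to sanity-check before wiring Prometheus to it. A small sketch, assuming the example listener above on port 9101:

```python
import requests

# Hypothetical worker metrics listener -- match the host/port from your
# `worker_listeners` configuration.
METRICS_URL = "http://127.0.0.1:9101/_synapse/metrics"

resp = requests.get(METRICS_URL, timeout=5)
resp.raise_for_status()

# Show a few sample lines of the Prometheus exposition format.
for line in resp.text.splitlines()[:10]:
    print(line)
```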
docs/sample_config.yaml

@@ -2253,20 +2253,35 @@ password_providers:



-# Clients requesting push notifications can either have the body of
-# the message sent in the notification poke along with other details
-# like the sender, or just the event ID and room ID (`event_id_only`).
-# If clients choose the former, this option controls whether the
-# notification request includes the content of the event (other details
-# like the sender are still included). For `event_id_only` push, it
-# has no effect.
-#
-# For modern android devices the notification content will still appear
-# because it is loaded by the app. iPhone, however will send a
-# notification saying only that a message arrived and who it came from.
-#
-#push:
-#   include_content: true
+## Push ##
+
+push:
+  # Clients requesting push notifications can either have the body of
+  # the message sent in the notification poke along with other details
+  # like the sender, or just the event ID and room ID (`event_id_only`).
+  # If clients choose the former, this option controls whether the
+  # notification request includes the content of the event (other details
+  # like the sender are still included). For `event_id_only` push, it
+  # has no effect.
+  #
+  # For modern Android devices the notification content will still appear
+  # because it is loaded by the app. iPhone, however, will send a
+  # notification saying only that a message arrived and who it came from.
+  #
+  # The default value is "true" to include message details. Uncomment to only
+  # include the event ID and room ID in push notification payloads.
+  #
+  #include_content: false
+
+  # When a push notification is received, an unread count is also sent.
+  # This number can either be calculated as the number of unread messages
+  # for the user, or the number of *rooms* the user has unread messages in.
+  #
+  # The default value is "true", meaning push clients will see the number of
+  # rooms with unread messages in them. Uncomment to instead send the number
+  # of unread messages.
+  #
+  #group_unread_count_by_room: false


 # Spam checkers are third-party modules that can block specific actions
synapse/_scripts/register_new_matrix_user.py

@@ -37,7 +37,7 @@ def request_registration(
     exit=sys.exit,
 ):

-    url = "%s/_matrix/client/r0/admin/register" % (server_location,)
+    url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

     # Get the nonce
     r = requests.get(url, verify=False)
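The fix is just the `rstrip("/")` plus the prefix change, but it is worth seeing why the trailing slash broke things:

```python
# Illustration of the trailing-slash bug: without rstrip("/"), a server URL
# like "https://example.com/" produced a double slash in the request path,
# which the server rejected with "Bad Request".
server_location = "https://example.com/"

broken = "%s/_synapse/admin/v1/register" % (server_location,)
fixed = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)

print(broken)  # https://example.com//_synapse/admin/v1/register
print(fixed)   # https://example.com/_synapse/admin/v1/register
```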
synapse/app/_base.py

@@ -32,6 +32,7 @@ from synapse.app.phone_stats_home import start_phone_stats_home
 from synapse.config.server import ListenerConfig
 from synapse.crypto import context_factory
 from synapse.logging.context import PreserveLoggingContext
+from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.util.async_helpers import Linearizer
 from synapse.util.daemonize import daemonize_process
 from synapse.util.rlimit import change_resource_limit

@@ -244,6 +245,7 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):
     # Set up the SIGHUP machinery.
     if hasattr(signal, "SIGHUP"):

+        @wrap_as_background_process("sighup")
         def handle_sighup(*args, **kwargs):
             # Tell systemd our state, if we're using it. This will silently fail if
             # we're not using systemd.

@@ -254,7 +256,13 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]):

             sdnotify(b"READY=1")

-        signal.signal(signal.SIGHUP, handle_sighup)
+        # We defer running the sighup handlers until next reactor tick. This
+        # is so that we're in a sane state, e.g. flushing the logs may fail
+        # if the sighup happens in the middle of writing a log entry.
+        def run_sighup(*args, **kwargs):
+            hs.get_clock().call_later(0, handle_sighup, *args, **kwargs)
+
+        signal.signal(signal.SIGHUP, run_sighup)

         register_sighup(refresh_certificate, hs)
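The deferral trick above is general Twisted practice: a Unix signal handler should only schedule work, and the work itself runs on a clean reactor tick. A standalone sketch of the same pattern — the names here are illustrative, not Synapse code:

```python
import signal

from twisted.internet import reactor


def do_reload(*args):
    # The real work happens on a normal reactor tick, not inside the signal
    # handler, so logging and other I/O are in a sane state.
    print("reloading configuration")


def run_sighup(*args):
    # Only schedule work from the handler; defer to the next reactor tick.
    reactor.callLater(0, do_reload, *args)


if hasattr(signal, "SIGHUP"):
    signal.signal(signal.SIGHUP, run_sighup)

reactor.run()  # blocks; send SIGHUP to the process to see the deferred call
```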
synapse/config/push.py

@@ -21,8 +21,11 @@ class PushConfig(Config):
     section = "push"

     def read_config(self, config, **kwargs):
-        push_config = config.get("push", {})
+        push_config = config.get("push") or {}
         self.push_include_content = push_config.get("include_content", True)
+        self.push_group_unread_count_by_room = push_config.get(
+            "group_unread_count_by_room", True
+        )

         pusher_instances = config.get("pusher_instances") or []
         self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
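The switch from `config.get("push", {})` to `config.get("push") or {}` matters because a present-but-empty `push:` key parses as `None` in YAML, and `dict.get` only applies its default when the key is missing entirely:

```python
import yaml

# If the config file contains the key with no value (e.g. a fully
# commented-out section), YAML parses it as None rather than {}.
config = yaml.safe_load("push:\n")
print(config)                    # {'push': None}

print(config.get("push", {}))    # None -- the default is NOT used
print(config.get("push") or {})  # {}   -- the `or {}` form recovers
```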
@@ -49,18 +52,33 @@ class PushConfig(Config):

     def generate_config_section(self, config_dir_path, server_name, **kwargs):
         return """
-        # Clients requesting push notifications can either have the body of
-        # the message sent in the notification poke along with other details
-        # like the sender, or just the event ID and room ID (`event_id_only`).
-        # If clients choose the former, this option controls whether the
-        # notification request includes the content of the event (other details
-        # like the sender are still included). For `event_id_only` push, it
-        # has no effect.
-        #
-        # For modern android devices the notification content will still appear
-        # because it is loaded by the app. iPhone, however will send a
-        # notification saying only that a message arrived and who it came from.
-        #
-        #push:
-        #  include_content: true
+        ## Push ##
+
+        push:
+          # Clients requesting push notifications can either have the body of
+          # the message sent in the notification poke along with other details
+          # like the sender, or just the event ID and room ID (`event_id_only`).
+          # If clients choose the former, this option controls whether the
+          # notification request includes the content of the event (other details
+          # like the sender are still included). For `event_id_only` push, it
+          # has no effect.
+          #
+          # For modern Android devices the notification content will still appear
+          # because it is loaded by the app. iPhone, however, will send a
+          # notification saying only that a message arrived and who it came from.
+          #
+          # The default value is "true" to include message details. Uncomment to only
+          # include the event ID and room ID in push notification payloads.
+          #
+          #include_content: false
+
+          # When a push notification is received, an unread count is also sent.
+          # This number can either be calculated as the number of unread messages
+          # for the user, or the number of *rooms* the user has unread messages in.
+          #
+          # The default value is "true", meaning push clients will see the number of
+          # rooms with unread messages in them. Uncomment to instead send the number
+          # of unread messages.
+          #
+          #group_unread_count_by_room: false
         """
synapse/handlers/identity.py

@@ -354,7 +354,8 @@ class IdentityHandler(BaseHandler):
             raise SynapseError(500, "An error was encountered when sending the email")

         token_expires = (
-            self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime
+            self.hs.get_clock().time_msec()
+            + self.hs.config.email_validation_token_lifetime
         )

         await self.store.start_or_continue_validation_session(
synapse/handlers/pagination.py

@@ -299,17 +299,22 @@ class PaginationHandler:
         """
         return self._purges_by_id.get(purge_id)

-    async def purge_room(self, room_id: str) -> None:
-        """Purge the given room from the database"""
+    async def purge_room(self, room_id: str, force: bool = False) -> None:
+        """Purge the given room from the database.
+
+        Args:
+            room_id: room to be purged
+            force: set true to skip checking for joined users.
+        """
         with await self.pagination_lock.write(room_id):
             # check we know about the room
             await self.store.get_room_version_id(room_id)

-            # first check that we have no users in this room
-            joined = await self.store.is_host_joined(room_id, self._server_name)
-
-            if joined:
-                raise SynapseError(400, "Users are still joined to this room")
+            if not force:
+                # first check that we have no users in this room
+                joined = await self.store.is_host_joined(room_id, self._server_name)
+                if joined:
+                    raise SynapseError(400, "Users are still joined to this room")

             await self.storage.purge_events.purge_room(room_id)
synapse/handlers/room_member.py

@@ -31,7 +31,6 @@ from synapse.api.errors import (
 from synapse.api.ratelimiting import Ratelimiter
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
-from synapse.storage.roommember import RoomsForUser
 from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_left_room

@@ -347,7 +346,15 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         # later on.
         content = dict(content)

-        if not self.allow_per_room_profiles or requester.shadow_banned:
+        # allow the server notices mxid to set room-level profile
+        is_requester_server_notices_user = (
+            self._server_notices_mxid is not None
+            and requester.user.to_string() == self._server_notices_mxid
+        )
+
+        if (
+            not self.allow_per_room_profiles and not is_requester_server_notices_user
+        ) or requester.shadow_banned:
             # Strip profile data, knowing that new profile data will be added to the
             # event's content in event_creation_handler.create_event() using the target's
             # global profile.

@@ -515,10 +522,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         elif effective_membership_state == Membership.LEAVE:
             if not is_host_in_room:
                 # perhaps we've been invited
-                invite = await self.store.get_invite_for_local_user_in_room(
-                    user_id=target.to_string(), room_id=room_id
-                )  # type: Optional[RoomsForUser]
-                if not invite:
+                (
+                    current_membership_type,
+                    current_membership_event_id,
+                ) = await self.store.get_local_current_membership_for_user_in_room(
+                    target.to_string(), room_id
+                )
+                if (
+                    current_membership_type != Membership.INVITE
+                    or not current_membership_event_id
+                ):
                     logger.info(
                         "%s sent a leave request to %s, but that is not an active room "
                         "on this server, and there is no pending invite",

@@ -528,6 +541,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

                     raise SynapseError(404, "Not a known room")

+                invite = await self.store.get_event(current_membership_event_id)
                 logger.info(
                     "%s rejects invite to %s from %s", target, room_id, invite.sender
                 )
synapse/http/server.py

@@ -25,7 +25,7 @@ from io import BytesIO
 from typing import Any, Callable, Dict, Iterator, List, Tuple, Union

 import jinja2
-from canonicaljson import iterencode_canonical_json, iterencode_pretty_printed_json
+from canonicaljson import iterencode_canonical_json
 from zope.interface import implementer

 from twisted.internet import defer, interfaces

@@ -94,11 +94,7 @@ def return_json_error(f: failure.Failure, request: SynapseRequest) -> None:
         pass
     else:
         respond_with_json(
-            request,
-            error_code,
-            error_dict,
-            send_cors=True,
-            pretty_print=_request_user_agent_is_curl(request),
+            request, error_code, error_dict, send_cors=True,
         )

@@ -290,7 +286,6 @@ class DirectServeJsonResource(_AsyncResource):
             code,
             response_object,
             send_cors=True,
-            pretty_print=_request_user_agent_is_curl(request),
             canonical_json=self.canonical_json,
         )

@@ -587,7 +582,6 @@ def respond_with_json(
     code: int,
     json_object: Any,
     send_cors: bool = False,
-    pretty_print: bool = False,
    canonical_json: bool = True,
 ):
     """Sends encoded JSON in response to the given request.

@@ -598,8 +592,6 @@ def respond_with_json(
         json_object: The object to serialize to JSON.
         send_cors: Whether to send Cross-Origin Resource Sharing headers
             https://fetch.spec.whatwg.org/#http-cors-protocol
-        pretty_print: Whether to include indentation and line-breaks in the
-            resulting JSON bytes.
         canonical_json: Whether to use the canonicaljson algorithm when encoding
             the JSON bytes.

@@ -615,13 +607,10 @@ def respond_with_json(
         )
         return None

-    if pretty_print:
-        encoder = iterencode_pretty_printed_json
+    if canonical_json:
+        encoder = iterencode_canonical_json
     else:
-        if canonical_json:
-            encoder = iterencode_canonical_json
-        else:
-            encoder = _encode_json_bytes
+        encoder = _encode_json_bytes

     request.setResponseCode(code)
     request.setHeader(b"Content-Type", b"application/json")

@@ -759,11 +748,3 @@ def finish_request(request: Request):
         request.finish()
     except RuntimeError as e:
         logger.info("Connection disconnected before response was written: %r", e)
-
-
-def _request_user_agent_is_curl(request: Request) -> bool:
-    user_agents = request.requestHeaders.getRawHeaders(b"User-Agent", default=[])
-    for user_agent in user_agents:
-        if b"curl" in user_agent:
-            return True
-    return False
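With `pretty_print` removed, the server always emits compact JSON; formatting is now a client-side job (jq, as the changelog suggests). For Python users, the stdlib equivalent as a sketch — the `/versions` endpoint is just a convenient unauthenticated example:

```python
import json

import requests

# Format the response client-side instead of relying on the server.
resp = requests.get("https://homeserver.example.com/_matrix/client/versions")
print(json.dumps(resp.json(), indent=4, sort_keys=True))
```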
synapse/push/httppusher.py

@@ -75,6 +75,7 @@ class HttpPusher:
         self.failing_since = pusherdict["failing_since"]
         self.timed_call = None
         self._is_processing = False
+        self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room

         # This is the highest stream ordering we know it's safe to process.
         # When new events arrive, we'll be given a window of new events: we

@@ -136,7 +137,11 @@ class HttpPusher:
     async def _update_badge(self):
         # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems
         # to be largely redundant. perhaps we can remove it.
-        badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
+        badge = await push_tools.get_badge_count(
+            self.hs.get_datastore(),
+            self.user_id,
+            group_by_room=self._group_unread_count_by_room,
+        )
         await self._send_badge(badge)

     def on_timer(self):

@@ -283,7 +288,11 @@ class HttpPusher:
             return True

         tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"])
-        badge = await push_tools.get_badge_count(self.hs.get_datastore(), self.user_id)
+        badge = await push_tools.get_badge_count(
+            self.hs.get_datastore(),
+            self.user_id,
+            group_by_room=self._group_unread_count_by_room,
+        )

         event = await self.store.get_event(push_action["event_id"], allow_none=True)
         if event is None:
synapse/push/push_tools.py

@@ -12,12 +12,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
-from synapse.storage import Storage
+from synapse.storage.databases.main import DataStore


-async def get_badge_count(store, user_id):
+async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
     invites = await store.get_invited_rooms_for_local_user(user_id)
     joins = await store.get_rooms_for_user(user_id)

@@ -34,9 +34,15 @@ async def get_badge_count(store, user_id):
                     room_id, user_id, last_unread_event_id
                 )
             )
-            # return one badge count per conversation, as count per
-            # message is so noisy as to be almost useless
-            badge += 1 if notifs["notify_count"] else 0
+            if notifs["notify_count"] == 0:
+                continue
+
+            if group_by_room:
+                # return one badge count per conversation
+                badge += 1
+            else:
+                # increment the badge count by the number of unread messages in the room
+                badge += notifs["notify_count"]
     return badge
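To make the two modes concrete, a toy illustration of the badge arithmetic — invented counts, not Synapse code:

```python
# Per-room unread notification counts: 3 unread in one room, 2 in another,
# and one room with nothing unread.
notify_counts = [3, 2, 0]

rooms_with_unread = sum(1 for n in notify_counts if n)  # -> 2
total_unread = sum(notify_counts)                       # -> 5

print(rooms_with_unread)  # badge when group_unread_count_by_room is true
print(total_unread)       # badge when it is false
```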
synapse/replication/http/membership.py

@@ -12,9 +12,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, List, Optional, Tuple
+
+from twisted.web.http import Request

 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint

@@ -52,16 +53,23 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
         self.clock = hs.get_clock()

     @staticmethod
-    async def _serialize_payload(
-        requester, room_id, user_id, remote_room_hosts, content
-    ):
+    async def _serialize_payload(  # type: ignore
+        requester: Requester,
+        room_id: str,
+        user_id: str,
+        remote_room_hosts: List[str],
+        content: JsonDict,
+    ) -> JsonDict:
         """
         Args:
-            requester(Requester)
-            room_id (str)
-            user_id (str)
-            remote_room_hosts (list[str]): Servers to try and join via
-            content(dict): The event content to use for the join event
+            requester: The user making the request according to the access token
+            room_id: The ID of the room.
+            user_id: The ID of the user.
+            remote_room_hosts: Servers to try and join via
+            content: The event content to use for the join event

         Returns:
             A dict representing the payload of the request.
         """
         return {
             "requester": requester.serialize(),

@@ -69,7 +77,9 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
             "content": content,
         }

-    async def _handle_request(self, request, room_id, user_id):
+    async def _handle_request(  # type: ignore
+        self, request: Request, room_id: str, user_id: str
+    ) -> Tuple[int, JsonDict]:
         content = parse_json_object_from_request(request)

         remote_room_hosts = content["remote_room_hosts"]

@@ -118,14 +128,17 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
         txn_id: Optional[str],
         requester: Requester,
         content: JsonDict,
-    ):
+    ) -> JsonDict:
         """
         Args:
-            invite_event_id: ID of the invite to be rejected
-            txn_id: optional transaction ID supplied by the client
-            requester: user making the rejection request, according to the access token
-            content: additional content to include in the rejection event.
+            invite_event_id: The ID of the invite to be rejected.
+            txn_id: Optional transaction ID supplied by the client
+            requester: User making the rejection request, according to the access token
+            content: Additional content to include in the rejection event.
                 Normally an empty dict.

         Returns:
             A dict representing the payload of the request.
         """
         return {
             "txn_id": txn_id,

@@ -133,7 +146,9 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
             "content": content,
         }

-    async def _handle_request(self, request, invite_event_id):
+    async def _handle_request(  # type: ignore
+        self, request: Request, invite_event_id: str
+    ) -> Tuple[int, JsonDict]:
         content = parse_json_object_from_request(request)

         txn_id = content["txn_id"]

@@ -174,18 +189,25 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
         self.distributor = hs.get_distributor()

     @staticmethod
-    async def _serialize_payload(room_id, user_id, change):
+    async def _serialize_payload(  # type: ignore
+        room_id: str, user_id: str, change: str
+    ) -> JsonDict:
         """
         Args:
-            room_id (str)
-            user_id (str)
-            change (str): "left"
+            room_id: The ID of the room.
+            user_id: The ID of the user.
+            change: "left"

         Returns:
             A dict representing the payload of the request.
         """
         assert change == "left"

         return {}

-    def _handle_request(self, request, room_id, user_id, change):
+    def _handle_request(  # type: ignore
+        self, request: Request, room_id: str, user_id: str, change: str
+    ) -> Tuple[int, JsonDict]:
         logger.info("user membership change: %s in %s", user_id, room_id)

         user = UserID.from_string(user_id)
synapse/rest/admin/__init__.py

@@ -21,11 +21,7 @@ import synapse
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
-from synapse.rest.admin._base import (
-    admin_patterns,
-    assert_requester_is_admin,
-    historical_admin_path_patterns,
-)
+from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
 from synapse.rest.admin.devices import (
     DeleteDevicesRestServlet,
     DeviceRestServlet,

@@ -84,7 +80,7 @@ class VersionServlet(RestServlet):


 class PurgeHistoryRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns(
+    PATTERNS = admin_patterns(
         "/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
     )

@@ -169,9 +165,7 @@ class PurgeHistoryRestServlet(RestServlet):


 class PurgeHistoryStatusRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns(
-        "/purge_history_status/(?P<purge_id>[^/]+)"
-    )
+    PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")

     def __init__(self, hs):
         """
|
@ -22,28 +22,6 @@ from synapse.api.errors import AuthError
|
|||
from synapse.types import UserID
|
||||
|
||||
|
||||
def historical_admin_path_patterns(path_regex):
|
||||
"""Returns the list of patterns for an admin endpoint, including historical ones
|
||||
|
||||
This is a backwards-compatibility hack. Previously, the Admin API was exposed at
|
||||
various paths under /_matrix/client. This function returns a list of patterns
|
||||
matching those paths (as well as the new one), so that existing scripts which rely
|
||||
on the endpoints being available there are not broken.
|
||||
|
||||
Note that this should only be used for existing endpoints: new ones should just
|
||||
register for the /_synapse/admin path.
|
||||
"""
|
||||
return [
|
||||
re.compile(prefix + path_regex)
|
||||
for prefix in (
|
||||
"^/_synapse/admin/v1",
|
||||
"^/_matrix/client/api/v1/admin",
|
||||
"^/_matrix/client/unstable/admin",
|
||||
"^/_matrix/client/r0/admin",
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
def admin_patterns(path_regex: str, version: str = "v1"):
|
||||
"""Returns the list of patterns for an admin endpoint
|
||||
|
||||
|
|
|
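With the historical helper gone, `admin_patterns` is all that remains. A sketch of its shape, reconstructed from the prefix removed above — the real body may differ in detail:

```python
import re
from typing import List, Pattern


def admin_patterns(path_regex: str, version: str = "v1") -> List[Pattern]:
    """Returns the list of patterns for an admin endpoint.

    Only the /_synapse/admin prefix is registered; the historical
    /_matrix/client/* prefixes are gone.
    """
    admin_prefix = "^/_synapse/admin/" + version
    return [re.compile(admin_prefix + path_regex)]
```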
synapse/rest/admin/groups.py

@@ -16,10 +16,7 @@ import logging

 from synapse.api.errors import SynapseError
 from synapse.http.servlet import RestServlet
-from synapse.rest.admin._base import (
-    assert_user_is_admin,
-    historical_admin_path_patterns,
-)
+from synapse.rest.admin._base import admin_patterns, assert_user_is_admin

 logger = logging.getLogger(__name__)

@@ -28,7 +25,7 @@ class DeleteGroupAdminRestServlet(RestServlet):
     """Allows deleting of local groups
     """

-    PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
+    PATTERNS = admin_patterns("/delete_group/(?P<group_id>[^/]*)")

     def __init__(self, hs):
         self.group_server = hs.get_groups_server_handler()
synapse/rest/admin/media.py

@@ -22,7 +22,6 @@ from synapse.rest.admin._base import (
     admin_patterns,
     assert_requester_is_admin,
     assert_user_is_admin,
-    historical_admin_path_patterns,
 )

 logger = logging.getLogger(__name__)

@@ -34,10 +33,10 @@ class QuarantineMediaInRoom(RestServlet):
     """

     PATTERNS = (
-        historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
+        admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
         +
         # This path kept around for legacy reasons
-        historical_admin_path_patterns("/quarantine_media/(?P<room_id>[^/]+)")
+        admin_patterns("/quarantine_media/(?P<room_id>[^/]+)")
     )

     def __init__(self, hs):

@@ -63,9 +62,7 @@ class QuarantineMediaByUser(RestServlet):
     this server.
     """

-    PATTERNS = historical_admin_path_patterns(
-        "/user/(?P<user_id>[^/]+)/media/quarantine"
-    )
+    PATTERNS = admin_patterns("/user/(?P<user_id>[^/]+)/media/quarantine")

     def __init__(self, hs):
         self.store = hs.get_datastore()

@@ -90,7 +87,7 @@ class QuarantineMediaByID(RestServlet):
     it via this server.
     """

-    PATTERNS = historical_admin_path_patterns(
+    PATTERNS = admin_patterns(
         "/media/quarantine/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)"
     )

@@ -116,7 +113,7 @@ class ListMediaInRoom(RestServlet):
     """Lists all of the media in a given room.
     """

-    PATTERNS = historical_admin_path_patterns("/room/(?P<room_id>[^/]+)/media")
+    PATTERNS = admin_patterns("/room/(?P<room_id>[^/]+)/media")

     def __init__(self, hs):
         self.store = hs.get_datastore()

@@ -134,7 +131,7 @@ class ListMediaInRoom(RestServlet):


 class PurgeMediaCacheRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/purge_media_cache")
+    PATTERNS = admin_patterns("/purge_media_cache")

     def __init__(self, hs):
         self.media_repository = hs.get_media_repository()
synapse/rest/admin/rooms.py

@@ -29,7 +29,6 @@ from synapse.rest.admin._base import (
     admin_patterns,
     assert_requester_is_admin,
     assert_user_is_admin,
-    historical_admin_path_patterns,
 )
 from synapse.storage.databases.main.room import RoomSortOrder
 from synapse.types import RoomAlias, RoomID, UserID, create_requester

@@ -44,7 +43,7 @@ class ShutdownRoomRestServlet(RestServlet):
     joined to the new room.
     """

-    PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
+    PATTERNS = admin_patterns("/shutdown_room/(?P<room_id>[^/]+)")

     def __init__(self, hs):
         self.hs = hs

@@ -71,14 +70,18 @@ class ShutdownRoomRestServlet(RestServlet):


 class DeleteRoomRestServlet(RestServlet):
-    """Delete a room from server. It is a combination and improvement of
-    shut down and purge room.
+    """Delete a room from server.
+
+    It is a combination and improvement of shutdown and purge room.
+
     Shuts down a room by removing all local users from the room.
     Blocking all future invites and joins to the room is optional.
+
     If desired any local aliases will be repointed to a new room
-    created by `new_room_user_id` and kicked users will be auto
+    created by `new_room_user_id` and kicked users will be auto-
     joined to the new room.
-    It will remove all trace of a room from the database.
+
+    If 'purge' is true, it will remove all traces of a room from the database.
     """

     PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]+)/delete$")

@@ -111,6 +114,14 @@ class DeleteRoomRestServlet(RestServlet):
                 Codes.BAD_JSON,
             )

+        force_purge = content.get("force_purge", False)
+        if not isinstance(force_purge, bool):
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Param 'force_purge' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
         ret = await self.room_shutdown_handler.shutdown_room(
             room_id=room_id,
             new_room_user_id=content.get("new_room_user_id"),

@@ -122,7 +133,7 @@ class DeleteRoomRestServlet(RestServlet):

         # Purge room
         if purge:
-            await self.pagination_handler.purge_room(room_id)
+            await self.pagination_handler.purge_room(room_id, force=force_purge)

         return (200, ret)
synapse/rest/admin/users.py

@@ -33,8 +33,8 @@ from synapse.rest.admin._base import (
     admin_patterns,
     assert_requester_is_admin,
     assert_user_is_admin,
-    historical_admin_path_patterns,
 )
+from synapse.rest.client.v2_alpha._base import client_patterns
 from synapse.types import JsonDict, UserID

 if TYPE_CHECKING:

@@ -55,7 +55,7 @@ _GET_PUSHERS_ALLOWED_KEYS = {


 class UsersRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
+    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)$")

     def __init__(self, hs):
         self.hs = hs

@@ -338,7 +338,7 @@ class UserRegisterServlet(RestServlet):
         nonce to the time it was generated, in int seconds.
     """

-    PATTERNS = historical_admin_path_patterns("/register")
+    PATTERNS = admin_patterns("/register")
     NONCE_TIMEOUT = 60

     def __init__(self, hs):

@@ -461,7 +461,14 @@ class UserRegisterServlet(RestServlet):


 class WhoisRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
+    path_regex = "/whois/(?P<user_id>[^/]*)$"
+    PATTERNS = (
+        admin_patterns(path_regex)
+        +
+        # URL for spec reason
+        # https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid
+        client_patterns("/admin" + path_regex, v1=True)
+    )

     def __init__(self, hs):
         self.hs = hs

@@ -485,7 +492,7 @@ class WhoisRestServlet(RestServlet):


 class DeactivateAccountRestServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
+    PATTERNS = admin_patterns("/deactivate/(?P<target_user_id>[^/]*)")

     def __init__(self, hs):
         self._deactivate_account_handler = hs.get_deactivate_account_handler()

@@ -516,7 +523,7 @@ class DeactivateAccountRestServlet(RestServlet):


 class AccountValidityRenewServlet(RestServlet):
-    PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
+    PATTERNS = admin_patterns("/account_validity/validity$")

     def __init__(self, hs):
         """

@@ -559,9 +566,7 @@ class ResetPasswordRestServlet(RestServlet):
         200 OK with empty object if success otherwise an error.
     """

-    PATTERNS = historical_admin_path_patterns(
-        "/reset_password/(?P<target_user_id>[^/]*)"
-    )
+    PATTERNS = admin_patterns("/reset_password/(?P<target_user_id>[^/]*)")

     def __init__(self, hs):
         self.store = hs.get_datastore()

@@ -603,7 +608,7 @@ class SearchUsersRestServlet(RestServlet):
         200 OK with json object {list[dict[str, Any]], count} or empty object.
     """

-    PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
+    PATTERNS = admin_patterns("/search_users/(?P<target_user_id>[^/]*)")

     def __init__(self, hs):
         self.hs = hs
synapse/rest/client/v1/login.py

@@ -154,13 +154,28 @@ class LoginRestServlet(RestServlet):
     async def _do_appservice_login(
         self, login_submission: JsonDict, appservice: ApplicationService
     ):
-        logger.info(
-            "Got appservice login request with identifier: %r",
-            login_submission.get("identifier"),
-        )
+        identifier = login_submission.get("identifier")
+        logger.info("Got appservice login request with identifier: %r", identifier)

-        identifier = convert_client_dict_legacy_fields_to_identifier(login_submission)
-        qualified_user_id = self._get_qualified_user_id(identifier)
+        if not isinstance(identifier, dict):
+            raise SynapseError(
+                400, "Invalid identifier in login submission", Codes.INVALID_PARAM
+            )
+
+        # this login flow only supports identifiers of type "m.id.user".
+        if identifier.get("type") != "m.id.user":
+            raise SynapseError(
+                400, "Unknown login identifier type", Codes.INVALID_PARAM
+            )
+
+        user = identifier.get("user")
+        if not isinstance(user, str):
+            raise SynapseError(400, "Invalid user in identifier", Codes.INVALID_PARAM)
+
+        if user.startswith("@"):
+            qualified_user_id = user
+        else:
+            qualified_user_id = UserID(user, self.hs.hostname).to_string()

         if not appservice.is_interested_in_user(qualified_user_id):
             raise LoginError(403, "Invalid access_token", errcode=Codes.FORBIDDEN)
synapse/rest/client/v2_alpha/account.py

@@ -115,7 +115,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
             # comments for request_token_inhibit_3pid_errors.
             # Also wait for some random amount of time between 100ms and 1s to make it
             # look like we did something.
-            await self.hs.clock.sleep(random.randint(1, 10) / 10)
+            await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
             return 200, {"sid": random_string(16)}

         raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)

@@ -387,7 +387,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
             # comments for request_token_inhibit_3pid_errors.
             # Also wait for some random amount of time between 100ms and 1s to make it
             # look like we did something.
-            await self.hs.clock.sleep(random.randint(1, 10) / 10)
+            await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
             return 200, {"sid": random_string(16)}

         raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)

@@ -466,7 +466,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
             # comments for request_token_inhibit_3pid_errors.
             # Also wait for some random amount of time between 100ms and 1s to make it
             # look like we did something.
-            await self.hs.clock.sleep(random.randint(1, 10) / 10)
+            await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
             return 200, {"sid": random_string(16)}

         raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
synapse/rest/client/v2_alpha/register.py

@@ -135,7 +135,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
             # comments for request_token_inhibit_3pid_errors.
             # Also wait for some random amount of time between 100ms and 1s to make it
             # look like we did something.
-            await self.hs.clock.sleep(random.randint(1, 10) / 10)
+            await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
             return 200, {"sid": random_string(16)}

         raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)

@@ -214,7 +214,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
             # comments for request_token_inhibit_3pid_errors.
             # Also wait for some random amount of time between 100ms and 1s to make it
             # look like we did something.
-            await self.hs.clock.sleep(random.randint(1, 10) / 10)
+            await self.hs.get_clock().sleep(random.randint(1, 10) / 10)
             return 200, {"sid": random_string(16)}

         raise SynapseError(
```diff
@@ -66,7 +66,7 @@ class LocalKey(Resource):
 
     def __init__(self, hs):
         self.config = hs.config
-        self.clock = hs.clock
+        self.clock = hs.get_clock()
         self.update_response_body(self.clock.time_msec())
         Resource.__init__(self)
 
```

```diff
@@ -147,7 +147,8 @@ def cache_in_self(builder: T) -> T:
             "@cache_in_self can only be used on functions starting with `get_`"
         )
 
-    depname = builder.__name__[len("get_") :]
+    # get_attr -> _attr
+    depname = builder.__name__[len("get") :]
 
     building = [False]
 
@@ -235,15 +236,6 @@ class HomeServer(metaclass=abc.ABCMeta):
         self._instance_id = random_string(5)
         self._instance_name = config.worker_name or "master"
 
-        self.clock = Clock(reactor)
-        self.distributor = Distributor()
-
-        self.registration_ratelimiter = Ratelimiter(
-            clock=self.clock,
-            rate_hz=config.rc_registration.per_second,
-            burst_count=config.rc_registration.burst_count,
-        )
-
         self.version_string = version_string
 
         self.datastores = None  # type: Optional[Databases]
@@ -301,8 +293,9 @@ class HomeServer(metaclass=abc.ABCMeta):
     def is_mine_id(self, string: str) -> bool:
         return string.split(":", 1)[1] == self.hostname
 
+    @cache_in_self
     def get_clock(self) -> Clock:
-        return self.clock
+        return Clock(self._reactor)
 
     def get_datastore(self) -> DataStore:
         if not self.datastores:
@@ -319,11 +312,17 @@ class HomeServer(metaclass=abc.ABCMeta):
     def get_config(self) -> HomeServerConfig:
         return self.config
 
+    @cache_in_self
     def get_distributor(self) -> Distributor:
-        return self.distributor
+        return Distributor()
 
+    @cache_in_self
     def get_registration_ratelimiter(self) -> Ratelimiter:
-        return self.registration_ratelimiter
+        return Ratelimiter(
+            clock=self.get_clock(),
+            rate_hz=self.config.rc_registration.per_second,
+            burst_count=self.config.rc_registration.burst_count,
+        )
 
     @cache_in_self
     def get_federation_client(self) -> FederationClient:
@@ -709,7 +708,7 @@ class HomeServer(metaclass=abc.ABCMeta):
 
     @cache_in_self
     def get_federation_ratelimiter(self) -> FederationRateLimiter:
-        return FederationRateLimiter(self.clock, config=self.config.rc_federation)
+        return FederationRateLimiter(self.get_clock(), config=self.config.rc_federation)
 
     @cache_in_self
     def get_module_api(self) -> ModuleApi:
```

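A minimal sketch of what `@cache_in_self` does with that `_attr` name, assuming the real decorator also guards against re-entrant construction (the `building` list visible in the first hunk above):

```python
import functools


def cache_in_self(builder):
    # "get_clock" -> "_clock": the private attribute used as the cache slot.
    depname = builder.__name__[len("get"):]

    @functools.wraps(builder)
    def _get(self):
        # Reuse an instance that was already built, or that a test injected
        # up front via setattr(hs, "_clock", ...).
        existing = getattr(self, depname, None)
        if existing is not None:
            return existing
        dep = builder(self)
        setattr(self, depname, dep)
        return dep

    return _get
```

With this in place, `hs.get_clock() is hs.get_clock()` holds: the first call builds the `Clock` and caches it as `hs._clock`, so the eager assignments removed from `__init__` are no longer needed.
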
```diff
@@ -314,6 +314,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
         for table in (
             "event_auth",
             "event_edges",
+            "event_json",
             "event_push_actions_staging",
             "event_reference_hashes",
             "event_relations",
@@ -340,7 +341,6 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
             "destination_rooms",
             "event_backward_extremities",
             "event_forward_extremities",
-            "event_json",
             "event_push_actions",
             "event_search",
             "events",
```

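These two table lists drive the purge transaction: tables in the first list are cleared via the set of event IDs being purged, tables in the second by `room_id`. Moving `event_json` into the first list is what lets the migration below drop its `room_id` index. A rough, hypothetical sketch of the distinction (function name and the `events_to_purge` temporary table are assumed from context, not shown in this diff):

```python
def _purge_tables(txn, room_id: str) -> None:
    # First list: rows are located through the events being deleted, so
    # event_json no longer needs an index on room_id.
    txn.execute(
        "DELETE FROM event_json WHERE event_id IN"
        " (SELECT event_id FROM events_to_purge)"
    )
    # Second list: rows are still located by room_id directly.
    txn.execute("DELETE FROM event_search WHERE room_id = ?", (room_id,))
```
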
```diff
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set
+from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set, Tuple
 
 from synapse.api.constants import EventTypes, Membership
 from synapse.events import EventBase
@@ -350,6 +350,38 @@ class RoomMemberWorkerStore(EventsWorkerStore):
 
         return results
 
+    async def get_local_current_membership_for_user_in_room(
+        self, user_id: str, room_id: str
+    ) -> Tuple[Optional[str], Optional[str]]:
+        """Retrieve the current local membership state and event ID for a user in a room.
+
+        Args:
+            user_id: The ID of the user.
+            room_id: The ID of the room.
+
+        Returns:
+            A tuple of (membership_type, event_id). Both will be None if a
+            room_id/user_id pair is not found.
+        """
+        # Paranoia check.
+        if not self.hs.is_mine_id(user_id):
+            raise Exception(
+                "Cannot call 'get_local_current_membership_for_user_in_room' on "
+                "non-local user %s" % (user_id,),
+            )
+
+        results_dict = await self.db_pool.simple_select_one(
+            "local_current_membership",
+            {"room_id": room_id, "user_id": user_id},
+            ("membership", "event_id"),
+            allow_none=True,
+            desc="get_local_current_membership_for_user_in_room",
+        )
+        if not results_dict:
+            return None, None
+
+        return results_dict.get("membership"), results_dict.get("event_id")
+
     @cached(max_entries=500000, iterable=True)
     async def get_rooms_for_user_with_stream_ordering(
         self, user_id: str
```

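A hypothetical call site for the new helper (the caller and `store` wiring are assumed): the single-row lookup in `local_current_membership` avoids loading full room state when, say, rejecting an invite.

```python
from typing import Optional

from synapse.api.constants import Membership


async def get_invite_event_id(store, user_id: str, room_id: str) -> Optional[str]:
    # Both values are None if no (room_id, user_id) pair exists.
    membership, event_id = await store.get_local_current_membership_for_user_in_room(
        user_id, room_id
    )
    if membership != Membership.INVITE:
        return None
    return event_id
```
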
```diff
@@ -0,0 +1,19 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this index is essentially redundant. The only time it was ever used was when purging
+-- rooms - and Synapse 1.24 will change that.
+
+DROP INDEX IF EXISTS event_json_room_id;
```

```diff
@@ -52,7 +52,7 @@ class AuthTestCase(unittest.TestCase):
             self.fail("some_user was not in %s" % macaroon.inspect())
 
     def test_macaroon_caveats(self):
-        self.hs.clock.now = 5000
+        self.hs.get_clock().now = 5000
 
         token = self.macaroon_generator.generate_access_token("a_user")
         macaroon = pymacaroons.Macaroon.deserialize(token)
@@ -78,7 +78,7 @@ class AuthTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_short_term_login_token_gives_user_id(self):
-        self.hs.clock.now = 1000
+        self.hs.get_clock().now = 1000
 
         token = self.macaroon_generator.generate_short_term_login_token("a_user", 5000)
         user_id = yield defer.ensureDeferred(
@@ -87,7 +87,7 @@ class AuthTestCase(unittest.TestCase):
         self.assertEqual("a_user", user_id)
 
         # when we advance the clock, the token should be rejected
-        self.hs.clock.now = 6000
+        self.hs.get_clock().now = 6000
         with self.assertRaises(synapse.api.errors.AuthError):
            yield defer.ensureDeferred(
                self.auth_handler.validate_short_term_login_token_and_get_user_id(token)
```

```diff
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 from mock import Mock
 
 from twisted.internet.defer import Deferred
@@ -20,8 +19,9 @@ from twisted.internet.defer import Deferred
 import synapse.rest.admin
 from synapse.logging.context import make_deferred_yieldable
 from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import receipts
 
-from tests.unittest import HomeserverTestCase
+from tests.unittest import HomeserverTestCase, override_config
 
 
 class HTTPPusherTests(HomeserverTestCase):
@@ -29,6 +29,7 @@ class HTTPPusherTests(HomeserverTestCase):
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
         login.register_servlets,
+        receipts.register_servlets,
     ]
     user_id = True
     hijack_auth = False
@@ -501,3 +502,161 @@ class HTTPPusherTests(HomeserverTestCase):
 
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
+
+    def test_push_unread_count_group_by_room(self):
+        """
+        The HTTP pusher will group unread count by number of unread rooms.
+        """
+        # Carry out common push count tests and setup
+        self._test_push_unread_count()
+
+        # Carry out our option-value specific test
+        #
+        # This push should still only contain an unread count of 1 (for 1 unread room)
+        self.assertEqual(
+            self.push_attempts[5][2]["notification"]["counts"]["unread"], 1
+        )
+
+    @override_config({"push": {"group_unread_count_by_room": False}})
+    def test_push_unread_count_message_count(self):
+        """
+        The HTTP pusher will send the total unread message count.
+        """
+        # Carry out common push count tests and setup
+        self._test_push_unread_count()
+
+        # Carry out our option-value specific test
+        #
+        # We're counting every unread message, so there should now be 4 since the
+        # last read receipt
+        self.assertEqual(
+            self.push_attempts[5][2]["notification"]["counts"]["unread"], 4
+        )
+
+    def _test_push_unread_count(self):
+        """
+        Tests that the correct unread count appears in sent push notifications
+
+        Note that:
+        * Sending messages will cause push notifications to go out to relevant users
+        * Sending a read receipt will cause a "badge update" notification to go out to
+          the user that sent the receipt
+        """
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("other_user", "pass")
+        other_access_token = self.login("other_user", "pass")
+
+        # Create a room (as other_user)
+        room_id = self.helper.create_room_as(other_user_id, tok=other_access_token)
+
+        # The user to get notified joins
+        self.helper.join(room=room_id, user=user_id, tok=access_token)
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastore().get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple.token_id
+
+        self.get_success(
+            self.hs.get_pusherpool().add_pusher(
+                user_id=user_id,
+                access_token=token_id,
+                kind="http",
+                app_id="m.http",
+                app_display_name="HTTP Push Notifications",
+                device_display_name="pushy push",
+                pushkey="a@example.com",
+                lang=None,
+                data={"url": "example.com"},
+            )
+        )
+
+        # Send a message
+        response = self.helper.send(
+            room_id, body="Hello there!", tok=other_access_token
+        )
+        # To get an unread count, the user who is getting notified has to have a read
+        # position in the room. We'll set the read position to this event in a moment
+        first_message_event_id = response["event_id"]
+
+        # Advance time a bit (so the pusher will register something has happened) and
+        # make the push succeed
+        self.push_attempts[0][0].callback({})
+        self.pump()
+
+        # Check our push made it
+        self.assertEqual(len(self.push_attempts), 1)
+        self.assertEqual(self.push_attempts[0][1], "example.com")
+
+        # Check that the unread count for the room is 0
+        #
+        # The unread count is zero as the user has no read receipt in the room yet
+        self.assertEqual(
+            self.push_attempts[0][2]["notification"]["counts"]["unread"], 0
+        )
+
+        # Now set the user's read receipt position to the first event
+        #
+        # This will actually trigger a new notification to be sent out so that
+        # even if the user does not receive another message, their unread
+        # count goes down
+        request, channel = self.make_request(
+            "POST",
+            "/rooms/%s/receipt/m.read/%s" % (room_id, first_message_event_id),
+            {},
+            access_token=access_token,
+        )
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        # Advance time and make the push succeed
+        self.push_attempts[1][0].callback({})
+        self.pump()
+
+        # Unread count is still zero as we've read the only message in the room
+        self.assertEqual(len(self.push_attempts), 2)
+        self.assertEqual(
+            self.push_attempts[1][2]["notification"]["counts"]["unread"], 0
+        )
+
+        # Send another message
+        self.helper.send(
+            room_id, body="How's the weather today?", tok=other_access_token
+        )
+
+        # Advance time and make the push succeed
+        self.push_attempts[2][0].callback({})
+        self.pump()
+
+        # This push should contain an unread count of 1 as there's now been one
+        # message since our last read receipt
+        self.assertEqual(len(self.push_attempts), 3)
+        self.assertEqual(
+            self.push_attempts[2][2]["notification"]["counts"]["unread"], 1
+        )
+
+        # Since we're grouping by room, sending more messages shouldn't increase the
+        # unread count, as they're all being sent in the same room
+        self.helper.send(room_id, body="Hello?", tok=other_access_token)
+
+        # Advance time and make the push succeed
+        self.pump()
+        self.push_attempts[3][0].callback({})
+
+        self.helper.send(room_id, body="Hello??", tok=other_access_token)
+
+        # Advance time and make the push succeed
+        self.pump()
+        self.push_attempts[4][0].callback({})
+
+        self.helper.send(room_id, body="HELLO???", tok=other_access_token)
+
+        # Advance time and make the push succeed
+        self.pump()
+        self.push_attempts[5][0].callback({})
+
+        self.assertEqual(len(self.push_attempts), 6)
```

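The `group_unread_count_by_room` flag exercised via `@override_config` above corresponds to a homeserver config setting. A sketch of the YAML, assuming the option sits under the `push` section exactly as the test override suggests:

```yaml
push:
  # true (the default): the "unread" badge counts rooms with unread messages.
  # false: the badge counts the total number of unread messages.
  group_unread_count_by_room: false
```
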
```diff
@@ -78,7 +78,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
         self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool
 
         self.test_handler = self._build_replication_data_handler()
-        self.worker_hs.replication_data_handler = self.test_handler
+        self.worker_hs._replication_data_handler = self.test_handler
 
         repl_handler = ReplicationCommandHandler(self.worker_hs)
         self.client = ClientReplicationStreamProtocol(
```

```diff
@@ -100,7 +100,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
         self.assertIn(group_id, self._get_groups_user_is_in(self.other_user_token))
 
         # Now delete the group
-        url = "/admin/delete_group/" + group_id
+        url = "/_synapse/admin/v1/delete_group/" + group_id
         request, channel = self.make_request(
             "POST",
             url.encode("ascii"),
```

```diff
@@ -78,7 +78,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase):
         )
 
         # Test that the admin can still send shutdown
-        url = "admin/shutdown_room/" + room_id
+        url = "/_synapse/admin/v1/shutdown_room/" + room_id
         request, channel = self.make_request(
             "POST",
             url.encode("ascii"),
@@ -112,7 +112,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
 
         # Test that the admin can still send shutdown
-        url = "admin/shutdown_room/" + room_id
+        url = "/_synapse/admin/v1/shutdown_room/" + room_id
         request, channel = self.make_request(
             "POST",
             url.encode("ascii"),
```

```diff
@@ -41,7 +41,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase):
 
     def make_homeserver(self, reactor, clock):
 
-        self.url = "/_matrix/client/r0/admin/register"
+        self.url = "/_synapse/admin/v1/register"
 
         self.registration_handler = Mock()
         self.identity_handler = Mock()
```

```diff
@@ -1768,3 +1768,111 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase):
         # though the MAU limit would stop the user doing so.
         puppet_token = self._get_token()
         self.helper.join(room_id, user=self.other_user, tok=puppet_token)
+
+
+class WhoisRestTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.url1 = "/_synapse/admin/v1/whois/%s" % urllib.parse.quote(self.other_user)
+        self.url2 = "/_matrix/client/r0/admin/whois/%s" % urllib.parse.quote(
+            self.other_user
+        )
+
+    def test_no_auth(self):
+        """
+        Try to get information about a user without authentication.
+        """
+        request, channel = self.make_request("GET", self.url1, b"{}")
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+        request, channel = self.make_request("GET", self.url2, b"{}")
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+    def test_requester_is_not_admin(self):
+        """
+        If the user is not a server admin, an error is returned.
+        """
+        self.register_user("user2", "pass")
+        other_user2_token = self.login("user2", "pass")
+
+        request, channel = self.make_request(
+            "GET", self.url1, access_token=other_user2_token,
+        )
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+        request, channel = self.make_request(
+            "GET", self.url2, access_token=other_user2_token,
+        )
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_user_is_not_local(self):
+        """
+        Tests that a lookup for a user that is not local returns a 400.
+        """
+        url1 = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain"
+        url2 = "/_matrix/client/r0/admin/whois/@unknown_person:unknown_domain"
+
+        request, channel = self.make_request(
+            "GET", url1, access_token=self.admin_user_tok,
+        )
+        self.assertEqual(400, channel.code, msg=channel.json_body)
+        self.assertEqual("Can only whois a local user", channel.json_body["error"])
+
+        request, channel = self.make_request(
+            "GET", url2, access_token=self.admin_user_tok,
+        )
+        self.assertEqual(400, channel.code, msg=channel.json_body)
+        self.assertEqual("Can only whois a local user", channel.json_body["error"])
+
+    def test_get_whois_admin(self):
+        """
+        The lookup should succeed for an admin.
+        """
+        request, channel = self.make_request(
+            "GET", self.url1, access_token=self.admin_user_tok,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(self.other_user, channel.json_body["user_id"])
+        self.assertIn("devices", channel.json_body)
+
+        request, channel = self.make_request(
+            "GET", self.url2, access_token=self.admin_user_tok,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(self.other_user, channel.json_body["user_id"])
+        self.assertIn("devices", channel.json_body)
+
+    def test_get_whois_user(self):
+        """
+        The lookup should succeed for a normal user looking up their own information.
+        """
+        other_user_token = self.login("user", "pass")
+
+        request, channel = self.make_request(
+            "GET", self.url1, access_token=other_user_token,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(self.other_user, channel.json_body["user_id"])
+        self.assertIn("devices", channel.json_body)
+
+        request, channel = self.make_request(
+            "GET", self.url2, access_token=other_user_token,
+        )
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(self.other_user, channel.json_body["user_id"])
+        self.assertIn("devices", channel.json_body)
```

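The tests above confirm the whois lookup stays reachable both at `/_synapse/admin/v1/whois/<user_id>` and, as the one surviving client-server path, `/_matrix/client/r0/admin/whois/<user_id>`. A hypothetical direct query using `requests` (server name and token are placeholders):

```python
import requests

resp = requests.get(
    "https://my.server.here/_synapse/admin/v1/whois/@user:my.server.here",
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
body = resp.json()
print(body["user_id"], list(body["devices"]))  # fields asserted in the tests
```
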
```diff
@@ -33,12 +33,15 @@ class PresenceTestCase(unittest.HomeserverTestCase):
 
     def make_homeserver(self, reactor, clock):
 
-        hs = self.setup_test_homeserver(
-            "red", federation_http_client=None, federation_client=Mock()
-        )
+        presence_handler = Mock()
+        presence_handler.set_state.return_value = defer.succeed(None)
 
-        hs.presence_handler = Mock()
-        hs.presence_handler.set_state.return_value = defer.succeed(None)
+        hs = self.setup_test_homeserver(
+            "red",
+            federation_http_client=None,
+            federation_client=Mock(),
+            presence_handler=presence_handler,
+        )
 
         return hs
 
@@ -55,7 +58,7 @@ class PresenceTestCase(unittest.HomeserverTestCase):
         )
 
         self.assertEqual(channel.code, 200)
-        self.assertEqual(self.hs.presence_handler.set_state.call_count, 1)
+        self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1)
 
     def test_put_presence_disabled(self):
         """
@@ -70,4 +73,4 @@ class PresenceTestCase(unittest.HomeserverTestCase):
         )
 
         self.assertEqual(channel.code, 200)
-        self.assertEqual(self.hs.presence_handler.set_state.call_count, 0)
+        self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 0)
```

```diff
@@ -342,7 +342,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
         self.register_user("admin", "adminpassword", admin=True)
         admin_tok = self.login("admin", "adminpassword")
 
-        url = "/_matrix/client/unstable/admin/account_validity/validity"
+        url = "/_synapse/admin/v1/account_validity/validity"
         params = {"user_id": user_id}
         request_data = json.dumps(params)
         request, channel = self.make_request(
@@ -362,7 +362,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
         self.register_user("admin", "adminpassword", admin=True)
         admin_tok = self.login("admin", "adminpassword")
 
-        url = "/_matrix/client/unstable/admin/account_validity/validity"
+        url = "/_synapse/admin/v1/account_validity/validity"
         params = {
             "user_id": user_id,
             "expiration_ts": 0,
@@ -389,7 +389,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
         self.register_user("admin", "adminpassword", admin=True)
         admin_tok = self.login("admin", "adminpassword")
 
-        url = "/_matrix/client/unstable/admin/account_validity/validity"
+        url = "/_synapse/admin/v1/account_validity/validity"
         params = {
             "user_id": user_id,
             "expiration_ts": 0,
@@ -569,7 +569,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
         tok = self.login("kermit", "monkey")
         # We need to manually add an email address otherwise the handler will do
         # nothing.
-        now = self.hs.clock.time_msec()
+        now = self.hs.get_clock().time_msec()
         self.get_success(
             self.store.user_add_threepid(
                 user_id=user_id,
@@ -587,7 +587,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
 
         # We need to manually add an email address otherwise the handler will do
         # nothing.
-        now = self.hs.clock.time_msec()
+        now = self.hs.get_clock().time_msec()
         self.get_success(
             self.store.user_add_threepid(
                 user_id=user_id,
@@ -646,7 +646,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase):
 
         self.hs.config.account_validity.startup_job_max_delta = self.max_delta
 
-        now_ms = self.hs.clock.time_msec()
+        now_ms = self.hs.get_clock().time_msec()
         self.get_success(self.store._set_expiration_date_when_missing())
 
         res = self.get_success(self.store.get_expiration_ts_for_user(user_id))
```

```diff
@@ -416,7 +416,7 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
             self.reactor,
             self.site,
             "GET",
-            "/_matrix/client/r0/admin/users/" + self.user_id,
+            "/_synapse/admin/v1/users/" + self.user_id,
             access_token=access_token,
             custom_headers=headers1.items(),
             **make_request_args,
```

```diff
@@ -554,7 +554,7 @@ class HomeserverTestCase(TestCase):
         self.hs.config.registration_shared_secret = "shared"
 
         # Create the user
-        request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
+        request, channel = self.make_request("GET", "/_synapse/admin/v1/register")
         self.assertEqual(channel.code, 200, msg=channel.result)
         nonce = channel.json_body["nonce"]
 
@@ -580,7 +580,7 @@ class HomeserverTestCase(TestCase):
             }
         )
         request, channel = self.make_request(
-            "POST", "/_matrix/client/r0/admin/register", body.encode("utf8")
+            "POST", "/_synapse/admin/v1/register", body.encode("utf8")
         )
         self.assertEqual(channel.code, 200, channel.json_body)
 
```

```diff
@@ -271,7 +271,7 @@ def setup_test_homeserver(
 
     # Install @cache_in_self attributes
     for key, val in kwargs.items():
-        setattr(hs, key, val)
+        setattr(hs, "_" + key, val)
 
     # Mock TLS
     hs.tls_server_context_factory = Mock()
```

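Because keyword overrides are now installed under `"_" + key`, they land in exactly the cache slot a `@cache_in_self` getter reads, so existing test overrides keep working. A hypothetical test sketch of that equivalence (mirroring the presence test above):

```python
from mock import Mock

from tests.unittest import HomeserverTestCase


class OverrideCachingTestCase(HomeserverTestCase):
    def make_homeserver(self, reactor, clock):
        # The keyword override is stored as hs._presence_handler, the slot
        # that get_presence_handler() would otherwise populate lazily.
        return self.setup_test_homeserver(presence_handler=Mock())

    def test_override_fills_cache_slot(self):
        self.assertIs(self.hs.get_presence_handler(), self.hs._presence_handler)
```
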