diff --git a/.circleci/config.yml b/.circleci/config.yml index e03f01b837..ec3848b048 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,27 @@ version: 2 jobs: + dockerhubuploadrelease: + machine: true + steps: + - checkout + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 . + - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD + - run: docker push matrixdotorg/synapse:${CIRCLE_TAG} + - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3 + dockerhubuploadlatest: + machine: true + steps: + - checkout + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} . + - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 . + - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD + - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest + - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3 + - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1} + - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3 + - run: docker push matrixdotorg/synapse:latest + - run: docker push matrixdotorg/synapse:latest-py3 sytestpy2: machine: true steps: @@ -9,6 +31,8 @@ jobs: - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs sytestpy2postgres: machine: true steps: @@ -18,15 +42,45 @@ jobs: - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs + sytestpy2merged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy2 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs + + sytestpy2postgresmerged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy2 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs + sytestpy3: machine: true steps: - checkout - run: docker pull matrixdotorg/sytest-synapsepy3 - - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs sytestpy3postgres: machine: true steps: @@ -36,13 +90,76 @@ jobs: - store_artifacts: path: ~/project/logs destination: logs + - store_test_results: + path: logs + sytestpy3merged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs + sytestpy3postgresmerged: + machine: true + steps: + - checkout + - run: bash .circleci/merge_base_branch.sh + - run: docker pull matrixdotorg/sytest-synapsepy3 + - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs 
-e POSTGRES=1 matrixdotorg/sytest-synapsepy3 + - store_artifacts: + path: ~/project/logs + destination: logs + - store_test_results: + path: logs workflows: version: 2 build: jobs: - - sytestpy2 - - sytestpy2postgres -# Currently broken while the Python 3 port is incomplete -# - sytestpy3 -# - sytestpy3postgres + - sytestpy2: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy2postgres: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy3: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy3postgres: + filters: + branches: + only: /develop|master|release-.*/ + - sytestpy2merged: + filters: + branches: + ignore: /develop|master|release-.*/ + - sytestpy2postgresmerged: + filters: + branches: + ignore: /develop|master|release-.*/ + - sytestpy3merged: + filters: + branches: + ignore: /develop|master|release-.*/ + - sytestpy3postgresmerged: + filters: + branches: + ignore: /develop|master|release-.*/ + - dockerhubuploadrelease: + filters: + tags: + only: /v[0-9].[0-9]+.[0-9]+.*/ + branches: + ignore: /.*/ + - dockerhubuploadlatest: + filters: + branches: + only: master diff --git a/.circleci/merge_base_branch.sh b/.circleci/merge_base_branch.sh new file mode 100755 index 0000000000..6b0bf3aa48 --- /dev/null +++ b/.circleci/merge_base_branch.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -e + +# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful. +# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL. +echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV +source $BASH_ENV + +if [[ -z "${CIRCLE_PR_NUMBER}" ]] +then + echo "Can't figure out what the PR number is! Assuming merge target is develop." + + # It probably hasn't had a PR opened yet. Since all PRs land on develop, we + # can probably assume it's based on it and will be merged into it. + GITBASE="develop" +else + # Get the reference, using the GitHub API + GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'` +fi + +# Show what we are before +git show -s + +# Set up username so it can do a merge +git config --global user.email bot@matrix.org +git config --global user.name "A robot" + +# Fetch and merge. If it doesn't work, it will raise due to set -e. +git fetch -u origin $GITBASE +git merge --no-edit origin/$GITBASE + +# Show what we are after. 
+git show -s \ No newline at end of file diff --git a/.dockerignore b/.dockerignore index 6cdb8532d3..0180602e56 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,6 +3,5 @@ Dockerfile .gitignore demo/etc tox.ini -synctl .git/* .tox/* diff --git a/.gitignore b/.gitignore index 9f42a7568f..3b2252ad8a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,11 @@ *.pyc .*.swp *~ +*.lock .DS_Store _trial_temp/ +_trial_temp*/ logs/ dbs/ *.egg @@ -44,6 +46,7 @@ media_store/ build/ venv/ venv*/ +*venv/ localhost-800*/ static/client/register/register_config.js diff --git a/.travis.yml b/.travis.yml index 318701c9f8..2077f6af72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,9 +8,6 @@ before_script: - git remote set-branches --add origin develop - git fetch origin develop -services: - - postgresql - matrix: fast_finish: true include: @@ -23,22 +20,31 @@ matrix: - python: 2.7 env: TOX_ENV=py27 + - python: 2.7 + env: TOX_ENV=py27-old + - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" + services: + - postgresql + + - python: 3.5 + env: TOX_ENV=py35 - python: 3.6 env: TOX_ENV=py36 + - python: 3.6 + env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" + services: + - postgresql + - python: 3.6 env: TOX_ENV=check_isort - python: 3.6 env: TOX_ENV=check-newsfragment - allow_failures: - - python: 2.7 - env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" - install: - pip install tox diff --git a/CHANGES.md b/CHANGES.md index a35f5aebc7..048b9f95db 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,231 @@ +Synapse 0.33.6 (2018-10-04) +=========================== + +Internal Changes +---------------- + +- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002)) + + +Synapse 0.33.6rc1 (2018-10-03) +============================== + +Features +-------- + +- Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883)) +- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894)) +- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916)) +- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959)) +- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964)) + + +Bugfixes +-------- + +- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578)) +- support registering regular users non-interactively with register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836)) +- Fix broken invite email links for self hosted riots ([\#3868](https://github.com/matrix-org/synapse/issues/3868)) +- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879)) +- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889)) +- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895)) +- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. 
([\#3899](https://github.com/matrix-org/synapse/issues/3899))
+- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907))
+- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908))
+- Fix bug where things occasionally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910))
+- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914))
+- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980))
+- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936))
+- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938))
+- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956))
+- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960))
+- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961))
+- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968))
+- Replaced all occurrences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970))
+- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986))
+- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990))
+
+
+Internal Changes
+----------------
+
+- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699))
+- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794))
+- Remove documentation regarding installation on Cygwin; the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873))
+- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897))
+- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903))
+- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966))
+- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909))
+- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911))
+- Add a regression test for logging failed HTTP requests on Python 3.
([\#3912](https://github.com/matrix-org/synapse/issues/3912))
+- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924))
+- Fix spurious exceptions when remote http client closes connection ([\#3925](https://github.com/matrix-org/synapse/issues/3925))
+- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927))
+- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991))
+- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946))
+- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947))
+- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948))
+- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952))
+- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. ([\#3957](https://github.com/matrix-org/synapse/issues/3957))
+- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958))
+- fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963))
+- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965))
+- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967))
+- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972))
+- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976))
+- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985))
+- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987))
+- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988))
+- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989))
+
+
+Synapse 0.33.5.1 (2018-09-25)
+=============================
+
+Internal Changes
+----------------
+
+- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))
+
+
+Synapse 0.33.5 (2018-09-24)
+===========================
+
+No significant changes.
+
+
+Synapse 0.33.5rc1 (2018-09-17)
+==============================
+
+Features
+--------
+
+- Python 3.5 and 3.6 support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576))
+- Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790))
+- Add synapse_admin_mau:registered_reserved_users metric to expose number of real reserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846))
+
+
+Bugfixes
+--------
+
+- Remove connection ID for replication prometheus metrics, as it creates a large number of new series.
([\#3788](https://github.com/matrix-org/synapse/issues/3788)) +- guest users should not be part of mau total ([\#3800](https://github.com/matrix-org/synapse/issues/3800)) +- Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804)) +- Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810)) +- Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824)) +- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835)) +- Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841)) +- Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845)) +- Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851)) +- Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859)) +- ([\#3874](https://github.com/matrix-org/synapse/issues/3874)) +- Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875)) + + +Internal Changes +---------------- + +- CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704)) +- http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771)) +- Improve human readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789)) +- Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795)) +- handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803)) +- Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805)) +- Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806)) +- tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808)) +- crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822)) +- rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823)) +- add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826)) +- speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827)) +- Improved Dockerfile to remove build requirements after building reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834)) +- Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840)) +- federation/ is now ported to Python 3. ([\#3847](https://github.com/matrix-org/synapse/issues/3847)) +- Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853)) +- Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855)) +- Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856)) +- Refactor some HTTP timeout code. 
([\#3857](https://github.com/matrix-org/synapse/issues/3857)) +- Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858)) +- Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860)) +- Add in flight real time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871)) +- Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872)) +- mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877)) +- Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888)) + + +Synapse 0.33.4 (2018-09-07) +=========================== + +Internal Changes +---------------- + +- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802)) + + +Synapse 0.33.4rc2 (2018-09-06) +============================== + +Pull in security fixes from v0.33.3.1 + + +Synapse 0.33.3.1 (2018-09-06) +============================= + +SECURITY FIXES +-------------- + +- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796)) +- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796)) + + +Internal Changes +---------------- + +- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802)) + + +Synapse 0.33.4rc1 (2018-09-04) +============================== + +Features +-------- + +- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659)) +- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680)) +- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724)) +- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749)) +- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). ([\#3751](https://github.com/matrix-org/synapse/issues/3751)) + + +Bugfixes +-------- + +- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722)) +- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747)) +- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753)) +- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754)) +- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756)) +- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. 
([\#3758](https://github.com/matrix-org/synapse/issues/3758)) +- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760)) +- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768)) +- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777)) + +Internal Changes +---------------- + +- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378)) +- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673)) +- The synapse.storage module has been ported to Python 3. ([\#3725](https://github.com/matrix-org/synapse/issues/3725)) +- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726)) +- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727)) +- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730)) +- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734)) +- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735)) +- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737)) +- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740)) +- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746)) +- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764)) + + Synapse 0.33.3 (2018-08-22) =========================== diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 3d75853aa7..6ef7d48dc7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -30,12 +30,28 @@ use github's pull request workflow to review the contribution, and either ask you to make any refinements needed or merge it and make them ourselves. The changes will then land on master when we next do a release. -We use `Jenkins `_ and -`Travis `_ for continuous -integration. All pull requests to synapse get automatically tested by Travis; -the Jenkins builds require an adminstrator to start them. If your change -breaks the build, this will be shown in github, so please keep an eye on the -pull request for feedback. +We use `CircleCI `_ and `Travis CI +`_ for continuous integration. All +pull requests to synapse get automatically tested by Travis and CircleCI. +If your change breaks the build, this will be shown in GitHub, so please +keep an eye on the pull request for feedback. + +To run unit tests in a local development environment, you can use: + +- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for + SQLite-backed Synapse on Python 2.7. +- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5. +- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6. +- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7 + (requires a running local PostgreSQL with access to create databases). 
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7 + (requires Docker). Entirely self-contained, recommended if you don't want to + set up PostgreSQL yourself. + +Docker images are available for running the integration tests (SyTest) locally, +see the `documentation in the SyTest repo +`_ for more +information. Code style ~~~~~~~~~~ @@ -59,9 +75,10 @@ To create a changelog entry, make a new file in the ``changelog.d`` file named in the format of ``PRnumber.type``. The type can be one of ``feature``, ``bugfix``, ``removal`` (also used for deprecations), or ``misc`` (for internal-only changes). The content of -the file is your changelog entry, which can contain RestructuredText -formatting. A note of contributors is welcomed in changelogs for -non-misc changes (the content of misc changes is not displayed). +the file is your changelog entry, which can contain Markdown +formatting. Adding credits to the changelog is encouraged, we value +your contributions and would like to have you shouted out in the +release notes! For example, a fix in PR #1234 would have its changelog entry in ``changelog.d/1234.bugfix``, and contain content like "The security levels of @@ -76,7 +93,8 @@ AUTHORS.rst file for the project in question. Please feel free to include a change to AUTHORS.rst in your pull request to list yourself and a short description of the area(s) you've worked on. Also, we sometimes have swag to give away to contributors - if you feel that Matrix-branded apparel is missing -from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :) +from your life, please mail us your shipping address to matrix at matrix.org and +we'll try to fix it :) Sign off ~~~~~~~~ @@ -143,4 +161,9 @@ flag to ``git commit``, which uses the name and email set in your Conclusion ~~~~~~~~~~ -That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do! +That's it! Matrix is a very open and collaborative project as you might expect +given our obsession with open communication. If we're going to successfully +matrix together all the fragmented communication technologies out there we are +reliant on contributions and collaboration from the community to do so. So +please get involved - and we hope you have as much fun hacking on Matrix as we +do! diff --git a/MANIFEST.in b/MANIFEST.in index e0826ba544..c6a37ac685 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -23,12 +23,9 @@ recursive-include synapse/static *.gif recursive-include synapse/static *.html recursive-include synapse/static *.js -exclude jenkins.sh -exclude jenkins*.sh -exclude jenkins* exclude Dockerfile exclude .dockerignore -recursive-exclude jenkins *.sh +exclude test_postgresql.sh include pyproject.toml recursive-include changelog.d * @@ -37,3 +34,6 @@ prune .github prune demo/etc prune docker prune .circleci + +exclude jenkins* +recursive-exclude jenkins *.sh diff --git a/MAP.rst b/MAP.rst deleted file mode 100644 index 0f8e9818a8..0000000000 --- a/MAP.rst +++ /dev/null @@ -1,35 +0,0 @@ -Directory Structure -=================== - -Warning: this may be a bit stale... - -:: - - . 
- ├── cmdclient Basic CLI python Matrix client - ├── demo Scripts for running standalone Matrix demos - ├── docs All doc, including the draft Matrix API spec - │   ├── client-server The client-server Matrix API spec - │   ├── model Domain-specific elements of the Matrix API spec - │   ├── server-server The server-server model of the Matrix API spec - │   └── sphinx The internal API doc of the Synapse homeserver - ├── experiments Early experiments of using Synapse's internal APIs - ├── graph Visualisation of Matrix's distributed message store - ├── synapse The reference Matrix homeserver implementation - │   ├── api Common building blocks for the APIs - │   │   ├── events Definition of state representation Events - │   │   └── streams Definition of streamable Event objects - │   ├── app The __main__ entry point for the homeserver - │   ├── crypto The PKI client/server used for secure federation - │   │   └── resource PKI helper objects (e.g. keys) - │   ├── federation Server-server state replication logic - │   ├── handlers The main business logic of the homeserver - │   ├── http Wrappers around Twisted's HTTP server & client - │   ├── rest Servlet-style RESTful API - │   ├── storage Persistence subsystem (currently only sqlite3) - │   │   └── schema sqlite persistence schema - │   └── util Synapse-specific utilities - ├── tests Unit tests for the Synapse homeserver - └── webclient Basic AngularJS Matrix web client - - diff --git a/README.rst b/README.rst index 4c5971d043..e1ea351f84 100644 --- a/README.rst +++ b/README.rst @@ -81,7 +81,7 @@ Thanks for using Matrix! Synapse Installation ==================== -Synapse is the reference python/twisted Matrix homeserver implementation. +Synapse is the reference Python/Twisted Matrix homeserver implementation. System requirements: @@ -91,12 +91,13 @@ System requirements: Installing from source ---------------------- + (Prebuilt packages are available for some platforms - see `Platform-Specific Instructions`_.) -Synapse is written in python but some of the libraries it uses are written in -C. So before we can install synapse itself we need a working C compiler and the -header files for python C extensions. +Synapse is written in Python but some of the libraries it uses are written in +C. So before we can install Synapse itself we need a working C compiler and the +header files for Python C extensions. Installing prerequisites on Ubuntu or Debian:: @@ -143,21 +144,27 @@ Installing prerequisites on OpenBSD:: doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \ libxslt -To install the synapse homeserver run:: +To install the Synapse homeserver run:: virtualenv -p python2.7 ~/.synapse source ~/.synapse/bin/activate pip install --upgrade pip pip install --upgrade setuptools - pip install https://github.com/matrix-org/synapse/tarball/master + pip install matrix-synapse -This installs synapse, along with the libraries it uses, into a virtual +This installs Synapse, along with the libraries it uses, into a virtual environment under ``~/.synapse``. Feel free to pick a different directory if you prefer. +This Synapse installation can then be later upgraded by using pip again with the +update flag:: + + source ~/.synapse/bin/activate + pip install -U matrix-synapse + In case of problems, please see the _`Troubleshooting` section below. 
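For a quick check that an upgrade actually took effect, the installed version
can be inspected before and after. A minimal sketch, assuming the ``~/.synapse``
virtualenv from the instructions above::

    source ~/.synapse/bin/activate

    # Show the version currently installed in this virtualenv.
    pip show matrix-synapse | grep ^Version

    # Upgrade to the newest release on PyPI, then check again.
    pip install -U matrix-synapse
    pip show matrix-synapse | grep ^Version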
-There is an offical synapse image available at +There is an offical synapse image available at https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with the docker-compose file available at `contrib/docker `_. Further information on this including configuration options is available in the README on @@ -167,12 +174,7 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a Dockerfile to automate a synapse server in a single Docker image, at https://hub.docker.com/r/avhost/docker-matrix/tags/ -Also, Martin Giess has created an auto-deployment process with vagrant/ansible, -tested with VirtualBox/AWS/DigitalOcean - see -https://github.com/EMnify/matrix-synapse-auto-deploy -for details. - -Configuring synapse +Configuring Synapse ------------------- Before you can start Synapse, you will need to generate a configuration @@ -254,26 +256,6 @@ Setting up a TURN server For reliable VoIP calls to be routed via this homeserver, you MUST configure a TURN server. See ``_ for details. -IPv6 ----- - -As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph -for providing PR #1696. - -However, for federation to work on hosts with IPv6 DNS servers you **must** -be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002 -for details. We can't make Synapse depend on Twisted 17.1 by default -yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909) -so if you are using operating system dependencies you'll have to install your -own Twisted 17.1 package via pip or backports etc. - -If you're running in a virtualenv then pip should have installed the newest -Twisted automatically, but if your virtualenv is old you will need to manually -upgrade to a newer Twisted dependency via: - - pip install Twisted>=17.1.0 - - Running Synapse =============== @@ -449,8 +431,7 @@ settings require a slightly more difficult installation process. using the ``.`` command, rather than ``bash``'s ``source``. 5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse webpages for their titles. -6) Use ``pip`` to install this repository: ``pip install - https://github.com/matrix-org/synapse/tarball/master`` +6) Use ``pip`` to install this repository: ``pip install matrix-synapse`` 7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the chance of a compromised Synapse server being used to take over your box. @@ -464,37 +445,13 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix- Windows Install --------------- -Synapse can be installed on Cygwin. It requires the following Cygwin packages: - -- gcc -- git -- libffi-devel -- openssl (and openssl-devel, python-openssl) -- python -- python-setuptools - -The content repository requires additional packages and will be unable to process -uploads without them: - -- libjpeg8 -- libjpeg8-devel -- zlib - -If you choose to install Synapse without these packages, you will need to reinstall -``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install -pillow --user`` - -Troubleshooting: - -- You may need to upgrade ``setuptools`` to get this to work correctly: - ``pip install setuptools --upgrade``. -- You may encounter errors indicating that ``ffi.h`` is missing, even with - ``libffi-devel`` installed. If you do, copy the ``.h`` files: - ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include`` -- You may need to install libsodium from source in order to install PyNacl. 
If
-  you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
-  it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
+Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
+for Windows Server.

Troubleshooting
===============
@@ -502,7 +459,7 @@ Troubleshooting
Troubleshooting Installation
----------------------------

-Synapse requires pip 1.7 or later, so if your OS provides too old a version you
+Synapse requires pip 8 or later, so if your OS provides too old a version you
may need to manually upgrade it::

    sudo pip install --upgrade pip
@@ -537,28 +494,6 @@ failing, e.g.::

    pip install twisted

-On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
-will need to export CFLAGS=-Qunused-arguments.
-
-Troubleshooting Running
------------------------
-
-If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
-encryption and digital signatures.
-Unfortunately PyNACL currently has a few issues
-(https://github.com/pyca/pynacl/issues/53) and
-(https://github.com/pyca/pynacl/issues/79) that mean it may not install
-correctly, causing all tests to fail with errors about missing "sodium.h". To
-fix try re-installing from PyPI or directly from
-(https://github.com/pyca/pynacl)::
-
-    # Install from PyPI
-    pip install --user --upgrade --force pynacl
-
-    # Install from github
-    pip install --user https://github.com/pyca/pynacl/tarball/master
-
Running out of File Handles
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -747,6 +682,18 @@ so an example nginx configuration might look like::

        }
    }

+and an example apache configuration may look like::
+
+    <VirtualHost *:443>
+        SSLEngine on
+        ServerName matrix.example.com;
+
+        <Location /_matrix>
+            ProxyPass http://127.0.0.1:8008/_matrix nocanon
+            ProxyPassReverse http://127.0.0.1:8008/_matrix
+        </Location>
+    </VirtualHost>
+
You will also want to set ``bind_addresses: ['127.0.0.1']`` and
``x_forwarded: true`` for port 8008 in ``homeserver.yaml`` to ensure that
client IP addresses are recorded correctly.
@@ -901,7 +848,7 @@ to install using pip and a virtualenv::

    virtualenv -p python2.7 env
    source env/bin/activate
-    python synapse/python_dependencies.py | xargs pip install
+    python -m synapse.python_dependencies | xargs pip install
    pip install lxml mock

This will run a process of downloading and installing all the needed
@@ -956,5 +903,13 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage in
memory constrained enviroments, or increased if performance starts to
degrade.

+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall memory use, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
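For installations that are not launched via the Debian service scripts, the
same preload can be applied when starting Synapse by hand. A minimal sketch,
assuming the Debian library path above and ``synctl`` on the ``PATH``::

    # Preload jemalloc for this launch only; the .so path is the Debian
    # location quoted above and will differ on other distributions.
    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 synctl start

..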
_`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys diff --git a/UPGRADE.rst b/UPGRADE.rst index f6bb1070b1..6cf3169f75 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -18,7 +18,7 @@ instructions that may be required are listed later in this document. .. code:: bash - pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master + pip install --upgrade --process-dependency-links matrix-synapse # restart synapse synctl restart @@ -48,11 +48,11 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" -Upgrading to $NEXT_VERSION +Upgrading to v0.27.3 ==================== This release expands the anonymous usage stats sent if the opt-in -``report_stats`` configuration is set to ``true``. We now capture RSS memory +``report_stats`` configuration is set to ``true``. We now capture RSS memory and cpu use at a very coarse level. This requires administrators to install the optional ``psutil`` python module. diff --git a/changelog.d/3659.feature b/changelog.d/3659.feature deleted file mode 100644 index a5b4821c09..0000000000 --- a/changelog.d/3659.feature +++ /dev/null @@ -1 +0,0 @@ -Support profile API endpoints on workers diff --git a/changelog.d/3673.misc b/changelog.d/3673.misc deleted file mode 100644 index d672111fb9..0000000000 --- a/changelog.d/3673.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor state module to support multiple room versions diff --git a/changelog.d/3680.feature b/changelog.d/3680.feature deleted file mode 100644 index 4edaaf76a8..0000000000 --- a/changelog.d/3680.feature +++ /dev/null @@ -1 +0,0 @@ -Server notices for resource limit blocking diff --git a/changelog.d/3722.bugfix b/changelog.d/3722.bugfix deleted file mode 100644 index 16cbaf76cb..0000000000 --- a/changelog.d/3722.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues diff --git a/changelog.d/3724.feature b/changelog.d/3724.feature deleted file mode 100644 index 1b374ccf47..0000000000 --- a/changelog.d/3724.feature +++ /dev/null @@ -1 +0,0 @@ -Allow guests to use /rooms/:roomId/event/:eventId diff --git a/changelog.d/3726.misc b/changelog.d/3726.misc deleted file mode 100644 index c4f66ec998..0000000000 --- a/changelog.d/3726.misc +++ /dev/null @@ -1 +0,0 @@ -Split the state_group_cache into member and non-member state events (and so speed up LL /sync) diff --git a/changelog.d/3727.misc b/changelog.d/3727.misc deleted file mode 100644 index 0b83220d90..0000000000 --- a/changelog.d/3727.misc +++ /dev/null @@ -1 +0,0 @@ -Log failure to authenticate remote servers as warnings (without stack traces) diff --git a/changelog.d/3734.misc b/changelog.d/3734.misc deleted file mode 100644 index 4f6e4b3848..0000000000 --- a/changelog.d/3734.misc +++ /dev/null @@ -1 +0,0 @@ -Reference the need for an HTTP replication port when using the federation_reader worker diff --git a/changelog.d/3735.misc b/changelog.d/3735.misc deleted file mode 100644 index f17004be71..0000000000 --- a/changelog.d/3735.misc +++ /dev/null @@ -1 +0,0 @@ -Fix minor spelling error in federation client documentation. 
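The changelog.d deletions above and below are routine release housekeeping:
each newsfragment has been folded into the CHANGES.md entries earlier in this
diff. Creating such a fragment for a new PR is a one-liner. A sketch, reusing
the hypothetical PR #1234 from the CONTRIBUTING example (the file content here
is invented)::

    # One file per PR, named <PR number>.<type>, where type is one of
    # feature, bugfix, removal or misc; the file's content becomes the
    # changelog entry verbatim.
    echo "Fix an example bug." > changelog.d/1234.bugfix

    # Run the same check that the check-newsfragment Travis job above runs.
    tox -e check-newsfragment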
diff --git a/changelog.d/3746.misc b/changelog.d/3746.misc deleted file mode 100644 index fc00ee773a..0000000000 --- a/changelog.d/3746.misc +++ /dev/null @@ -1 +0,0 @@ -Fix MAU cache invalidation due to missing yield diff --git a/changelog.d/3747.bugfix b/changelog.d/3747.bugfix deleted file mode 100644 index c41e2a1213..0000000000 --- a/changelog.d/3747.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we resent "limit exceeded" server notices repeatedly diff --git a/changelog.d/3749.feature b/changelog.d/3749.feature deleted file mode 100644 index 9f8837b106..0000000000 --- a/changelog.d/3749.feature +++ /dev/null @@ -1 +0,0 @@ -Add mau_trial_days config param, so that users only get counted as MAU after N days. diff --git a/changelog.d/3751.feature b/changelog.d/3751.feature deleted file mode 100644 index dc9742b15b..0000000000 --- a/changelog.d/3751.feature +++ /dev/null @@ -1 +0,0 @@ -Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). diff --git a/changelog.d/3753.bugfix b/changelog.d/3753.bugfix deleted file mode 100644 index b4301267df..0000000000 --- a/changelog.d/3753.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices diff --git a/changelog.d/3754.bugfix b/changelog.d/3754.bugfix deleted file mode 100644 index 6e3ec80194..0000000000 --- a/changelog.d/3754.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic diff --git a/changelog.d/3755.bugfix b/changelog.d/3755.bugfix deleted file mode 100644 index 6a1f83f0ce..0000000000 --- a/changelog.d/3755.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tagging of server notice rooms diff --git a/changelog.d/3995.bugfix b/changelog.d/3995.bugfix new file mode 100644 index 0000000000..2adc36756b --- /dev/null +++ b/changelog.d/3995.bugfix @@ -0,0 +1 @@ +Fix bug in event persistence logic which caused 'NoneType is not iterable' \ No newline at end of file diff --git a/changelog.d/3996.bugfix b/changelog.d/3996.bugfix new file mode 100644 index 0000000000..a056485ea1 --- /dev/null +++ b/changelog.d/3996.bugfix @@ -0,0 +1 @@ +Fix exception in background metrics collection diff --git a/changelog.d/3997.bugfix b/changelog.d/3997.bugfix new file mode 100644 index 0000000000..b060ee8c18 --- /dev/null +++ b/changelog.d/3997.bugfix @@ -0,0 +1 @@ +Fix exception handling in fetching remote profiles diff --git a/changelog.d/3999.bugfix b/changelog.d/3999.bugfix new file mode 100644 index 0000000000..dc3b2caffa --- /dev/null +++ b/changelog.d/3999.bugfix @@ -0,0 +1 @@ +Fix handling of rejected threepid invites diff --git a/changelog.d/4017.misc b/changelog.d/4017.misc new file mode 100644 index 0000000000..b1ceb06560 --- /dev/null +++ b/changelog.d/4017.misc @@ -0,0 +1 @@ +Optimisation for serving federation requests \ No newline at end of file diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index c58612594a..dc3f4a1d1c 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -14,7 +14,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "5.2.0" + "version": "5.2.4" }, { "type": "panel", @@ -54,7 +54,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1533598785368, + "iteration": 1537878047048, "links": [ { "asDropdown": true, @@ -86,7 +86,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 
1, "gridPos": { "h": 9, @@ -118,7 +118,7 @@ "steppedLine": false, "targets": [ { - "expr": "process_cpu_seconds:rate2m{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} ", @@ -179,7 +179,7 @@ "mode": "spectrum" }, "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "gridPos": { "h": 9, "w": 12, @@ -525,7 +525,7 @@ "x": 0, "y": 25 }, - "id": 48, + "id": 50, "legend": { "avg": false, "current": false, @@ -549,8 +549,9 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])", "format": "time_series", + "interval": "", "intervalFactor": 2, "legendFormat": "{{job}}-{{index}}", "refId": "A", @@ -560,7 +561,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Avg time waiting for db conn", + "title": "Avg reactor tick time", "tooltip": { "shared": true, "sort": 0, @@ -576,12 +577,11 @@ }, "yaxes": [ { - "decimals": null, "format": "s", - "label": "", + "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -604,6 +604,7 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan", "fill": 1, "gridPos": { "h": 7, @@ -611,6 +612,196 @@ "x": 12, "y": 25 }, + "id": 105, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}} 99%", + "refId": "A", + "step": 20 + }, + { + "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 95%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 90%", + "refId": "C" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Reactor tick quantiles", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 
1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 53, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Up", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 32 + }, "id": 49, "legend": { "avg": false, @@ -691,92 +882,6 @@ "alignLevel": null } }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 50, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Avg reactor tick time", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": {}, "bars": false, @@ -790,8 +895,8 @@ "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 32 + "x": 0, + "y": 39 }, "id": 5, 
"legend": { @@ -901,90 +1006,6 @@ "align": false, "alignLevel": null } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "fill": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 39 - }, - "id": 53, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Up", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } } ], "repeat": null, @@ -1013,7 +1034,7 @@ "h": 7, "w": 12, "x": 0, - "y": 49 + "y": 47 }, "id": 40, "legend": { @@ -1098,7 +1119,7 @@ "h": 7, "w": 12, "x": 12, - "y": 49 + "y": 47 }, "id": 46, "legend": { @@ -1187,7 +1208,7 @@ "h": 7, "w": 12, "x": 0, - "y": 56 + "y": 54 }, "id": 44, "legend": { @@ -1276,7 +1297,7 @@ "h": 7, "w": 12, "x": 12, - "y": 56 + "y": 54 }, "id": 45, "legend": { @@ -1383,7 +1404,7 @@ "h": 8, "w": 12, "x": 0, - "y": 48 + "y": 62 }, "id": 4, "legend": { @@ -1490,7 +1511,7 @@ "h": 8, "w": 12, "x": 12, - "y": 48 + "y": 62 }, "id": 32, "legend": { @@ -1578,7 +1599,7 @@ "h": 8, "w": 12, "x": 0, - "y": 56 + "y": 70 }, "id": 23, "legend": { @@ -1688,7 +1709,7 @@ "h": 8, "w": 12, "x": 12, - "y": 56 + "y": 70 }, "id": 52, "legend": { @@ -1795,7 +1816,7 @@ "h": 8, "w": 12, "x": 0, - "y": 64 + "y": 78 }, "id": 7, "legend": { @@ -1886,7 +1907,7 @@ "h": 8, "w": 12, "x": 12, - "y": 64 + "y": 78 }, "id": 47, "legend": { @@ -1969,13 +1990,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 72 + "y": 86 }, "id": 103, "legend": { @@ -2069,13 +2090,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 23 + "y": 49 }, "id": 99, "legend": { @@ -2154,13 +2175,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 23 + "y": 49 }, "id": 101, "legend": { @@ -2186,17 +2207,24 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + 
rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", + "hide": false, "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{name}}", "refId": "A" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "DB usage by background jobs", + "title": "DB usage by background jobs (including scheduling time)", "tooltip": { "shared": true, "sort": 0, @@ -2252,13 +2280,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 25 + "y": 64 }, "id": 79, "legend": { @@ -2336,13 +2364,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 25 + "y": 64 }, "id": 83, "legend": { @@ -2447,7 +2475,7 @@ "h": 7, "w": 12, "x": 0, - "y": 23 + "y": 65 }, "id": 51, "legend": { @@ -2545,6 +2573,194 @@ }, "id": 58, "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Avg time waiting for db conn", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, 
rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 99%", + "refId": "A", + "step": 20 + }, + { + "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 95%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}} {{index}} 90%", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Db scheduling time quantiles", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "s", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, { "aliasColors": {}, "bars": false, @@ -2559,7 +2775,7 @@ "h": 7, "w": 12, "x": 0, - "y": 25 + "y": 31 }, "id": 10, "legend": { @@ -2648,7 +2864,7 @@ "h": 7, "w": 12, "x": 12, - "y": 25 + "y": 31 }, "id": 11, "legend": { @@ -2672,11 +2888,11 @@ "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": true, "targets": [ { - "expr": "topk(5, rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "instant": false, "interval": "", @@ -2753,7 +2969,7 @@ "h": 13, "w": 12, "x": 0, - "y": 17 + "y": 60 }, "id": 12, "legend": { @@ -2841,7 +3057,7 @@ "h": 13, "w": 12, "x": 12, - "y": 17 + "y": 60 }, "id": 26, "legend": { @@ -2929,7 +3145,7 @@ "h": 13, "w": 12, "x": 0, - "y": 30 + "y": 73 }, "id": 13, "legend": { @@ -3017,7 +3233,7 @@ "h": 13, "w": 12, "x": 12, - "y": 30 + "y": 73 }, "id": 27, "legend": { @@ -3105,7 +3321,7 @@ "h": 13, "w": 12, "x": 0, - "y": 43 + "y": 86 }, "id": 28, "legend": { @@ -3192,7 +3408,7 @@ "h": 13, "w": 12, "x": 12, - "y": 43 + "y": 86 }, "id": 25, "legend": { @@ -3295,7 +3511,7 @@ "h": 10, "w": 12, "x": 0, - "y": 55 + "y": 68 }, "id": 1, "legend": { @@ -3387,7 +3603,7 @@ "h": 10, "w": 12, "x": 12, - "y": 55 + "y": 68 }, "id": 8, "legend": { @@ -3477,7 +3693,7 @@ "h": 10, "w": 12, "x": 0, - "y": 65 + "y": 78 }, "id": 38, "legend": { @@ -3563,7 +3779,7 @@ "h": 10, "w": 12, "x": 12, - "y": 65 + "y": 78 }, "id": 39, "legend": { @@ -3643,13 +3859,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 75 + "y": 88 }, "id": 65, "legend": { @@ -3745,13 +3961,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 90 + "y": 27 }, "id": 91, "legend": { @@ -3841,7 +4057,7 @@ "h": 9, "w": 12, 
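The new "Db scheduling time quantiles" panel above estimates p90/p95/p99 by feeding rate()s of the synapse_storage_schedule_time_bucket counters into histogram_quantile(). Prometheus locates the cumulative bucket that contains the target rank and interpolates linearly within it; a rough sketch of that estimation, with made-up bucket boundaries and counts:

    def histogram_quantile(q, buckets):
        """Estimate a quantile from cumulative histogram buckets, roughly
        the way Prometheus's histogram_quantile() does (linear
        interpolation inside the bucket containing the target rank)."""
        total = buckets[-1][1]          # the +Inf bucket holds the grand total
        rank = q * total
        prev_bound, prev_count = 0.0, 0.0
        for bound, count in buckets:
            if count >= rank:
                if bound == float("inf"):
                    # Can't interpolate into +Inf; fall back to the last
                    # finite upper bound, as Prometheus does.
                    return prev_bound
                return prev_bound + (bound - prev_bound) * (
                    (rank - prev_count) / (count - prev_count)
                )
            prev_bound, prev_count = bound, count

    # Assumed bucket layout: (upper bound in seconds, cumulative count).
    buckets = [(0.005, 40.0), (0.01, 70.0), (0.05, 95.0), (float("inf"), 100.0)]
    print(histogram_quantile(0.99, buckets))  # 0.05: the p99 falls in the +Inf bucket
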
"x": 12, - "y": 90 + "y": 27 }, "id": 21, "legend": { @@ -3920,13 +4136,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 99 + "y": 36 }, "id": 89, "legend": { @@ -4006,13 +4222,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 99 + "y": 36 }, "id": 93, "legend": { @@ -4027,7 +4243,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4090,13 +4306,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 108 + "y": 45 }, "id": 95, "legend": { @@ -4189,7 +4405,7 @@ "h": 9, "w": 12, "x": 12, - "y": 108 + "y": 45 }, "heatmap": {}, "highlightCards": true, @@ -4251,13 +4467,13 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 19 + "y": 97 }, "id": 2, "legend": { @@ -4357,20 +4573,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 19 + "y": 97 }, "id": 41, "legend": { @@ -4439,20 +4659,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 26 + "y": 104 }, "id": 42, "legend": { @@ -4520,20 +4744,24 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 26 + "y": 104 }, "id": 43, "legend": { @@ -4601,7 +4829,11 @@ "min": null, "show": true } - ] + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, @@ -4623,7 +4855,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -4644,7 +4876,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4708,7 +4940,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "fill": 1, "gridPos": { "h": 9, @@ -4729,7 +4961,7 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "percentage": false, "pointradius": 5, "points": false, @@ -4856,9 +5088,19 @@ "selected": false, "text": "5m", "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "15m", + "value": "15m" } ], - "query": "30s,1m,2m,5m", + "query": "30s,1m,2m,5m,10m,15m", "refresh": 2, "type": "interval" }, @@ -4872,7 +5114,7 @@ "multi": false, "name": "instance", 
"options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, instance)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", "refresh": 2, "regex": "", "sort": 0, @@ -4895,7 +5137,7 @@ "multiFormat": "regex values", "name": "job", "options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, job)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", "refresh": 2, "refresh_on_load": false, "regex": "", @@ -4919,7 +5161,7 @@ "multiFormat": "regex values", "name": "index", "options": [], - "query": "label_values(process_cpu_user_seconds_total{job=~\"synapse.*\"}, index)", + "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)", "refresh": 2, "refresh_on_load": false, "regex": "", @@ -4965,5 +5207,5 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "version": 127 + "version": 3 } \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 777976217d..db44c02a92 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,13 @@ -FROM docker.io/python:2-alpine3.8 +ARG PYTHON_VERSION=2 -RUN apk add --no-cache --virtual .nacl_deps \ +### +### Stage 0: builder +### +FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder + +# install the OS build deps + +RUN apk add \ build-base \ libffi-dev \ libjpeg-turbo-dev \ @@ -8,25 +15,46 @@ RUN apk add --no-cache --virtual .nacl_deps \ libxslt-dev \ linux-headers \ postgresql-dev \ - su-exec \ zlib-dev -COPY . /synapse +# build things which have slow build steps, before we copy synapse, so that +# the layer can be cached. +# +# (we really just care about caching a wheel here, as the "pip install" below +# will install them again.) -# A wheel cache may be provided in ./cache for faster build -RUN cd /synapse \ - && pip install --upgrade \ +RUN pip install --prefix="/install" --no-warn-script-location \ + cryptography \ + msgpack-python \ + pillow \ + pynacl + +# now install synapse and all of the python deps to /install. + +COPY . /synapse +RUN pip install --prefix="/install" --no-warn-script-location \ lxml \ - pip \ psycopg2 \ - setuptools \ - && mkdir -p /synapse/cache \ - && pip install -f /synapse/cache --upgrade --process-dependency-links . 
\ - && mv /synapse/docker/start.py /synapse/docker/conf / \ - && rm -rf \ - setup.cfg \ - setup.py \ - synapse + /synapse + +### +### Stage 1: runtime +### + +FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 + +RUN apk add --no-cache --virtual .runtime_deps \ + libffi \ + libjpeg-turbo \ + libressl \ + libxslt \ + libpq \ + zlib \ + su-exec + +COPY --from=builder /install /usr/local +COPY ./docker/start.py /start.py +COPY ./docker/conf /conf VOLUME ["/data"] diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests new file mode 100644 index 0000000000..7da8eeb9eb --- /dev/null +++ b/docker/Dockerfile-pgtests @@ -0,0 +1,12 @@ +# Use the Sytest image that comes with a lot of the build dependencies +# pre-installed +FROM matrixdotorg/sytest:latest + +# The Sytest image doesn't come with python, so install that +RUN apt-get -qq install -y python python-dev python-pip + +# We need tox to run the tests in run_pg_tests.sh +RUN pip install tox + +ADD run_pg_tests.sh /pg_tests.sh +ENTRYPOINT /pg_tests.sh diff --git a/docker/README.md b/docker/README.md index 038c78f7c0..3c00d1e948 100644 --- a/docker/README.md +++ b/docker/README.md @@ -88,6 +88,7 @@ variables are available for configuration: * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN uris to enable TURN for this homeserver. * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required. +* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`]. Shared secrets, that will be initialized to random values if not set: diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index 6bc25bb45f..cfe88788f2 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -85,7 +85,7 @@ federation_rc_concurrent: 3 media_store_path: "/data/media" uploads_path: "/data/uploads" -max_upload_size: "10M" +max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}" max_image_pixels: "32M" dynamic_thumbnails: false diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh new file mode 100755 index 0000000000..e77424c41a --- /dev/null +++ b/docker/run_pg_tests.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# This script runs the PostgreSQL tests inside a Docker container. It expects +# the relevant source files to be mounted into /src (done automatically by the +# caller script). It will set up the database, run it, and then use the tox +# configuration to run the tests. 
+ +set -e + +# Set PGUSER so Synapse's tests know what user to connect to the database with +export PGUSER=postgres + +# Initialise & start the database +su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres +su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres + +# Run the tests +cd /src +export TRIAL_FLAGS="-j 4" +tox --workdir=/tmp -e py27-postgres diff --git a/docker/start.py b/docker/start.py index 90e8b9c51a..346df8c87f 100755 --- a/docker/start.py +++ b/docker/start.py @@ -5,6 +5,7 @@ import os import sys import subprocess import glob +import codecs # Utility functions convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ)) @@ -23,7 +24,7 @@ def generate_secrets(environ, secrets): with open(filename) as handle: value = handle.read() else: print("Generating a random secret for {}".format(name)) - value = os.urandom(32).encode("hex") + value = codecs.encode(os.urandom(32), "hex").decode() with open(filename, "w") as handle: handle.write(value) environ[secret] = value diff --git a/jenkins-dendron-haproxy-postgres.sh b/jenkins-dendron-haproxy-postgres.sh deleted file mode 100755 index 07979bf8b8..0000000000 --- a/jenkins-dendron-haproxy-postgres.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git -./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git -./dendron/jenkins/build_dendron.sh -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ - --dendron $WORKSPACE/dendron/bin/dendron \ - --haproxy \ diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh deleted file mode 100755 index 3b932fe340..0000000000 --- a/jenkins-dendron-postgres.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git -./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git -./dendron/jenkins/build_dendron.sh -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ - --dendron $WORKSPACE/dendron/bin/dendron \ diff --git a/jenkins-flake8.sh b/jenkins-flake8.sh deleted file mode 100755 index 11f1cab6c8..0000000000 --- a/jenkins-flake8.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -# Output test results as junit xml -export TRIAL_FLAGS="--reporter=subunit" -export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" -# Write coverage reports to a separate file for each process -export COVERAGE_OPTS="-p" -export DUMP_COVERAGE_COMMAND="coverage help" - -# Output flake8 violations to violations.flake8.log -export PEP8SUFFIX="--output-file=violations.flake8.log" - -rm .coverage* || echo "No coverage files to remove" - -tox -e 
packaging -e pep8 diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh deleted file mode 100755 index 1afb736394..0000000000 --- a/jenkins-postgres.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git - -./sytest/jenkins/prep_sytest_for_postgres.sh - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh deleted file mode 100755 index baf4713a01..0000000000 --- a/jenkins-sqlite.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export WORKSPACE -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -./jenkins/prepare_synapse.sh -./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git - -./sytest/jenkins/install_and_run.sh \ - --python $WORKSPACE/.tox/py27/bin/python \ - --synapse-directory $WORKSPACE \ diff --git a/jenkins-unittests.sh b/jenkins-unittests.sh deleted file mode 100755 index 4c2f103e80..0000000000 --- a/jenkins-unittests.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -set -eux - -: ${WORKSPACE:="$(pwd)"} - -export PYTHONDONTWRITEBYTECODE=yep -export SYNAPSE_CACHE_FACTOR=1 - -# Output test results as junit xml -export TRIAL_FLAGS="--reporter=subunit" -export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml" -# Write coverage reports to a separate file for each process -export COVERAGE_OPTS="-p" -export DUMP_COVERAGE_COMMAND="coverage help" - -# Output flake8 violations to violations.flake8.log -# Don't exit with non-0 status code on Jenkins, -# so that the build steps continue and a later step can decided whether to -# UNSTABLE or FAILURE this build. -export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?" - -rm .coverage* || echo "No coverage files to remove" - -tox --notest -e py27 -TOX_BIN=$WORKSPACE/.tox/py27/bin -python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install -$TOX_BIN/pip install lxml - -tox -e py27 diff --git a/jenkins/clone.sh b/jenkins/clone.sh deleted file mode 100755 index ab30ac7782..0000000000 --- a/jenkins/clone.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash - -# This clones a project from github into a named subdirectory -# If the project has a branch with the same name as this branch -# then it will checkout that branch after cloning. -# Otherwise it will checkout "origin/develop." -# The first argument is the name of the directory to checkout -# the branch into. -# The second argument is the URL of the remote repository to checkout. -# Usually something like https://github.com/matrix-org/sytest.git - -set -eux - -NAME=$1 -PROJECT=$2 -BASE=".$NAME-base" - -# Update our mirror. -if [ ! -d ".$NAME-base" ]; then - # Create a local mirror of the source repository. - # This saves us from having to download the entire repository - # when this script is next run. - git clone "$PROJECT" "$BASE" --mirror -else - # Fetch any updates from the source repository. - (cd "$BASE"; git fetch -p) -fi - -# Remove the existing repository so that we have a clean copy -rm -rf "$NAME" -# Cloning with --shared means that we will share portions of the -# .git directory with our local mirror. 
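The docker/start.py fix above swaps os.urandom(32).encode("hex") for codecs.encode(os.urandom(32), "hex").decode(): under Python 3, bytes has no .encode() and the "hex" string codec is gone, while codecs.encode() works on both interpreters. A quick check:

    import codecs
    import os

    # bytes.encode("hex") only existed on Python 2; this form works on 2 and 3.
    secret = codecs.encode(os.urandom(32), "hex").decode()
    assert len(secret) == 64
    print(secret)
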
-git clone "$BASE" "$NAME" --shared - -# Jenkins may have supplied us with the name of the branch in the -# environment. Otherwise we will have to guess based on the current -# commit. -: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"} -cd "$NAME" -# check out the relevant branch -git checkout "${GIT_BRANCH}" || ( - echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" - git checkout "origin/develop" -) diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh index a30179f2aa..d95ca846c4 100755 --- a/jenkins/prepare_synapse.sh +++ b/jenkins/prepare_synapse.sh @@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5' $TOX_BIN/pip install 'pip>=10' { python synapse/python_dependencies.py - echo lxml psycopg2 + echo lxml } | xargs $TOX_BIN/pip install diff --git a/scripts-dev/copyrighter-sql.pl b/scripts-dev/copyrighter-sql.pl deleted file mode 100755 index 13e630fc11..0000000000 --- a/scripts-dev/copyrighter-sql.pl +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/perl -pi -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -$copyright = < | sqlite3 homeserver.db" - echo "or" - echo " nuke-room-from-db.sh | psql --dbname=synapse" - exit -fi - -ROOMID="$1" - -cat <= self.hs.config.max_mau_value: raise ResourceLimitError( 403, "Monthly Active User Limit Exceeded", - - admin_uri=self.hs.config.admin_uri, + admin_contact=self.hs.config.admin_contact, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, limit_type="monthly_active_user" ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 140dbfe8b8..4124469442 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -240,10 +240,10 @@ class ResourceLimitError(SynapseError): def __init__( self, code, msg, errcode=Codes.RESOURCE_LIMIT_EXCEEDED, - admin_uri=None, + admin_contact=None, limit_type=None, ): - self.admin_uri = admin_uri + self.admin_contact = admin_contact self.limit_type = limit_type super(ResourceLimitError, self).__init__(code, msg, errcode=errcode) @@ -251,7 +251,7 @@ class ResourceLimitError(SynapseError): return cs_error( self.msg, self.errcode, - admin_uri=self.admin_uri, + admin_contact=self.admin_contact, limit_type=self.limit_type ) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 186831e118..eed8c67e6a 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -226,7 +226,7 @@ class Filtering(object): jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA, format_checker=FormatChecker()) except jsonschema.ValidationError as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) class FilterCollection(object): @@ -251,6 +251,7 @@ class FilterCollection(object): "include_leave", False ) self.event_fields = filter_json.get("event_fields", []) + self.event_format = filter_json.get("event_format", "client") def __repr__(self): return "" % (json.dumps(self._filter_json),) diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 71347912f1..6d9f1ca0ef 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py 
@@ -64,7 +64,7 @@ class ConsentURIBuilder(object): """ mac = hmac.new( key=self._hmac_secret, - msg=user_id, + msg=user_id.encode('ascii'), digestmod=sha256, ).hexdigest() consent_uri = "%s_matrix/consent?%s" % ( diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index 3b6b9368b8..c3afcc573b 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -24,7 +24,7 @@ try: python_dependencies.check_requirements() except python_dependencies.MissingRequirementError as e: message = "\n".join([ - "Missing Requirement: %s" % (e.message,), + "Missing Requirement: %s" % (str(e),), "To install run:", " pip install --upgrade --force \"%s\"" % (e.dependency,), "", diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index 3348a8ec6d..8559e141af 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -51,10 +51,7 @@ class AppserviceSlaveStore( class AppserviceServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = AppserviceSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = AppserviceSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -139,7 +136,7 @@ def start(config_options): "Synapse appservice", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.appservice" @@ -175,7 +172,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ab79a45646..76aed8c60a 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -74,10 +74,7 @@ class ClientReaderSlavedStore( class ClientReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = ClientReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -156,7 +153,7 @@ def start(config_options): "Synapse client reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.client_reader" @@ -184,7 +181,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index a34c89fa99..9060ab14f6 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -90,10 +90,7 @@ class EventCreatorSlavedStore( class EventCreatorServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = EventCreatorSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -172,7 +169,7 @@ def start(config_options): "Synapse event creator", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.event_creator" @@ -202,7 +199,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): 
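The synapse/api/urls.py hunk above is another bytes/str fix: on Python 3, hmac.new() refuses str input, so the user ID must be encoded before MACing. Standalone, with an invented secret standing in for the configured one:

    import hmac
    from hashlib import sha256

    hmac_secret = b"form-secret"      # invented; the real key comes from config
    user_id = "@user:example.com"

    mac = hmac.new(
        key=hmac_secret,
        msg=user_id.encode("ascii"),  # bytes required on Python 3
        digestmod=sha256,
    ).hexdigest()
    print(mac)
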
- ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 7d8105778d..228a297fb8 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -72,10 +72,7 @@ class FederationReaderSlavedStore( class FederationReaderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationReaderSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -143,7 +140,7 @@ def start(config_options): "Synapse federation reader", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_reader" @@ -171,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index d59007099b..e9a99d76e1 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -78,10 +78,7 @@ class FederationSenderSlaveStore( class FederationSenderServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FederationSenderSlaveStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -163,7 +160,7 @@ def start(config_options): "Synapse federation sender", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.federation_sender" @@ -204,7 +201,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) _base.start_worker_reactor("synapse-federation-sender", config) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8d484c1cd4..fc4b25de1c 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -148,10 +148,7 @@ class FrontendProxySlavedStore( class FrontendProxyServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = FrontendProxySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -231,7 +228,7 @@ def start(config_options): "Synapse frontend proxy", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.frontend_proxy" @@ -261,7 +258,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 005921dcf7..e3f0d99a3f 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,7 +62,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import 
KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer -from synapse.storage import are_all_users_on_domain +from synapse.storage import DataStore, are_all_users_on_domain from synapse.storage.engines import IncorrectDatabaseSetup, create_engine from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database from synapse.util.caches import CACHE_SIZE_FACTOR @@ -111,6 +111,8 @@ def build_resource_for_web_client(hs): class SynapseHomeServer(HomeServer): + DATASTORE_CLASS = DataStore + def _listener_http(self, config, listener_config): port = listener_config["port"] bind_addresses = listener_config["bind_addresses"] @@ -299,12 +301,16 @@ class SynapseHomeServer(HomeServer): try: database_engine.check_database(db_conn.cursor()) except IncorrectDatabaseSetup as e: - quit_with_error(e.message) + quit_with_error(str(e)) # Gauges to expose monthly active user control metrics current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +registered_reserved_users_mau_gauge = Gauge( + "synapse_admin_mau:registered_reserved_users", + "Registered users with reserved threepids" +) def setup(config_options): @@ -322,7 +328,7 @@ def setup(config_options): config_options, ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) if not config: @@ -356,13 +362,13 @@ def setup(config_options): logger.info("Preparing database: %s...", config.database_config['name']) try: - db_conn = hs.get_db_conn(run_new_connection=False) - prepare_database(db_conn, database_engine, config=config) - database_engine.on_new_connection(db_conn) + with hs.get_db_conn(run_new_connection=False) as db_conn: + prepare_database(db_conn, database_engine, config=config) + database_engine.on_new_connection(db_conn) - hs.run_startup_checks(db_conn, database_engine) + hs.run_startup_checks(db_conn, database_engine) - db_conn.commit() + db_conn.commit() except UpgradeDatabaseException: sys.stderr.write( "\nFailed to upgrade database.\n" @@ -378,10 +384,8 @@ def setup(config_options): def start(): hs.get_pusherpool().start() - hs.get_state_handler().start_caching() hs.get_datastore().start_profiling() hs.get_datastore().start_doing_background_updates() - hs.get_federation_client().start_get_pdu_cache() reactor.callWhenRunning(start) @@ -451,6 +455,10 @@ def run(hs): stats["homeserver"] = hs.config.server_name stats["timestamp"] = now stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) stats["total_users"] = yield hs.get_datastore().count_all_users() total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users() @@ -529,10 +537,14 @@ def run(hs): @defer.inlineCallbacks def generate_monthly_active_users(): - count = 0 + current_mau_count = 0 + reserved_count = 0 + store = hs.get_datastore() if hs.config.limit_usage_by_mau: - count = yield hs.get_datastore().get_monthly_active_count() - current_mau_gauge.set(float(count)) + current_mau_count = yield store.get_monthly_active_count() + reserved_count = yield store.get_registered_reserved_users_count() + current_mau_gauge.set(float(current_mau_count)) + registered_reserved_users_mau_gauge.set(float(reserved_count)) max_mau_gauge.set(float(hs.config.max_mau_value)) hs.get_datastore().initialise_reserved_users( diff --git 
a/synapse/app/media_repository.py b/synapse/app/media_repository.py index fd1f6cbf7e..acc0487adc 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -60,10 +60,7 @@ class MediaRepositorySlavedStore( class MediaRepositoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = MediaRepositorySlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -136,7 +133,7 @@ def start(config_options): "Synapse media repository", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.media_repository" @@ -171,7 +168,6 @@ def start(config_options): ss.start_listening(config.worker_listeners) def start(): - ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() reactor.callWhenRunning(start) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index a4fc7e91fa..630dcda478 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -78,10 +78,7 @@ class PusherSlaveStore( class PusherServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = PusherSlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = PusherSlaveStore def remove_pusher(self, app_id, push_key, user_id): self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id) @@ -194,7 +191,7 @@ def start(config_options): "Synapse pusher", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.pusher" @@ -231,7 +228,6 @@ def start(config_options): def start(): ps.get_pusherpool().start() ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 27e1998660..9a7fc6ee9d 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -249,10 +249,7 @@ class SynchrotronApplicationService(object): class SynchrotronServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = SynchrotronSlavedStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -413,7 +410,7 @@ def start(config_options): "Synapse synchrotron", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.synchrotron" @@ -438,7 +435,6 @@ def start(config_options): def start(): ss.get_datastore().start_profiling() - ss.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py deleted file mode 100755 index d658f967ba..0000000000 --- a/synapse/app/synctl.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
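Each worker app in this patch replaces its copy-pasted setup() override with a single DATASTORE_CLASS attribute, leaving the base class to construct the store generically. The matching HomeServer.setup() change lives outside these hunks, so the base-class body in this sketch is an assumption, and DummyStore stands in for the real slaved stores:

    class DummyStore(object):
        def __init__(self, db_conn, hs):
            self.db_conn, self.hs = db_conn, hs

    class HomeServer(object):
        DATASTORE_CLASS = None  # each app overrides this

        def get_db_conn(self):
            return object()  # stand-in for a real connection

        def setup(self):
            # One generic implementation replaces N identical overrides.
            self.datastore = self.DATASTORE_CLASS(self.get_db_conn(), self)

    class MediaRepositoryServer(HomeServer):
        DATASTORE_CLASS = DummyStore  # the real app uses MediaRepositorySlavedStore

    hs = MediaRepositoryServer()
    hs.setup()
    assert isinstance(hs.datastore, DummyStore)
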
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import collections -import errno -import glob -import os -import os.path -import signal -import subprocess -import sys -import time - -from six import iteritems - -import yaml - -SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] - -GREEN = "\x1b[1;32m" -YELLOW = "\x1b[1;33m" -RED = "\x1b[1;31m" -NORMAL = "\x1b[m" - - -def pid_running(pid): - try: - os.kill(pid, 0) - return True - except OSError as err: - if err.errno == errno.EPERM: - return True - return False - - -def write(message, colour=NORMAL, stream=sys.stdout): - if colour == NORMAL: - stream.write(message + "\n") - else: - stream.write(colour + message + NORMAL + "\n") - - -def abort(message, colour=RED, stream=sys.stderr): - write(message, colour, stream) - sys.exit(1) - - -def start(configfile): - write("Starting ...") - args = SYNAPSE - args.extend(["--daemonize", "-c", configfile]) - - try: - subprocess.check_call(args) - write("started synapse.app.homeserver(%r)" % - (configfile,), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting (exit code: %d); see above for logs" % e.returncode, - colour=RED, - ) - - -def start_worker(app, configfile, worker_configfile): - args = [ - "python", "-B", - "-m", app, - "-c", configfile, - "-c", worker_configfile - ] - - try: - subprocess.check_call(args) - write("started %s(%r)" % (app, worker_configfile), colour=GREEN) - except subprocess.CalledProcessError as e: - write( - "error starting %s(%r) (exit code: %d); see above for logs" % ( - app, worker_configfile, e.returncode, - ), - colour=RED, - ) - - -def stop(pidfile, app): - if os.path.exists(pidfile): - pid = int(open(pidfile).read()) - try: - os.kill(pid, signal.SIGTERM) - write("stopped %s" % (app,), colour=GREEN) - except OSError as err: - if err.errno == errno.ESRCH: - write("%s not running" % (app,), colour=YELLOW) - elif err.errno == errno.EPERM: - abort("Cannot stop %s: Operation not permitted" % (app,)) - else: - abort("Cannot stop %s: Unknown error" % (app,)) - - -Worker = collections.namedtuple("Worker", [ - "app", "configfile", "pidfile", "cache_factor" -]) - - -def main(): - - parser = argparse.ArgumentParser() - - parser.add_argument( - "action", - choices=["start", "stop", "restart"], - help="whether to start, stop or restart the synapse", - ) - parser.add_argument( - "configfile", - nargs="?", - default="homeserver.yaml", - help="the homeserver config file, defaults to homeserver.yaml", - ) - parser.add_argument( - "-w", "--worker", - metavar="WORKERCONFIG", - help="start or stop a single worker", - ) - parser.add_argument( - "-a", "--all-processes", - metavar="WORKERCONFIGDIR", - help="start or stop all the workers in the given directory" - " and the main synapse process", - ) - - options = parser.parse_args() - - if options.worker and options.all_processes: - write( - 'Cannot use "--worker" with "--all-processes"', - stream=sys.stderr - ) - sys.exit(1) - - configfile = options.configfile - - if not os.path.exists(configfile): - write( - "No config file found\n" - "To generate a config file, run '%s -c %s --generate-config" - " 
--server-name='\n" % ( - " ".join(SYNAPSE), options.configfile - ), - stream=sys.stderr, - ) - sys.exit(1) - - with open(configfile) as stream: - config = yaml.load(stream) - - pidfile = config["pid_file"] - cache_factor = config.get("synctl_cache_factor") - start_stop_synapse = True - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - - cache_factors = config.get("synctl_cache_factors", {}) - for cache_name, factor in iteritems(cache_factors): - os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - - worker_configfiles = [] - if options.worker: - start_stop_synapse = False - worker_configfile = options.worker - if not os.path.exists(worker_configfile): - write( - "No worker config found at %r" % (worker_configfile,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.append(worker_configfile) - - if options.all_processes: - # To start the main synapse with -a you need to add a worker file - # with worker_app == "synapse.app.homeserver" - start_stop_synapse = False - worker_configdir = options.all_processes - if not os.path.isdir(worker_configdir): - write( - "No worker config directory found at %r" % (worker_configdir,), - stream=sys.stderr, - ) - sys.exit(1) - worker_configfiles.extend(sorted(glob.glob( - os.path.join(worker_configdir, "*.yaml") - ))) - - workers = [] - for worker_configfile in worker_configfiles: - with open(worker_configfile) as stream: - worker_config = yaml.load(stream) - worker_app = worker_config["worker_app"] - if worker_app == "synapse.app.homeserver": - # We need to special case all of this to pick up options that may - # be set in the main config file or in this worker config file. - worker_pidfile = ( - worker_config.get("pid_file") - or pidfile - ) - worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor - daemonize = worker_config.get("daemonize") or config.get("daemonize") - assert daemonize, "Main process must have daemonize set to true" - - # The master process doesn't support using worker_* config. 
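Earlier in the synctl script being deleted, liveness was probed with os.kill(pid, 0): signal 0 delivers nothing but still runs the existence and permission checks, so ESRCH means the process is gone while EPERM means it is alive but owned by someone else. The idiom in isolation:

    import errno
    import os

    def pid_running(pid):
        try:
            os.kill(pid, 0)   # signal 0: existence/permission check only
            return True
        except OSError as err:
            # EPERM: the process exists but we may not signal it.
            return err.errno == errno.EPERM

    print(pid_running(os.getpid()))  # True: we can always signal ourselves
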
- for key in worker_config: - if key == "worker_app": # But we allow worker_app - continue - assert not key.startswith("worker_"), \ - "Main process cannot use worker_* config" - else: - worker_pidfile = worker_config["worker_pid_file"] - worker_daemonize = worker_config["worker_daemonize"] - assert worker_daemonize, "In config %r: expected '%s' to be True" % ( - worker_configfile, "worker_daemonize") - worker_cache_factor = worker_config.get("synctl_cache_factor") - workers.append(Worker( - worker_app, worker_configfile, worker_pidfile, worker_cache_factor, - )) - - action = options.action - - if action == "stop" or action == "restart": - for worker in workers: - stop(worker.pidfile, worker.app) - - if start_stop_synapse: - stop(pidfile, "synapse.app.homeserver") - - # Wait for synapse to actually shutdown before starting it again - if action == "restart": - running_pids = [] - if start_stop_synapse and os.path.exists(pidfile): - running_pids.append(int(open(pidfile).read())) - for worker in workers: - if os.path.exists(worker.pidfile): - running_pids.append(int(open(worker.pidfile).read())) - if len(running_pids) > 0: - write("Waiting for process to exit before restarting...") - for running_pid in running_pids: - while pid_running(running_pid): - time.sleep(0.2) - write("All processes exited; now restarting...") - - if action == "start" or action == "restart": - if start_stop_synapse: - # Check if synapse is already running - if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): - abort("synapse.app.homeserver already running") - start(configfile) - - for worker in workers: - if worker.cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) - - start_worker(worker.app, configfile, worker.configfile) - - if cache_factor: - os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) - else: - os.environ.pop("SYNAPSE_CACHE_FACTOR", None) - - -if __name__ == "__main__": - main() diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 1388a42b59..0a5f62b509 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -94,10 +94,7 @@ class UserDirectorySlaveStore( class UserDirectoryServer(HomeServer): - def setup(self): - logger.info("Setting up.") - self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self) - logger.info("Finished setting up.") + DATASTORE_CLASS = UserDirectorySlaveStore def _listen_http(self, listener_config): port = listener_config["port"] @@ -191,7 +188,7 @@ def start(config_options): "Synapse user directory", config_options ) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) assert config.worker_app == "synapse.app.user_dir" @@ -232,7 +229,6 @@ def start(config_options): def start(): ps.get_datastore().start_profiling() - ps.get_state_handler().start_caching() reactor.callWhenRunning(start) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 6980e5890e..9ccc5a80fc 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import urllib + +from six.moves import urllib from prometheus_client import Counter @@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_user(self, service, user_id): if service.url is None: defer.returnValue(False) - uri = service.url + ("/users/%s" % urllib.quote(user_id)) + uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, { @@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient): def query_alias(self, service, alias): if service.url is None: defer.returnValue(False) - uri = service.url + ("/rooms/%s" % urllib.quote(alias)) + uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, { @@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient): service.url, APP_SERVICE_PREFIX, kind, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: response = yield self.get_json(uri, fields) @@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient): uri = "%s%s/thirdparty/protocol/%s" % ( service.url, APP_SERVICE_PREFIX, - urllib.quote(protocol) + urllib.parse.quote(protocol) ) try: info = yield self.get_json(uri, {}) @@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient): txn_id = str(txn_id) uri = service.url + ("/transactions/%s" % - urllib.quote(txn_id)) + urllib.parse.quote(txn_id)) try: yield self.put_json( uri=uri, diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py index 58c97a70af..8fccf573ee 100644 --- a/synapse/config/__main__.py +++ b/synapse/config/__main__.py @@ -25,7 +25,7 @@ if __name__ == "__main__": try: config = HomeServerConfig.load_config("", sys.argv[3:]) except ConfigError as e: - sys.stderr.write("\n" + e.message + "\n") + sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) print (getattr(config, key)) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 2fd9c48abf..b8d5690f2b 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -21,7 +21,7 @@ from .consent_config import ConsentConfig from .database import DatabaseConfig from .emailconfig import EmailConfig from .groups import GroupsConfig -from .jwt import JWTConfig +from .jwt_config import JWTConfig from .key import KeyConfig from .logger import LoggingConfig from .metrics import MetricsConfig diff --git a/synapse/config/jwt.py b/synapse/config/jwt_config.py similarity index 100% rename from synapse/config/jwt.py rename to synapse/config/jwt_config.py diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 3f187adfc8..e9a936118d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -227,7 +227,22 @@ def setup_logging(config, use_worker_options=False): # # However this may not be too much of a problem if we are just writing to a file. 
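The appservice client hunks above swap the Python 2-only urllib module for six.moves.urllib, which papers over the 2/3 split (urllib, urllib2 and urlparse versus urllib.parse and friends) behind one import:

    from six.moves import urllib

    user_id = "@user:example.com"
    # Python 2 resolves this to urllib.quote, Python 3 to urllib.parse.quote.
    print("/users/%s" % urllib.parse.quote(user_id))
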
observer = STDLibLogObserver() + + def _log(event): + + if "log_text" in event: + if event["log_text"].startswith("DNSDatagramProtocol starting on "): + return + + if event["log_text"].startswith("(UDP Port "): + return + + if event["log_text"].startswith("Timing out client"): + return + + return observer(event) + globalLogBeginner.beginLoggingTo( - [observer], + [_log], redirectStandardIO=not config.no_redirect_stdio, ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 8eecd28e7d..c1c7c0105e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -93,7 +93,7 @@ class ServerConfig(Config): # Admin uri to direct users at should their instance become blocked # due to resource constraints - self.admin_uri = config.get("admin_uri", None) + self.admin_contact = config.get("admin_contact", None) # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist = None @@ -357,7 +357,7 @@ class ServerConfig(Config): # Homeserver blocking # # How to reach the server admin, used in ResourceLimitError - # admin_uri: 'mailto:admin@server.com' + # admin_contact: 'mailto:admin@server.com' # # Global block config # @@ -404,6 +404,23 @@ class ServerConfig(Config): " service on the given port.") +def is_threepid_reserved(config, threepid): + """Check the threepid against the reserved threepid config + Args: + config(ServerConfig) - to access server config attributes + threepid(dict) - The threepid to test for + + Returns: + boolean Is the threepid undertest reserved_user + """ + + for tp in config.mau_limits_reserved_threepids: + if (threepid['medium'] == tp['medium'] + and threepid['address'] == tp['address']): + return True + return False + + def read_gc_thresholds(thresholds): """Reads the three integer thresholds for garbage collection. Ensures that the thresholds are integers if thresholds are supplied. diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 1a391adec1..02b76dfcfb 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object): def get_options(self, host): return ClientTLSOptions( - host.decode('utf-8'), + host, CertificateOptions(verify=False).getContext() ) diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index e94400b8e2..57d4665e84 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -50,7 +50,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): defer.returnValue((server_response, server_certificate)) except SynapseKeyClientError as e: logger.warn("Error getting key for %r: %s", server_name, e) - if e.status.startswith("4"): + if e.status.startswith(b"4"): # Don't retry for 4xx responses. 
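The new is_threepid_reserved() helper in synapse/config/server.py above matches on both medium and address, so a reserved email exempts exactly one address. Assuming that helper is in scope, and with an invented reserved list:

    class FakeConfig(object):
        mau_limits_reserved_threepids = [
            {"medium": "email", "address": "admin@example.com"},  # invented entry
        ]

    config = FakeConfig()
    print(is_threepid_reserved(config, {"medium": "email", "address": "admin@example.com"}))  # True
    print(is_threepid_reserved(config, {"medium": "email", "address": "other@example.com"}))  # False
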
raise IOError("Cannot get key for %r" % server_name) except (ConnectError, DomainError) as e: @@ -82,6 +82,12 @@ class SynapseKeyClientProtocol(HTTPClient): self._peer = self.transport.getPeer() logger.debug("Connected to %s", self._peer) + if not isinstance(self.path, bytes): + self.path = self.path.encode('ascii') + + if not isinstance(self.host, bytes): + self.host = self.host.encode('ascii') + self.sendCommand(b"GET", self.path) if self.host: self.sendHeader(b"Host", self.host) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 30e2742102..d89f94c219 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -16,9 +16,10 @@ import hashlib import logging -import urllib from collections import namedtuple +from six.moves import urllib + from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -40,6 +41,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.crypto.keyclient import fetch_server_key from synapse.util import logcontext, unwrapFirstError from synapse.util.logcontext import ( + LoggingContext, PreserveLoggingContext, preserve_fn, run_in_background, @@ -216,23 +218,34 @@ class Keyring(object): servers have completed. Follows the synapse rules of logcontext preservation. """ + loop_count = 1 while True: wait_on = [ - self.key_downloads[server_name] + (server_name, self.key_downloads[server_name]) for server_name in server_names if server_name in self.key_downloads ] - if wait_on: - with PreserveLoggingContext(): - yield defer.DeferredList(wait_on) - else: + if not wait_on: break + logger.info( + "Waiting for existing lookups for %s to complete [loop %i]", + [w[0] for w in wait_on], loop_count, + ) + with PreserveLoggingContext(): + yield defer.DeferredList((w[1] for w in wait_on)) + + loop_count += 1 + + ctx = LoggingContext.current_context() def rm(r, server_name_): - self.key_downloads.pop(server_name_, None) + with PreserveLoggingContext(ctx): + logger.debug("Releasing key lookup lock on %s", server_name_) + self.key_downloads.pop(server_name_, None) return r for server_name, deferred in server_to_deferred.items(): + logger.debug("Got key lookup lock on %s", server_name) self.key_downloads[server_name] = deferred deferred.addBoth(rm, server_name) @@ -432,7 +445,7 @@ class Keyring(object): # an incoming request. query_response = yield self.client.post_json( destination=perspective_name, - path=b"/_matrix/key/v2/query", + path="/_matrix/key/v2/query", data={ u"server_keys": { server_name: { @@ -513,8 +526,8 @@ class Keyring(object): (response, tls_certificate) = yield fetch_server_key( server_name, self.hs.tls_client_options_factory, - path=(b"/_matrix/key/v2/server/%s" % ( - urllib.quote(requested_key_id), + path=("/_matrix/key/v2/server/%s" % ( + urllib.parse.quote(requested_key_id), )).encode("ascii"), ) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 6baeccca38..af3eee95b9 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): creation_event = auth_events.get((EventTypes.Create, ""), None) if not creation_event: - raise SynapseError( + raise AuthError( 403, - "Room %r does not exist" % (event.room_id,) + "No create event in auth events", ) creating_domain = get_domain_from_id(event.room_id) @@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True): if user_level < invite_level: raise AuthError( - 403, ( - "You cannot issue a third party invite for %s." 
% - (event.content.display_name,) - ) + 403, "You don't have permission to invite users", ) else: logger.debug("Allowing! %s", event) @@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events): if user_level < invite_level: raise AuthError( - 403, "You cannot invite user %s." % target_user_id + 403, "You don't have permission to invite users", ) elif Membership.JOIN == membership: # Joins are valid iff caller == target and they were: diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 51f9084b90..12f1eb0a3e 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -13,13 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +from distutils.util import strtobool + +import six + from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents -# bugs where we accidentally share e.g. signature dicts. However, converting -# a dict to frozen_dicts is expensive. +# bugs where we accidentally share e.g. signature dicts. However, converting a +# dict to frozen_dicts is expensive. +# +# NOTE: This is overridden from the configuration by the Synapse worker apps, but +# for the sake of tests, it is set here while it cannot be configured on the +# homeserver object itself. +USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0")) class _EventInternalMetadata(object): @@ -147,6 +156,9 @@ class EventBase(object): def items(self): return list(self._event_dict.items()) + def keys(self): + return six.iterkeys(self._event_dict) + class FrozenEvent(EventBase): def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
""" - - redacted_pdus = [ - prune_event(pdu) - for pdu in pdus - ] - - deferreds = self.keyring.verify_json_objects_for_server([ - (p.origin, p.get_pdu_json()) - for p in redacted_pdus - ]) + deferreds = _check_sigs_on_pdus(self.keyring, pdus) ctx = logcontext.LoggingContext.current_context() - def callback(_, pdu, redacted): + def callback(_, pdu): with logcontext.PreserveLoggingContext(ctx): if not check_event_content_hash(pdu): - logger.warn( - "Event content has been tampered, redacting %s: %s", - pdu.event_id, pdu.get_pdu_json() - ) - return redacted + # let's try to distinguish between failures because the event was + # redacted (which are somewhat expected) vs actual ball-tampering + # incidents. + # + # This is just a heuristic, so we just assume that if the keys are + # about the same between the redacted and received events, then the + # received event was probably a redacted copy (but we then use our + # *actual* redacted copy to be on the safe side.) + redacted_event = prune_event(pdu) + if ( + set(redacted_event.keys()) == set(pdu.keys()) and + set(six.iterkeys(redacted_event.content)) + == set(six.iterkeys(pdu.content)) + ): + logger.info( + "Event %s seems to have been redacted; using our redacted " + "copy", + pdu.event_id, + ) + else: + logger.warning( + "Event %s content has been tampered, redacting", + pdu.event_id, pdu.get_pdu_json(), + ) + return redacted_event if self.spam_checker.check_event_for_spam(pdu): logger.warn( "Event contains spam, redacting %s: %s", pdu.event_id, pdu.get_pdu_json() ) - return redacted + return prune_event(pdu) return pdu @@ -168,21 +182,121 @@ class FederationBase(object): failure.trap(SynapseError) with logcontext.PreserveLoggingContext(ctx): logger.warn( - "Signature check failed for %s", - pdu.event_id, + "Signature check failed for %s: %s", + pdu.event_id, failure.getErrorMessage(), ) return failure - for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus): + for deferred, pdu in zip(deferreds, pdus): deferred.addCallbacks( callback, errback, - callbackArgs=[pdu, redacted], + callbackArgs=[pdu], errbackArgs=[pdu], ) return deferreds +class PduToCheckSig(namedtuple("PduToCheckSig", [ + "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds", +])): + pass + + +def _check_sigs_on_pdus(keyring, pdus): + """Check that the given events are correctly signed + + Args: + keyring (synapse.crypto.Keyring): keyring object to do the checks + pdus (Collection[EventBase]): the events to be checked + + Returns: + List[Deferred]: a Deferred for each event in pdus, which will either succeed if + the signatures are valid, or fail (with a SynapseError) if not. + """ + + # (currently this is written assuming the v1 room structure; we'll probably want a + # separate function for checking v2 rooms) + + # we want to check that the event is signed by: + # + # (a) the server which created the event_id + # + # (b) the sender's server. + # + # - except in the case of invites created from a 3pid invite, which are exempt + # from this check, because the sender has to match that of the original 3pid + # invite, but the event may come from a different HS, for reasons that I don't + # entirely grok (why do the senders have to match? and if they do, why doesn't the + # joining server ask the inviting server to do the switcheroo with + # exchange_third_party_invite?). 
+ # + # That's pretty awful, since redacting such an invite will render it invalid + # (because it will then look like a regular invite without a valid signature), + # and signatures are *supposed* to be valid whether or not an event has been + # redacted. But this isn't the worst of the ways that 3pid invites are broken. + # + # let's start by getting the domain for each pdu, and flattening the event back + # to JSON. + pdus_to_check = [ + PduToCheckSig( + pdu=p, + redacted_pdu_json=prune_event(p).get_pdu_json(), + event_id_domain=get_domain_from_id(p.event_id), + sender_domain=get_domain_from_id(p.sender), + deferreds=[], + ) + for p in pdus + ] + + # first make sure that the event is signed by the event_id's domain + deferreds = keyring.verify_json_objects_for_server([ + (p.event_id_domain, p.redacted_pdu_json) + for p in pdus_to_check + ]) + + for p, d in zip(pdus_to_check, deferreds): + p.deferreds.append(d) + + # now let's look for events where the sender's domain is different to the + # event id's domain (normally only the case for joins/leaves), and add additional + # checks. + pdus_to_check_sender = [ + p for p in pdus_to_check + if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu) + ] + + more_deferreds = keyring.verify_json_objects_for_server([ + (p.sender_domain, p.redacted_pdu_json) + for p in pdus_to_check_sender + ]) + + for p, d in zip(pdus_to_check_sender, more_deferreds): + p.deferreds.append(d) + + # replace lists of deferreds with single Deferreds + return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check] + + +def _flatten_deferred_list(deferreds): + """Given a list of one or more deferreds, either return the single deferred, or + combine into a DeferredList. + """ + if len(deferreds) > 1: + return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True) + else: + assert len(deferreds) == 1 + return deferreds[0] + + +def _is_invite_via_3pid(event): + return ( + event.type == EventTypes.Member + and event.membership == Membership.INVITE + and "third_party_invite" in event.content + ) + + def event_from_pdu_json(pdu_json, outlier=False): """Construct a FrozenEvent from an event json received over federation diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c9f3c2d352..d05ed91d64 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -66,6 +66,14 @@ class FederationClient(FederationBase): self.state = hs.get_state_handler() self.transport_layer = hs.get_federation_transport_client() + self._get_pdu_cache = ExpiringCache( + cache_name="get_pdu_cache", + clock=self._clock, + max_len=1000, + expiry_ms=120 * 1000, + reset_expiry_on_get=False, + ) + def _clear_tried_cache(self): """Clear pdu_destination_tried cache""" now = self._clock.time_msec() @@ -82,17 +90,6 @@ class FederationClient(FederationBase): if destination_dict: self.pdu_destination_tried[event_id] = destination_dict - def start_get_pdu_cache(self): - self._get_pdu_cache = ExpiringCache( - cache_name="get_pdu_cache", - clock=self._clock, - max_len=1000, - expiry_ms=120 * 1000, - reset_expiry_on_get=False, - ) - - self._get_pdu_cache.start() - @log_function def make_query(self, destination, query_type, args, retry_on_dns_fail=False, ignore_backoff=False): @@ -212,8 +209,6 @@ class FederationClient(FederationBase): Will attempt to get the PDU from each destination in the list until one succeeds. - This will persist the PDU locally upon receipt. 
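For reference, the behaviour of the `_flatten_deferred_list` helper above, reproduced in isolation: one deferred passes through untouched, while several are combined so that the first failure fails the lot.

```python
# Sketch of _flatten_deferred_list: a single deferred is returned as-is;
# several are combined into one DeferredList that fails fast on error.
from twisted.internet.defer import DeferredList, succeed

def _flatten_deferred_list(deferreds):
    if len(deferreds) > 1:
        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
    assert len(deferreds) == 1
    return deferreds[0]

d = _flatten_deferred_list([succeed(1), succeed(2)])
d.addCallback(print)  # prints [(True, 1), (True, 2)]
```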
- Args: destinations (list): Which home servers to query event_id (str): event to fetch @@ -229,10 +224,9 @@ class FederationClient(FederationBase): # TODO: Rate limit the number of times we try and get the same event. - if self._get_pdu_cache: - ev = self._get_pdu_cache.get(event_id) - if ev: - defer.returnValue(ev) + ev = self._get_pdu_cache.get(event_id) + if ev: + defer.returnValue(ev) pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -271,10 +265,10 @@ class FederationClient(FederationBase): event_id, destination, e, ) except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: - logger.info(e.message) + logger.info(str(e)) continue except Exception as e: pdu_attempts[destination] = now @@ -285,7 +279,7 @@ class FederationClient(FederationBase): ) continue - if self._get_pdu_cache is not None and signed_pdu: + if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu defer.returnValue(signed_pdu) @@ -293,8 +287,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks @log_function def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the `current` state PDUs for a given room from - a remote home server. + """Requests all of the room state at a given event from a remote home server. Args: destination (str): The remote homeserver to query for the state. @@ -302,9 +295,10 @@ class FederationClient(FederationBase): event_id (str): The id of the event we want the state at. Returns: - Deferred: Results in a list of PDUs. + Deferred[Tuple[List[EventBase], List[EventBase]]]: + A list of events in the state, and a list of events in the auth chain + for the given event. """ - try: # First we try and ask for just the IDs, as thats far quicker if # we have most of the state and auth_chain already. @@ -510,7 +504,7 @@ class FederationClient(FederationBase): else: logger.warn( "Failed to %s via %s: %i %s", - description, destination, e.code, e.message, + description, destination, e.code, e.args[0], ) except Exception: logger.warn( @@ -875,7 +869,7 @@ class FederationClient(FederationBase): except Exception as e: logger.exception( "Failed to send_third_party_invite via %s: %s", - destination, e.message + destination, str(e) ) raise RuntimeError("Failed to send to any server.") diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3e0cd294a1..819e8f7331 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -46,6 +46,7 @@ from synapse.replication.http.federation import ( from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache +from synapse.util.logcontext import nested_logging_context from synapse.util.logutils import log_function # when processing incoming transactions, we try to handle multiple rooms in @@ -99,7 +100,7 @@ class FederationServer(FederationBase): @defer.inlineCallbacks @log_function - def on_incoming_transaction(self, transaction_data): + def on_incoming_transaction(self, origin, transaction_data): # keep this as early as possible to make the calculated origin ts as # accurate as possible. 
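A note on the repeated `e.message` → `str(e)` changes in this file: `BaseException.message` is a Python 2-ism that no longer exists on Python 3, whereas `str(e)` is portable across both.

```python
# BaseException.message is gone on Python 3; str(e) works everywhere.
try:
    raise RuntimeError("boom")
except RuntimeError as e:
    assert str(e) == "boom"
    assert not hasattr(e, "message")  # holds on Python 3
```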
request_time = self._clock.time_msec() @@ -108,34 +109,33 @@ class FederationServer(FederationBase): if not transaction.transaction_id: raise Exception("Transaction missing transaction_id") - if not transaction.origin: - raise Exception("Transaction missing origin") logger.debug("[%s] Got transaction", transaction.transaction_id) # use a linearizer to ensure that we don't process the same transaction # multiple times in parallel. with (yield self._transaction_linearizer.queue( - (transaction.origin, transaction.transaction_id), + (origin, transaction.transaction_id), )): result = yield self._handle_incoming_transaction( - transaction, request_time, + origin, transaction, request_time, ) defer.returnValue(result) @defer.inlineCallbacks - def _handle_incoming_transaction(self, transaction, request_time): + def _handle_incoming_transaction(self, origin, transaction, request_time): """ Process an incoming transaction and return the HTTP response Args: + origin (unicode): the server making the request transaction (Transaction): incoming transaction request_time (int): timestamp that the HTTP request arrived at Returns: Deferred[(int, object)]: http response code and body """ - response = yield self.transaction_actions.have_responded(transaction) + response = yield self.transaction_actions.have_responded(origin, transaction) if response: logger.debug( @@ -149,7 +149,7 @@ class FederationServer(FederationBase): received_pdus_counter.inc(len(transaction.pdus)) - origin_host, _ = parse_server_name(transaction.origin) + origin_host, _ = parse_server_name(origin) pdus_by_room = {} @@ -188,21 +188,22 @@ class FederationServer(FederationBase): for pdu in pdus_by_room[room_id]: event_id = pdu.event_id - try: - yield self._handle_received_pdu( - transaction.origin, pdu - ) - pdu_results[event_id] = {} - except FederationError as e: - logger.warn("Error handling PDU %s: %s", event_id, e) - pdu_results[event_id] = {"error": str(e)} - except Exception as e: - f = failure.Failure() - pdu_results[event_id] = {"error": str(e)} - logger.error( - "Failed to handle PDU %s: %s", - event_id, f.getTraceback().rstrip(), - ) + with nested_logging_context(event_id): + try: + yield self._handle_received_pdu( + origin, pdu + ) + pdu_results[event_id] = {} + except FederationError as e: + logger.warn("Error handling PDU %s: %s", event_id, e) + pdu_results[event_id] = {"error": str(e)} + except Exception as e: + f = failure.Failure() + pdu_results[event_id] = {"error": str(e)} + logger.error( + "Failed to handle PDU %s: %s", + event_id, f.getTraceback().rstrip(), + ) yield concurrently_execute( process_pdus_for_room, pdus_by_room.keys(), @@ -212,7 +213,7 @@ class FederationServer(FederationBase): if hasattr(transaction, "edus"): for edu in (Edu(**x) for x in transaction.edus): yield self.received_edu( - transaction.origin, + origin, edu.edu_type, edu.content ) @@ -224,6 +225,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response( + origin, transaction, 200, response ) @@ -618,7 +620,7 @@ class FederationServer(FederationBase): ) yield self.handler.on_receive_pdu( - origin, pdu, get_missing=True, sent_to_us_directly=True, + origin, pdu, sent_to_us_directly=True, ) def __str__(self): @@ -838,9 +840,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): ) return self._send_edu( - edu_type=edu_type, - origin=origin, - content=content, + edu_type=edu_type, + origin=origin, + content=content, ) def on_query(self, query_type, 
args): @@ -851,6 +853,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry): return handler(args) return self._get_query_client( - query_type=query_type, - args=args, + query_type=query_type, + args=args, ) diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 9146215c21..74ffd13b4f 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -36,7 +36,7 @@ class TransactionActions(object): self.store = datastore @log_function - def have_responded(self, transaction): + def have_responded(self, origin, transaction): """ Have we already responded to a transaction with the same id and origin? @@ -50,11 +50,11 @@ class TransactionActions(object): "transaction_id") return self.store.get_received_txn_response( - transaction.transaction_id, transaction.origin + transaction.transaction_id, origin ) @log_function - def set_response(self, transaction, code, response): + def set_response(self, origin, transaction, code, response): """ Persist how we responded to a transaction. Returns: @@ -66,7 +66,7 @@ class TransactionActions(object): return self.store.set_received_txn_response( transaction.transaction_id, - transaction.origin, + origin, code, response, ) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0bb468385d..6f5995735a 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -32,7 +32,7 @@ Events are replicated via a separate events stream. import logging from collections import namedtuple -from six import iteritems, itervalues +from six import iteritems from sortedcontainers import SortedDict @@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object): user_ids = set( user_id - for uids in itervalues(self.presence_changed) + for uids in self.presence_changed.values() for user_id in uids ) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 94d7423d01..98b5950800 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -137,26 +137,6 @@ class TransactionQueue(object): self._processing_pending_presence = False - def can_send_to(self, destination): - """Can we send messages to the given server? - - We can't send messages to ourselves. If we are running on localhost - then we can only federation with other servers running on localhost. - Otherwise we only federate with servers on a public domain. - - Args: - destination(str): The server we are possibly trying to send to. - Returns: - bool: True if we can send to the server. - """ - - if destination == self.server_name: - return False - if self.server_name.startswith("localhost"): - return destination.startswith("localhost") - else: - return not destination.startswith("localhost") - def notify_new_events(self, current_id): """This gets called when we have some new events we might want to send out to other servers. 
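The thread running through the persistence.py changes above: the transaction replay cache is now keyed on the *authenticated* origin passed down from the servlet, not the origin claimed inside the transaction body. A toy cache showing the keying (the dict and function bodies here are illustrative, not the real storage layer):

```python
# Toy replay-cache keyed on (transaction_id, authenticated origin), so one
# server cannot replay or shadow another server's transaction id.
responses = {}

def set_response(origin, txn_id, code, body):
    responses[(txn_id, origin)] = (code, body)

def have_responded(origin, txn_id):
    return responses.get((txn_id, origin))

set_response("good.example.com", "txn1", 200, {"pdus": {}})
assert have_responded("good.example.com", "txn1") == (200, {"pdus": {}})
assert have_responded("evil.example.com", "txn1") is None
```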
@@ -279,10 +259,7 @@ class TransactionQueue(object): self._order += 1 destinations = set(destinations) - destinations = set( - dest for dest in destinations if self.can_send_to(dest) - ) - + destinations.discard(self.server_name) logger.debug("Sending to: %s", str(destinations)) if not destinations: @@ -358,7 +335,7 @@ class TransactionQueue(object): for destinations, states in hosts_and_states: for destination in destinations: - if not self.can_send_to(destination): + if destination == self.server_name: continue self.pending_presence_by_dest.setdefault( @@ -377,7 +354,8 @@ class TransactionQueue(object): content=content, ) - if not self.can_send_to(destination): + if destination == self.server_name: + logger.info("Not sending EDU to ourselves") return sent_edus_counter.inc() @@ -392,10 +370,8 @@ class TransactionQueue(object): self._attempt_new_transaction(destination) def send_device_messages(self, destination): - if destination == self.server_name or destination == "localhost": - return - - if not self.can_send_to(destination): + if destination == self.server_name: + logger.info("Not sending device update to ourselves") return self._attempt_new_transaction(destination) @@ -463,7 +439,19 @@ class TransactionQueue(object): # pending_transactions flag. pending_pdus = self.pending_pdus_by_dest.pop(destination, []) + + # We can only include at most 50 PDUs per transaction + pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:] + if leftover_pdus: + self.pending_pdus_by_dest[destination] = leftover_pdus + pending_edus = self.pending_edus_by_dest.pop(destination, []) + + # We can only include at most 100 EDUs per transaction + pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:] + if leftover_edus: + self.pending_edus_by_dest[destination] = leftover_edus + pending_presence = self.pending_presence_by_dest.pop(destination, {}) pending_edus.extend( diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1054441ca5..2ab973d6c8 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -15,7 +15,8 @@ # limitations under the License. import logging -import urllib + +from six.moves import urllib from twisted.internet import defer @@ -951,4 +952,4 @@ def _create_path(prefix, path, *args): Returns: str """ - return prefix + path % tuple(urllib.quote(arg, "") for arg in args) + return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 7a993fd1cf..2f874b4838 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -90,8 +90,8 @@ class Authenticator(object): @defer.inlineCallbacks def authenticate_request(self, request, content): json_request = { - "method": request.method, - "uri": request.uri, + "method": request.method.decode('ascii'), + "uri": request.uri.decode('ascii'), "destination": self.server_name, "signatures": {}, } @@ -252,7 +252,7 @@ class BaseFederationServlet(object): by the callback method. None if the request has already been handled. """ content = None - if request.method in ["PUT", "POST"]: + if request.method in [b"PUT", b"POST"]: # TODO: Handle other method types? other content types?
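The new PDU/EDU caps above use a slice-and-requeue pattern; a minimal sketch (`take_batch` is our name for it, the diff inlines the slices with the literals 50 and 100):

```python
# Take at most `limit` items for this transaction; push the remainder
# back so it goes out in the next transaction.
def take_batch(pending, limit):
    batch, leftover = pending[:limit], pending[limit:]
    return batch, leftover

batch, leftover = take_batch(list(range(120)), 50)
assert len(batch) == 50 and len(leftover) == 70
```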
content = parse_json_object_from_request(request) @@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet): try: code, response = yield self.handler.on_incoming_transaction( - transaction_data + origin, transaction_data, ) except Exception: logger.exception("on_incoming_transaction failed") @@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet): return self.handler.on_context_state_request( origin, context, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet): return self.handler.on_state_ids_request( origin, room_id, - query.get("event_id", [None])[0], + parse_string_from_args(query, "event_id", None), ) @@ -405,14 +405,12 @@ class FederationBackfillServlet(BaseFederationServlet): PATH = "/backfill/(?P[^/]*)/" def on_GET(self, origin, content, query, context): - versions = query["v"] - limits = query["limit"] + versions = [x.decode('ascii') for x in query[b"v"]] + limit = parse_integer_from_args(query, "limit", None) - if not limits: + if not limit: return defer.succeed((400, {"error": "Did not include limit param"})) - limit = int(limits[-1]) - return self.handler.on_backfill_request(origin, context, versions, limit) @@ -423,7 +421,7 @@ class FederationQueryServlet(BaseFederationServlet): def on_GET(self, origin, content, query, query_type): return self.handler.on_query_request( query_type, - {k: v[0].decode("utf-8") for k, v in query.items()} + {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()} ) @@ -630,14 +628,14 @@ class OpenIdUserInfo(BaseFederationServlet): @defer.inlineCallbacks def on_GET(self, origin, content, query): - token = query.get("access_token", [None])[0] + token = query.get(b"access_token", [None])[0] if token is None: defer.returnValue((401, { "errcode": "M_MISSING_TOKEN", "error": "Access Token required" })) return - user_id = yield self.handler.on_openid_userinfo(token) + user_id = yield self.handler.on_openid_userinfo(token.decode('ascii')) if user_id is None: defer.returnValue((401, { diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4a81bd2ba9..2a5eab124f 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -895,22 +895,24 @@ class AuthHandler(BaseHandler): Args: password (unicode): Password to hash. - stored_hash (unicode): Expected hash value. + stored_hash (bytes): Expected hash value. Returns: Deferred(bool): Whether self.hash(password) == stored_hash. 
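Stepping back to the servlet changes above: on Python 3, Twisted exposes query arguments as a dict of bytes keys mapping to lists of bytes values, which is what drives the `b"..."` indexing and `.decode('ascii')` calls. A toy:

```python
# request.args-style structure on Python 3: bytes keys, lists of bytes
# values (the dict literal here is illustrative).
query = {b"access_token": [b"syt_abc"], b"v": [b"1", b"2"]}

token = query.get(b"access_token", [None])[0]
assert token.decode("ascii") == "syt_abc"

versions = [x.decode("ascii") for x in query[b"v"]]
assert versions == ["1", "2"]
```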
""" - def _do_validate_hash(): # Normalise the Unicode in the password pw = unicodedata.normalize("NFKC", password) return bcrypt.checkpw( pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"), - stored_hash.encode('utf8') + stored_hash ) if stored_hash: + if not isinstance(stored_hash, bytes): + stored_hash = stored_hash.encode('ascii') + return make_deferred_yieldable( threads.deferToThreadPool( self.hs.get_reactor(), diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index ef866da1b6..18741c5fac 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -20,7 +20,14 @@ import string from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + CodeMessageException, + Codes, + NotFoundError, + StoreError, + SynapseError, +) from synapse.types import RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler @@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler): def delete_association(self, requester, user_id, room_alias): # association deletion for human users - can_delete = yield self._user_can_delete_alias(room_alias, user_id) + try: + can_delete = yield self._user_can_delete_alias(room_alias, user_id) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown room alias") + raise + if not can_delete: raise AuthError( 403, "You don't have permission to delete the alias.", @@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler): def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) - if creator and creator == user_id: + if creator is not None and creator == user_id: defer.returnValue(True) is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 5816bf8b4f..9dc46aa15f 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -330,7 +330,8 @@ class E2eKeysHandler(object): (algorithm, key_id, ex_json, key) ) else: - new_keys.append((algorithm, key_id, encode_canonical_json(key))) + new_keys.append(( + algorithm, key_id, encode_canonical_json(key).decode('ascii'))) yield self.store.add_e2e_one_time_keys( user_id, device_id, time_now, new_keys @@ -340,7 +341,7 @@ class E2eKeysHandler(object): def _exception_to_failure(e): if isinstance(e, CodeMessageException): return { - "status": e.code, "message": e.message, + "status": e.code, "message": str(e), } if isinstance(e, NotRetryingDestination): @@ -358,7 +359,7 @@ def _exception_to_failure(e): # Note that some Exceptions (notably twisted's ResponseFailed etc) don't # give a string for e.message, which json then fails to serialize. return { - "status": 503, "message": str(e.message), + "status": 503, "message": str(e), } diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0ebf0fd188..45d955e6f5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -18,7 +18,6 @@ import itertools import logging -import sys import six from six import iteritems, itervalues @@ -69,6 +68,27 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +def shortstr(iterable, maxitems=5): + """If iterable has maxitems or fewer, return the stringification of a list + containing those items. 
+ + Otherwise, return the stringification of a list with the first maxitems items, + followed by "...". + + Args: + iterable (Iterable): iterable to truncate + maxitems (int): number of items to return before truncating + + Returns: + unicode + """ + + items = list(itertools.islice(iterable, maxitems + 1)) + if len(items) <= maxitems: + return str(items) + return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]" + + class FederationHandler(BaseHandler): """Handles events that originated from federation. Responsible for: @@ -85,7 +105,7 @@ class FederationHandler(BaseHandler): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastore() # type: synapse.storage.DataStore self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() self.server_name = hs.hostname @@ -114,9 +134,8 @@ class FederationHandler(BaseHandler): self._room_pdu_linearizer = Linearizer("fed_room_pdu") @defer.inlineCallbacks - @log_function def on_receive_pdu( - self, origin, pdu, get_missing=True, sent_to_us_directly=False, + self, origin, pdu, sent_to_us_directly=False, ): """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events @@ -125,14 +144,23 @@ class FederationHandler(BaseHandler): origin (str): server which initiated the /send/ transaction. Will be used to fetch missing events or state. pdu (FrozenEvent): received PDU - get_missing (bool): True if we should fetch missing prev_events + sent_to_us_directly (bool): True if this event was pushed to us; False if + we pulled it as the result of a missing prev_event. Returns (Deferred): completes with None """ + room_id = pdu.room_id + event_id = pdu.event_id + + logger.info( + "[%s %s] handling received PDU: %s", + room_id, event_id, pdu, + ) + # We reprocess pdus when we have seen them only as outliers existing = yield self.store.get_event( - pdu.event_id, + event_id, allow_none=True, allow_rejected=True, ) @@ -147,7 +175,7 @@ class FederationHandler(BaseHandler): ) ) if already_seen: - logger.debug("Already seen pdu %s", pdu.event_id) + logger.debug("[%s %s]: Already seen pdu", room_id, event_id) return # do some initial sanity-checking of the event. In particular, make @@ -156,6 +184,7 @@ class FederationHandler(BaseHandler): try: self._sanity_check_event(pdu) except SynapseError as err: + logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id) raise FederationError( "ERROR", err.code, @@ -165,10 +194,12 @@ class FederationHandler(BaseHandler): # If we are currently in the process of joining this room, then we # queue up events for later processing. - if pdu.room_id in self.room_queues: - logger.info("Ignoring PDU %s for room %s from %s for now; join " - "in progress", pdu.event_id, pdu.room_id, origin) - self.room_queues[pdu.room_id].append((pdu, origin)) + if room_id in self.room_queues: + logger.info( + "[%s %s] Queuing PDU from %s for now: join in progress", + room_id, event_id, origin, + ) + self.room_queues[room_id].append((pdu, origin)) return # If we're no longer in the room just ditch the event entirely. This @@ -179,7 +210,7 @@ # we should check if we *are* in fact in the room. If we are then we # can magically rejoin the room.
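The `shortstr` helper defined at the top of this file is what keeps the new `[room_id event_id]` log lines bounded; its behaviour in isolation:

```python
import itertools

def shortstr(iterable, maxitems=5):
    # take one more than maxitems so we can tell whether we truncated
    items = list(itertools.islice(iterable, maxitems + 1))
    if len(items) <= maxitems:
        return str(items)
    return "[" + ", ".join(repr(r) for r in items[:maxitems]) + ", ...]"

print(shortstr(range(3)))    # [0, 1, 2]
print(shortstr(range(100)))  # [0, 1, 2, 3, 4, ...]
```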
is_in_room = yield self.auth.check_host_in_room( - pdu.room_id, + room_id, self.server_name ) if not is_in_room: @@ -188,8 +219,8 @@ class FederationHandler(BaseHandler): ) if was_in_room: logger.info( - "Ignoring PDU %s for room %s from %s as we've left the room!", - pdu.event_id, pdu.room_id, origin, + "[%s %s] Ignoring PDU from %s as we've left the room", + room_id, event_id, origin, ) defer.returnValue(None) @@ -204,8 +235,8 @@ class FederationHandler(BaseHandler): ) logger.debug( - "_handle_new_pdu min_depth for %s: %d", - pdu.room_id, min_depth + "[%s %s] min_depth: %d", + room_id, event_id, min_depth, ) prevs = {e_id for e_id, _ in pdu.prev_events} @@ -218,17 +249,18 @@ class FederationHandler(BaseHandler): # send to the clients. pdu.internal_metadata.outlier = True elif min_depth and pdu.depth > min_depth: - if get_missing and prevs - seen: + missing_prevs = prevs - seen + if sent_to_us_directly and missing_prevs: # If we're missing stuff, ensure we only fetch stuff one # at a time. logger.info( - "Acquiring lock for room %r to fetch %d missing events: %r...", - pdu.room_id, len(prevs - seen), list(prevs - seen)[:5], + "[%s %s] Acquiring room lock to fetch %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) with (yield self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( - "Acquired lock for room %r to fetch %d missing events", - pdu.room_id, len(prevs - seen), + "[%s %s] Acquired room lock to fetch %d missing prev_events", + room_id, event_id, len(missing_prevs), ) yield self._get_missing_events_for_pdu( @@ -241,69 +273,150 @@ class FederationHandler(BaseHandler): if not prevs - seen: logger.info( - "Found all missing prev events for %s", pdu.event_id + "[%s %s] Found all missing prev_events", + room_id, event_id, ) - elif prevs - seen: + elif missing_prevs: logger.info( - "Not fetching %d missing events for room %r,event %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, - list(prevs - seen)[:5], + "[%s %s] Not recursively fetching %d missing prev_events: %s", + room_id, event_id, len(missing_prevs), shortstr(missing_prevs), ) - if sent_to_us_directly and prevs - seen: - # If they have sent it to us directly, and the server - # isn't telling us about the auth events that it's - # made a message referencing, we explode - raise FederationError( - "ERROR", - 403, - ( - "Your server isn't divulging details about prev_events " - "referenced in this event." - ), - affected=pdu.event_id, - ) - elif prevs - seen: - # Calculate the state of the previous events, and - # de-conflict them to find the current state. - state_groups = [] + if prevs - seen: + # We've still not been able to get all of the prev_events for this event. + # + # In this case, we need to fall back to asking another server in the + # federation for the state at this event. That's ok provided we then + # resolve the state against other bits of the DAG before using it (which + # will ensure that you can't just take over a room by sending an event, + # withholding its prev_events, and declaring yourself to be an admin in + # the subsequent state request). + # + # Now, if we're pulling this event as a missing prev_event, then clearly + # this event is not going to become the only forward-extremity and we are + # guaranteed to resolve its state against our existing forward + # extremities, so that should be fine. 
+ # + # On the other hand, if this event was pushed to us, it is possible for + # it to become the only forward-extremity in the room, and we would then + # trust its state to be the state for the whole room. This is very bad. + # Further, if the event was pushed to us, there is no excuse for us not to + # have all the prev_events. We therefore reject any such events. + # + # XXX this really feels like it could/should be merged with the above, + # but there is an interaction with min_depth that I'm not really + # following. + + if sent_to_us_directly: + logger.warn( + "[%s %s] Failed to fetch %d prev events: rejecting", + room_id, event_id, len(prevs - seen), + ) + raise FederationError( + "ERROR", + 403, + ( + "Your server isn't divulging details about prev_events " + "referenced in this event." + ), + affected=pdu.event_id, + ) + + # Calculate the state after each of the previous events, and + # resolve them to find the correct state at the current event. auth_chains = set() + event_map = { + event_id: pdu, + } try: # Get the state of the events we know about - ours = yield self.store.get_state_groups(pdu.room_id, list(seen)) - state_groups.append(ours) + ours = yield self.store.get_state_groups_ids(room_id, seen) + + # state_maps is a list of mappings from (type, state_key) to event_id + # type: list[dict[tuple[str, str], str]] + state_maps = list(ours.values()) + + # we don't need this any more, let's delete it. + del ours # Ask the remote server for the states we don't # know about for p in prevs - seen: - state, got_auth_chain = ( - yield self.federation_client.get_state_for_room( - origin, pdu.room_id, p - ) + logger.info( + "[%s %s] Requesting state at missing prev_event %s", + room_id, event_id, p, ) - auth_chains.update(got_auth_chain) - state_group = {(x.type, x.state_key): x.event_id for x in state} - state_groups.append(state_group) + + with logcontext.nested_logging_context(p): + # note that if any of the missing prevs share missing state or + # auth events, the requests to fetch those events are deduped + # by the get_pdu_cache in federation_client. + remote_state, got_auth_chain = ( + yield self.federation_client.get_state_for_room( + origin, room_id, p, + ) + ) + + # we want the state *after* p; get_state_for_room returns the + # state *before* p. + remote_event = yield self.federation_client.get_pdu( + [origin], p, outlier=True, + ) + + if remote_event is None: + raise Exception( + "Unable to get missing prev_event %s" % (p, ) + ) + + if remote_event.is_state(): + remote_state.append(remote_event) + + # XXX hrm I'm not convinced that duplicate events will compare + # for equality, so I'm not sure this does what the author + # hoped. + auth_chains.update(got_auth_chain) + + remote_state_map = { + (x.type, x.state_key): x.event_id for x in remote_state + } + state_maps.append(remote_state_map) + + for x in remote_state: + event_map[x.event_id] = x # Resolve any conflicting state + @defer.inlineCallbacks def fetch(ev_ids): - return self.store.get_events( - ev_ids, get_prev_content=False, check_redacted=False + fetched = yield self.store.get_events( + ev_ids, get_prev_content=False, check_redacted=False, ) + # add any events we fetch here to the `event_map` so that we + # can use them to build the state event list below. 
+ event_map.update(fetched) + defer.returnValue(fetched) - room_version = yield self.store.get_room_version(pdu.room_id) + room_version = yield self.store.get_room_version(room_id) state_map = yield resolve_events_with_factory( - room_version, state_groups, {pdu.event_id: pdu}, fetch + room_version, state_maps, event_map, fetch, ) - state = (yield self.store.get_events(state_map.values())).values() + # we need to give _process_received_pdu the actual state events + # rather than event ids, so generate that now. + state = [ + event_map[e] for e in six.itervalues(state_map) + ] auth_chain = list(auth_chains) except Exception: + logger.warn( + "[%s %s] Error attempting to resolve state at missing " + "prev_events", + room_id, event_id, exc_info=True, + ) raise FederationError( "ERROR", 403, "We can't get valid state history.", - affected=pdu.event_id, + affected=event_id, ) yield self._process_received_pdu( @@ -322,15 +435,16 @@ prevs (set(str)): List of event ids which we are missing min_depth (int): Minimum depth of events to return. """ - # We recalculate seen, since it may have changed. + + room_id = pdu.room_id + event_id = pdu.event_id + seen = yield self.store.have_seen_events(prevs) if not prevs - seen: return - latest = yield self.store.get_latest_event_ids_in_room( - pdu.room_id - ) + latest = yield self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us @@ -338,8 +452,8 @@ latest |= seen logger.info( - "Missing %d events for room %r pdu %s: %r...", - len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5] + "[%s %s]: Requesting %d prev_events: %s", + room_id, event_id, len(prevs - seen), shortstr(prevs - seen) ) # XXX: we set timeout to 10s to help workaround @@ -360,49 +474,88 @@ class FederationHandler(BaseHandler): # apparently. # # see https://github.com/matrix-org/synapse/pull/1744 + # + # ---- + # + # Update richvdh 2018/09/18: There are a number of problems with timing this + # request out aggressively on the client side: + # + # - it plays badly with the server-side rate-limiter, which starts tarpitting you + # if you send too many requests at once, so you end up with the server carefully + # working through the backlog of your requests, which you have already timed + # out. + # + # - for this request in particular, we now (as of + # https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the + # server can't produce a plausible-looking set of prev_events - so we become + # much more likely to reject the event. + # + # - contrary to what it says above, we do *not* fall back to fetching fresh state + # for the room if get_missing_events times out. Rather, we give up processing + # the PDU whose prevs we are missing, which then makes it much more likely that + # we'll end up back here for the *next* PDU in the list, which exacerbates the + # problem. + # + # - the aggressive 10s timeout was introduced to deal with incoming federation + # requests taking 8 hours to process. It's not entirely clear why that was going + # on; certainly there were other issues causing traffic storms which are now + # resolved, and I think in any case we may be more sensible about our locking + # now. We're *certainly* more sensible about our logging. + # + # All that said: Let's try increasing the timeout to 60s and see what happens.
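Earlier in this hunk, the `fetch` callback handed to `resolve_events_with_factory` also records everything it loads into `event_map`, so the resolved state can be materialised without a second database round-trip. A synchronous toy of that capture-as-you-fetch pattern (the store dict and names are illustrative):

```python
# Capture-as-you-fetch: the fetch helper returns events *and* records
# them, so event_map can later map resolved event ids back to events.
FAKE_STORE = {"$a": "event A", "$b": "event B"}
event_map = {}

def fetch(ev_ids):
    fetched = {eid: FAKE_STORE[eid] for eid in ev_ids}
    event_map.update(fetched)
    return fetched

fetch(["$a"])
resolved_state_ids = ["$a"]
state = [event_map[e] for e in resolved_state_ids]
assert state == ["event A"]
```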
missing_events = yield self.federation_client.get_missing_events( origin, - pdu.room_id, + room_id, earliest_events_ids=list(latest), latest_events=[pdu], limit=10, min_depth=min_depth, - timeout=10000, + timeout=60000, ) logger.info( - "Got %d events: %r...", - len(missing_events), [e.event_id for e in missing_events[:5]] + "[%s %s]: Got %d prev_events: %s", + room_id, event_id, len(missing_events), shortstr(missing_events), ) # We want to sort these by depth so we process them and # tell clients about them in order. missing_events.sort(key=lambda x: x.depth) - for e in missing_events: - logger.info("Handling found event %s", e.event_id) - try: - yield self.on_receive_pdu( - origin, - e, - get_missing=False - ) - except FederationError as e: - if e.code == 403: - logger.warn("Event %s failed history check.") - else: - raise + for ev in missing_events: + logger.info( + "[%s %s] Handling received prev_event %s", + room_id, event_id, ev.event_id, + ) + with logcontext.nested_logging_context(ev.event_id): + try: + yield self.on_receive_pdu( + origin, + ev, + sent_to_us_directly=False, + ) + except FederationError as e: + if e.code == 403: + logger.warn( + "[%s %s] Received prev_event %s failed history check.", + room_id, event_id, ev.event_id, + ) + else: + raise - @log_function @defer.inlineCallbacks - def _process_received_pdu(self, origin, pdu, state, auth_chain): + def _process_received_pdu(self, origin, event, state, auth_chain): """ Called when we have a new pdu. We need to do auth checks and put it through the StateHandler. """ - event = pdu + room_id = event.room_id + event_id = event.event_id - logger.debug("Processing event: %s", event) + logger.debug( + "[%s %s] Processing event: %s", + room_id, event_id, event, + ) # FIXME (erikj): Awful hack to make the case where we are not currently # in the room work @@ -411,15 +564,16 @@ class FederationHandler(BaseHandler): # event. 
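One detail worth noting in `_get_missing_events_for_pdu` above: fetched events are sorted by depth before being handled, so they are processed (and surfaced to clients) in order. A toy with dicts standing in for events:

```python
# Sort fetched events by depth before processing them.
missing_events = [
    {"event_id": "$later", "depth": 7},
    {"event_id": "$earlier", "depth": 5},
]
missing_events.sort(key=lambda x: x["depth"])
assert [e["event_id"] for e in missing_events] == ["$earlier", "$later"]
```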
if state and auth_chain and not event.internal_metadata.is_outlier(): is_in_room = yield self.auth.check_host_in_room( - event.room_id, + room_id, self.server_name ) else: is_in_room = True + if not is_in_room: logger.info( - "Got event for room we're not in: %r %r", - event.room_id, event.event_id + "[%s %s] Got event for room we're not in", + room_id, event_id, ) try: @@ -431,7 +585,7 @@ class FederationHandler(BaseHandler): "ERROR", e.code, e.msg, - affected=event.event_id, + affected=event_id, ) else: @@ -464,6 +618,10 @@ class FederationHandler(BaseHandler): }) seen_ids.add(e.event_id) + logger.info( + "[%s %s] persisting newly-received auth/state events %s", + room_id, event_id, [e["event"].event_id for e in event_infos] + ) yield self._handle_new_events(origin, event_infos) try: @@ -480,12 +638,12 @@ class FederationHandler(BaseHandler): affected=event.event_id, ) - room = yield self.store.get_room(event.room_id) + room = yield self.store.get_room(room_id) if not room: try: yield self.store.store_room( - room_id=event.room_id, + room_id=room_id, room_creator_user_id="", is_public=False, ) @@ -513,7 +671,7 @@ class FederationHandler(BaseHandler): if newly_joined: user = UserID.from_string(event.state_key) - yield self.user_joined_room(user, event.room_id) + yield self.user_joined_room(user, room_id) @log_function @defer.inlineCallbacks @@ -594,7 +752,7 @@ class FederationHandler(BaseHandler): required_auth = set( a_id - for event in events + state_events.values() + auth_events.values() + for event in events + list(state_events.values()) + list(auth_events.values()) for a_id, _ in event.auth_events ) auth_events.update({ @@ -802,7 +960,7 @@ class FederationHandler(BaseHandler): ) continue except NotRetryingDestination as e: - logger.info(e.message) + logger.info(str(e)) continue except FederationDeniedError as e: logger.info(e) @@ -1027,7 +1185,8 @@ class FederationHandler(BaseHandler): try: logger.info("Processing queued PDU %s which was received " "while we were joining %s", p.event_id, p.room_id) - yield self.on_receive_pdu(origin, p) + with logcontext.nested_logging_context(p.event_id): + yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warn( "Error handling queued PDU %s from %s: %s", @@ -1358,7 +1517,7 @@ class FederationHandler(BaseHandler): ) if state_groups: - _, state = state_groups.items().pop() + _, state = list(state_groups.items()).pop() results = state if event.is_state(): @@ -1430,12 +1589,10 @@ class FederationHandler(BaseHandler): else: defer.returnValue(None) - @log_function def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @defer.inlineCallbacks - @log_function def _handle_new_event(self, origin, event, state=None, auth_events=None, backfilled=False): context = yield self._prep_event( @@ -1444,6 +1601,9 @@ class FederationHandler(BaseHandler): auth_events=auth_events, ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: if not event.internal_metadata.is_outlier() and not backfilled: yield self.action_generator.handle_push_actions_for_event( @@ -1454,15 +1614,13 @@ class FederationHandler(BaseHandler): [(event, context)], backfilled=backfilled, ) - except: # noqa: E722, as we reraise the exception this is fine. 
- tp, value, tb = sys.exc_info() - - logcontext.run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - - six.reraise(tp, value, tb) + success = True + finally: + if not success: + logcontext.run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) defer.returnValue(context) @@ -1475,15 +1633,22 @@ class FederationHandler(BaseHandler): Notifies about the events where appropriate. """ - contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - logcontext.run_in_background( - self._prep_event, + + @defer.inlineCallbacks + def prep(ev_info): + event = ev_info["event"] + with logcontext.nested_logging_context(suffix=event.event_id): + res = yield self._prep_event( origin, - ev_info["event"], + event, state=ev_info.get("state"), auth_events=ev_info.get("auth_events"), ) + defer.returnValue(res) + + contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults( + [ + logcontext.run_in_background(prep, ev_info) for ev_info in event_infos ], consumeErrors=True, )) @@ -1635,8 +1800,8 @@ class FederationHandler(BaseHandler): ) except AuthError as e: logger.warn( - "Rejecting %s because %s", - event.event_id, e.msg + "[%s %s] Rejecting: %s", + event.room_id, event.event_id, e.msg ) context.rejected = RejectedReason.AUTH_ERROR @@ -1831,7 +1996,7 @@ class FederationHandler(BaseHandler): room_version = yield self.store.get_room_version(event.room_id) - new_state = self.state_handler.resolve_events( + new_state = yield self.state_handler.resolve_events( room_version, [list(local_view.values()), list(remote_view.values())], event diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e484061cc0..4954b23a0d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -14,9 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import sys -import six from six import iteritems, itervalues, string_types from canonicaljson import encode_canonical_json, json @@ -624,6 +622,9 @@ class EventCreationHandler(object): event, context ) + # reraise does not allow inlineCallbacks to preserve the stacktrace, so we + # hack around with a try/finally instead. + success = False try: # If we're a worker we need to hit out to the master. if self.config.worker_app: @@ -636,6 +637,7 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) + success = True return yield self.persist_and_notify_client_event( @@ -645,17 +647,16 @@ class EventCreationHandler(object): ratelimit=ratelimit, extra_users=extra_users, ) - except: # noqa: E722, as we reraise the exception this is fine. - # Ensure that we actually remove the entries in the push actions - # staging area, if we calculated them. - tp, value, tb = sys.exc_info() - run_in_background( - self.store.remove_push_actions_from_staging, - event.event_id, - ) - - six.reraise(tp, value, tb) + success = True + finally: + if not success: + # Ensure that we actually remove the entries in the push actions + # staging area, if we calculated them. 
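The same success-flag idiom is used here as in `_handle_new_event` above: re-raising via `sys.exc_info()`/`six.reraise` defeated inlineCallbacks' traceback rebuilding, whereas `try`/`finally` lets the original exception propagate untouched. A condensed sketch of the pattern:

```python
# Success-flag cleanup: run the cleanup only if the body didn't complete,
# and let the original exception (with its traceback) propagate as-is.
def run_with_cleanup(action, cleanup):
    success = False
    try:
        result = action()
        success = True
        return result
    finally:
        if not success:
            cleanup()

assert run_with_cleanup(lambda: 42, lambda: None) == 42
```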
+ run_in_background( + self.store.remove_push_actions_from_staging, + event.event_id, + ) @defer.inlineCallbacks def persist_and_notify_client_event( diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 5170d093e3..a155b6e938 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -269,14 +269,7 @@ class PaginationHandler(object): if state_ids: state = yield self.store.get_events(list(state_ids.values())) - - if state: - state = yield filter_events_for_client( - self.store, - user_id, - state.values(), - is_peeking=(member_event_id is None), - ) + state = state.values() time_now = self.clock.time_msec() diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 75b8b7ce6a..1dfbde84fd 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get displayname") raise - except Exception: - logger.exception("Failed to get displayname") - else: - defer.returnValue(result["displayname"]) + + defer.returnValue(result["displayname"]) @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler): if e.code != 404: logger.exception("Failed to get avatar_url") raise - except Exception: - logger.exception("Failed to get avatar_url") defer.returnValue(result["avatar_url"]) @@ -278,7 +274,7 @@ class BaseProfileHandler(BaseHandler): except Exception as e: logger.warn( "Failed to update join event for room %s - %s", - room_id, str(e.message) + room_id, str(e) ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f03ee1476b..da914c46ff 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -125,6 +125,7 @@ class RegistrationHandler(BaseHandler): guest_access_token=None, make_guest=False, admin=False, + threepid=None, ): """Registers a new client on the server. @@ -145,7 +146,7 @@ class RegistrationHandler(BaseHandler): RegistrationError if there was a problem registering. """ - yield self.auth.check_auth_blocking() + yield self.auth.check_auth_blocking(threepid=threepid) password_hash = None if password: password_hash = yield self.auth_handler().hash(password) @@ -533,4 +534,5 @@ class RegistrationHandler(BaseHandler): room_id=room_id, remote_room_hosts=remote_room_hosts, action="join", + ratelimit=False, ) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 37e41afd61..38e1737ec9 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler): # Filter out rooms that we don't want to return rooms_to_scan = [ r for r in sorted_rooms - if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0 + if r not in newly_unpublished and rooms_to_num_joined[r] > 0 ] total_room_count = len(rooms_to_scan) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index f643619047..07fd3e82fc 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -583,6 +583,11 @@ class RoomMemberHandler(object): room_id = mapping["room_id"] servers = mapping["servers"] + # put the server which owns the alias at the front of the server list. 
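The small room_list.py change above is a genuine bug fix: the filter previously indexed `rooms_to_num_joined` with a stale `room_id` from an enclosing scope instead of the comprehension variable `r`. The corrected filter in isolation:

```python
# Corrected filter: key on the loop variable r, not a stale room_id.
rooms_to_num_joined = {"!a:example.com": 3, "!b:example.com": 0}
sorted_rooms = ["!a:example.com", "!b:example.com"]
newly_unpublished = set()

rooms_to_scan = [
    r for r in sorted_rooms
    if r not in newly_unpublished and rooms_to_num_joined[r] > 0
]
assert rooms_to_scan == ["!a:example.com"]
```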
+ if room_alias.domain in servers: + servers.remove(room_alias.domain) + servers.insert(0, room_alias.domain) + defer.returnValue((RoomID.from_string(room_id), servers)) @defer.inlineCallbacks diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index c464adbd0b..0c1d52fd11 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -54,7 +54,7 @@ class SearchHandler(BaseHandler): batch_token = None if batch: try: - b = decode_base64(batch) + b = decode_base64(batch).decode('ascii') batch_group, batch_group_key, batch_token = b.split("\n") assert batch_group is not None @@ -258,18 +258,18 @@ class SearchHandler(BaseHandler): # it returns more from the same group (if applicable) rather # than reverting to searching all results again. if batch_group and batch_group_key: - global_next_batch = encode_base64("%s\n%s\n%s" % ( + global_next_batch = encode_base64(("%s\n%s\n%s" % ( batch_group, batch_group_key, pagination_token - )) + )).encode('ascii')) else: - global_next_batch = encode_base64("%s\n%s\n%s" % ( + global_next_batch = encode_base64(("%s\n%s\n%s" % ( "all", "", pagination_token - )) + )).encode('ascii')) for room_id, group in room_groups.items(): - group["next_batch"] = encode_base64("%s\n%s\n%s" % ( + group["next_batch"] = encode_base64(("%s\n%s\n%s" % ( "room_id", room_id, pagination_token - )) + )).encode('ascii')) allowed_events.extend(room_events) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 648debc8aa..67b8ca28c7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -24,6 +24,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user +from synapse.storage.roommember import MemberSummary from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -525,6 +526,8 @@ class SyncHandler(object): A deferred dict describing the room summary """ + # FIXME: we could/should get this from room_stats when matthew/stats lands + # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305 last_events, _ = yield self.store.get_recent_event_ids_for_room( room_id, end_token=now_token.room_key, limit=1, @@ -537,44 +540,67 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( last_event.event_id, [ - (EventTypes.Member, None), (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), ] ) - member_ids = { - state_key: event_id - for (t, state_key), event_id in state_ids.iteritems() - if t == EventTypes.Member - } + # this is heavily cached, thus: fast. + details = yield self.store.get_room_summary(room_id) + name_id = state_ids.get((EventTypes.Name, '')) canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, '')) summary = {} - - # FIXME: it feels very heavy to load up every single membership event - # just to calculate the counts. - member_events = yield self.store.get_events(member_ids.values()) - - joined_user_ids = [] - invited_user_ids = [] - - for ev in member_events.values(): - if ev.content.get("membership") == Membership.JOIN: - joined_user_ids.append(ev.state_key) - elif ev.content.get("membership") == Membership.INVITE: - invited_user_ids.append(ev.state_key) + empty_ms = MemberSummary([], 0) # TODO: only send these when they change. 
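And on the room_member.py change above, which moves the alias's own homeserver to the front of the candidate list before attempting a join (it is the server most likely to know about the room):

```python
# Put the homeserver that owns the alias first in the candidate list.
servers = ["other.example.com", "alias.example.org", "third.example.net"]
domain = "alias.example.org"  # room_alias.domain in the real code

if domain in servers:
    servers.remove(domain)
    servers.insert(0, domain)

assert servers == ["alias.example.org", "other.example.com", "third.example.net"]
```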
- summary["m.joined_member_count"] = len(joined_user_ids) - summary["m.invited_member_count"] = len(invited_user_ids) + summary["m.joined_member_count"] = ( + details.get(Membership.JOIN, empty_ms).count + ) + summary["m.invited_member_count"] = ( + details.get(Membership.INVITE, empty_ms).count + ) - if name_id or canonical_alias_id: - defer.returnValue(summary) + # if the room has a name or canonical_alias set, we can skip + # calculating heroes. we assume that if the event has contents, it'll + # be a valid name or canonical_alias - i.e. we're checking that they + # haven't been "deleted" by blatting {} over the top. + if name_id: + name = yield self.store.get_event(name_id, allow_none=True) + if name and name.content: + defer.returnValue(summary) - # FIXME: order by stream ordering, not alphabetic + if canonical_alias_id: + canonical_alias = yield self.store.get_event( + canonical_alias_id, allow_none=True, + ) + if canonical_alias and canonical_alias.content: + defer.returnValue(summary) + joined_user_ids = [ + r[0] for r in details.get(Membership.JOIN, empty_ms).members + ] + invited_user_ids = [ + r[0] for r in details.get(Membership.INVITE, empty_ms).members + ] + gone_user_ids = ( + [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + + [r[0] for r in details.get(Membership.BAN, empty_ms).members] + ) + + # FIXME: only build up a member_ids list for our heroes + member_ids = {} + for membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.LEAVE, + Membership.BAN + ): + for user_id, event_id in details.get(membership, empty_ms).members: + member_ids[user_id] = event_id + + # FIXME: order by stream ordering rather than as returned by SQL me = sync_config.user.to_string() if (joined_user_ids or invited_user_ids): summary['m.heroes'] = sorted( @@ -586,7 +612,11 @@ class SyncHandler(object): )[0:5] else: summary['m.heroes'] = sorted( - [user_id for user_id in member_ids.keys() if user_id != me] + [ + user_id + for user_id in gone_user_ids + if user_id != me + ] )[0:5] if not sync_config.filter_collection.lazy_load_members(): @@ -692,6 +722,13 @@ class SyncHandler(object): } if full_state: + if lazy_load_members: + # always make sure we LL ourselves so we know we're in the room + # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 + # We only need apply this on full state syncs given we disabled + # LL for incr syncs in #3840. + types.append((EventTypes.Member, sync_config.user.to_string())) + if batch: current_state_ids = yield self.store.get_state_ids_for_event( batch.events[-1].event_id, types=types, @@ -719,6 +756,26 @@ class SyncHandler(object): lazy_load_members=lazy_load_members, ) elif batch.limited: + state_at_timeline_start = yield self.store.get_state_ids_for_event( + batch.events[0].event_id, types=types, + filtered_types=filtered_types, + ) + + # for now, we disable LL for gappy syncs - see + # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 + # N.B. this slows down incr syncs as we are now processing way + # more state in the server than if we were LLing. + # + # We still have to filter timeline_start to LL entries (above) in order + # for _calculate_state's LL logic to work, as we have to include LL + # members for timeline senders in case they weren't loaded in the initial + # sync. 
We do this by (counterintuitively) by filtering timeline_start + # members to just be ones which were timeline senders, which then ensures + # all of the rest get included in the state block (if we need to know + # about them). + types = None + filtered_types = None + state_at_previous_sync = yield self.get_state_at( room_id, stream_position=since_token, types=types, filtered_types=filtered_types, @@ -729,25 +786,29 @@ class SyncHandler(object): filtered_types=filtered_types, ) - state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, - ) - state_ids = _calculate_state( timeline_contains=timeline_state, timeline_start=state_at_timeline_start, previous=state_at_previous_sync, current=current_state_ids, + # we have to include LL members in case LL initial sync missed them lazy_load_members=lazy_load_members, ) else: state_ids = {} if lazy_load_members: - if types: + if types and batch.events: + # We're returning an incremental sync, with no + # "gap" since the previous sync, so normally there would be + # no state to return. + # But we're lazy-loading, so the client might need some more + # member events to understand the events in this timeline. + # So we fish out all the member events corresponding to the + # timeline here, and then dedupe any redundant ones below. + state_ids = yield self.store.get_state_ids_for_event( batch.events[0].event_id, types=types, - filtered_types=filtered_types, + filtered_types=None, # we only want members! ) if lazy_load_members and not include_redundant_members: @@ -767,7 +828,7 @@ class SyncHandler(object): logger.debug("filtering state from %r...", state_ids) state_ids = { t: event_id - for t, event_id in state_ids.iteritems() + for t, event_id in iteritems(state_ids) if cache.get(t[1]) != event_id } logger.debug("...to %r", state_ids) @@ -1568,6 +1629,19 @@ class SyncHandler(object): newly_joined_room=newly_joined, ) + # When we join the room (or the client requests full_state), we should + # send down any existing tags. Usually the user won't have tags in a + # newly joined room, unless either a) they've joined before or b) the + # tag was added by synapse e.g. for server notice rooms. + if full_state: + user_id = sync_result_builder.sync_config.user.to_string() + tags = yield self.store.get_tags_for_room(user_id, room_id) + + # If there aren't any tags, don't send the empty tags list down + # sync + if not tags: + tags = None + account_data_events = [] if tags is not None: account_data_events.append({ @@ -1596,10 +1670,24 @@ class SyncHandler(object): ) summary = {} + + # we include a summary in room responses when we're lazy loading + # members (as the client otherwise doesn't have enough info to form + # the name itself). if ( sync_config.filter_collection.lazy_load_members() and ( + # we recalulate the summary: + # if there are membership changes in the timeline, or + # if membership has changed during a gappy sync, or + # if this is an initial sync. 
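# A standalone rendering of the redundant-member filter in the lazy-loading
# hunk above, assuming `cache` maps a state_key to the event_id most
# recently sent down this connection; unchanged entries are dropped so the
# client is not resent members it already has:

def drop_known_state(state_ids, cache):
    return {
        (typ, state_key): event_id
        for (typ, state_key), event_id in state_ids.items()
        if cache.get(state_key) != event_id
    }

pending = {
    ("m.room.member", "@a:hs"): "$m1",  # same event as before: dropped
    ("m.room.member", "@b:hs"): "$m2",  # changed since last send: kept
}
assert drop_known_state(pending, {"@a:hs": "$m1", "@b:hs": "$old"}) == {
    ("m.room.member", "@b:hs"): "$m2",
}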
any(ev.type == EventTypes.Member for ev in batch.events) or + ( + # XXX: this may include false positives in the form of LL + # members which have snuck into state + batch.limited and + any(t == EventTypes.Member for (t, k) in state) + ) or since_token is None ) ): @@ -1629,6 +1717,16 @@ class SyncHandler(object): unread_notifications["highlight_count"] = notifs["highlight_count"] sync_result_builder.joined.append(room_sync) + + if batch.limited and since_token: + user_id = sync_result_builder.sync_config.user.to_string() + logger.info( + "Incremental gappy sync of %s for user %s with %d state events" % ( + room_id, + user_id, + len(state), + ) + ) elif room_builder.rtype == "archived": room_sync = ArchivedSyncResult( room_id=room_id, @@ -1722,17 +1820,17 @@ def _calculate_state( event_id_to_key = { e: key for key, e in itertools.chain( - timeline_contains.items(), - previous.items(), - timeline_start.items(), - current.items(), + iteritems(timeline_contains), + iteritems(previous), + iteritems(timeline_start), + iteritems(current), ) } - c_ids = set(e for e in current.values()) - ts_ids = set(e for e in timeline_start.values()) - p_ids = set(e for e in previous.values()) - tc_ids = set(e for e in timeline_contains.values()) + c_ids = set(e for e in itervalues(current)) + ts_ids = set(e for e in itervalues(timeline_start)) + p_ids = set(e for e in itervalues(previous)) + tc_ids = set(e for e in itervalues(timeline_contains)) # If we are lazyloading room members, we explicitly add the membership events # for the senders in the timeline into the state block returned by /sync, @@ -1746,7 +1844,7 @@ def _calculate_state( if lazy_load_members: p_ids.difference_update( - e for t, e in timeline_start.iteritems() + e for t, e in iteritems(timeline_start) if t[0] == EventTypes.Member ) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 2d2d3d5a0d..c610933dd4 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -20,6 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.types import UserID, get_domain_from_id +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.logcontext import run_in_background from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -68,6 +69,11 @@ class TypingHandler(object): # map room IDs to sets of users currently typing self._room_typing = {} + # caches which room_ids changed at which serials + self._typing_stream_change_cache = StreamChangeCache( + "TypingStreamChangeCache", self._latest_room_serial, + ) + self.clock.looping_call( self._handle_timeouts, 5000, @@ -218,6 +224,7 @@ class TypingHandler(object): for domain in set(get_domain_from_id(u) for u in users): if domain != self.server_name: + logger.debug("sending typing update to %s", domain) self.federation.send_edu( destination=domain, edu_type="m.typing", @@ -274,19 +281,29 @@ class TypingHandler(object): self._latest_room_serial += 1 self._room_serials[member.room_id] = self._latest_room_serial + self._typing_stream_change_cache.entity_has_changed( + member.room_id, self._latest_room_serial, + ) self.notifier.on_new_event( "typing_key", self._latest_room_serial, rooms=[member.room_id] ) def get_all_typing_updates(self, last_id, current_id): - # TODO: Work out a way to do this without scanning the entire state. 
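# A minimal sketch of the cache-assisted scan introduced below, assuming
# get_all_entities_changed(last_id) returns the rooms changed since
# last_id, or None when last_id is older than the cache covers (in which
# case every known room is checked, as the old code always did):

def typing_updates(last_id, current_id, room_serials, changed_rooms):
    if last_id == current_id:
        return []
    if changed_rooms is None:
        changed_rooms = room_serials  # cache can't help: scan everything
    rows = []
    for room_id in changed_rooms:
        serial = room_serials[room_id]
        if last_id < serial <= current_id:
            rows.append((serial, room_id))
    rows.sort()
    return rows

serials = {"!a:hs": 3, "!b:hs": 7, "!c:hs": 9}
# the cache reports only !b and !c changed since serial 5:
assert typing_updates(5, 9, serials, ["!b:hs", "!c:hs"]) == [(7, "!b:hs"), (9, "!c:hs")]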
        if last_id == current_id:
            return []

+        changed_rooms = self._typing_stream_change_cache.get_all_entities_changed(
+            last_id,
+        )
+
+        if changed_rooms is None:
+            changed_rooms = self._room_serials
+
         rows = []
-        for room_id, serial in self._room_serials.items():
-            if last_id < serial and serial <= current_id:
+        for room_id in changed_rooms:
+            serial = self._room_serials[room_id]
+            if last_id < serial <= current_id:
                 typing = self._room_typing[room_id]
                 rows.append((serial, room_id, list(typing)))
         rows.sort()
diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py
index 58ef8d3ce4..a3f9e4f67c 100644
--- a/synapse/http/__init__.py
+++ b/synapse/http/__init__.py
@@ -38,12 +38,12 @@ def cancelled_to_request_timed_out_error(value, timeout):
     return value
 
 
-ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
+ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$')
 
 
 def redact_uri(uri):
     """Strips access tokens from the uri and replaces them with <redacted>"""
     return ACCESS_TOKEN_RE.sub(
-        br'\1<redacted>\3',
+        r'\1<redacted>\3',
         uri
     )
diff --git a/synapse/http/client.py b/synapse/http/client.py
index ab4fbf59b2..3d05f83b8c 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -13,24 +13,25 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import logging
-import urllib
 
-from six import StringIO
+from six import text_type
+from six.moves import urllib
 
+import treq
 from canonicaljson import encode_canonical_json, json
 from prometheus_client import Counter
 
 from OpenSSL import SSL
 from OpenSSL.SSL import VERIFY_NONE
-from twisted.internet import defer, protocol, reactor, ssl, task
+from twisted.internet import defer, protocol, reactor, ssl
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
 from twisted.web._newclient import ResponseDone
 from twisted.web.client import (
     Agent,
     BrowserLikeRedirectAgent,
     ContentDecoderAgent,
-    FileBodyProducer as TwistedFileBodyProducer,
     GzipDecoder,
     HTTPConnectionPool,
     PartialDownloadError,
@@ -42,7 +43,7 @@ from twisted.web.http_headers import Headers
 from synapse.api.errors import Codes, HttpResponseException, SynapseError
 from synapse.http import cancelled_to_request_timed_out_error, redact_uri
 from synapse.http.endpoint import SpiderEndpoint
-from synapse.util.async_helpers import add_timeout_to_deferred
+from synapse.util.async_helpers import timeout_deferred
 from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.logcontext import make_deferred_yieldable
@@ -83,8 +84,10 @@ class SimpleHttpClient(object):
         if hs.config.user_agent_suffix:
             self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,)
 
+        self.user_agent = self.user_agent.encode('ascii')
+
     @defer.inlineCallbacks
-    def request(self, method, uri, *args, **kwargs):
+    def request(self, method, uri, data=b'', headers=None):
         # A small wrapper around self.agent.request() so we can easily attach
         # counters to it
         outgoing_requests_counter.labels(method).inc()
@@ -93,10 +96,10 @@ class SimpleHttpClient(object):
         logger.info("Sending request %s %s", method, redact_uri(uri))
 
         try:
-            request_deferred = self.agent.request(
-                method, uri, *args, **kwargs
+            request_deferred = treq.request(
+                method, uri, agent=self.agent, data=data, headers=headers
             )
-            add_timeout_to_deferred(
+            request_deferred = timeout_deferred(
                 request_deferred, 60, self.hs.get_reactor(),
                 cancelled_to_request_timed_out_error,
             )
@@ -112,7 +115,7 @@ class
SimpleHttpClient(object): incoming_responses_counter.labels(method, "ERR").inc() logger.info( "Error sending request to %s %s: %s %s", - method, redact_uri(uri), type(e).__name__, e.message + method, redact_uri(uri), type(e).__name__, e.args[0] ) raise @@ -137,7 +140,8 @@ class SimpleHttpClient(object): # TODO: Do we ever want to log message contents? logger.debug("post_urlencoded_get_json args: %s", args) - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode( + encode_urlencode_args(args), True).encode("utf8") actual_headers = { b"Content-Type": [b"application/x-www-form-urlencoded"], @@ -148,15 +152,14 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(query_bytes)) + data=query_bytes ) - body = yield make_deferred_yieldable(readBody(response)) - if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + body = yield make_deferred_yieldable(treq.json_content(response)) + defer.returnValue(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -191,9 +194,9 @@ class SimpleHttpClient(object): response = yield self.request( "POST", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -248,7 +251,7 @@ class SimpleHttpClient(object): ValueError: if the response was not JSON """ if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) json_str = encode_canonical_json(json_body) @@ -262,9 +265,9 @@ class SimpleHttpClient(object): response = yield self.request( "PUT", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), - bodyProducer=FileBodyProducer(StringIO(json_str)) + data=json_str ) body = yield make_deferred_yieldable(readBody(response)) @@ -293,7 +296,7 @@ class SimpleHttpClient(object): HttpResponseException on a non-2xx HTTP response. 
""" if len(args): - query_bytes = urllib.urlencode(args, True) + query_bytes = urllib.parse.urlencode(args, True) uri = "%s?%s" % (uri, query_bytes) actual_headers = { @@ -304,7 +307,7 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - uri.encode("ascii"), + uri, headers=Headers(actual_headers), ) @@ -339,13 +342,14 @@ class SimpleHttpClient(object): response = yield self.request( "GET", - url.encode("ascii"), + url, headers=Headers(actual_headers), ) resp_headers = dict(response.headers.getAllRawHeaders()) - if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size: + if (b'Content-Length' in resp_headers and + int(resp_headers[b'Content-Length']) > max_size): logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) raise SynapseError( 502, @@ -378,7 +382,12 @@ class SimpleHttpClient(object): ) defer.returnValue( - (length, resp_headers, response.request.absoluteURI, response.code), + ( + length, + resp_headers, + response.request.absoluteURI.decode('ascii'), + response.code, + ), ) @@ -434,12 +443,12 @@ class CaptchaServerHttpClient(SimpleHttpClient): @defer.inlineCallbacks def post_urlencoded_get_raw(self, url, args={}): - query_bytes = urllib.urlencode(encode_urlencode_args(args), True) + query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) response = yield self.request( "POST", - url.encode("ascii"), - bodyProducer=FileBodyProducer(StringIO(query_bytes)), + url, + data=query_bytes, headers=Headers({ b"Content-Type": [b"application/x-www-form-urlencoded"], b"User-Agent": [self.user_agent], @@ -463,9 +472,9 @@ class SpiderEndpointFactory(object): def endpointForURI(self, uri): logger.info("Getting endpoint for %s", uri.toBytes()) - if uri.scheme == "http": + if uri.scheme == b"http": endpoint_factory = HostnameEndpoint - elif uri.scheme == "https": + elif uri.scheme == b"https": tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port) def endpoint_factory(reactor, host, port, **kw): @@ -510,7 +519,7 @@ def encode_urlencode_args(args): def encode_urlencode_arg(arg): - if isinstance(arg, unicode): + if isinstance(arg, text_type): return arg.encode('utf-8') elif isinstance(arg, list): return [encode_urlencode_arg(i) for i in arg] @@ -542,26 +551,3 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory): def creatorForNetloc(self, hostname, port): return self - - -class FileBodyProducer(TwistedFileBodyProducer): - """Workaround for https://twistedmatrix.com/trac/ticket/8473 - - We override the pauseProducing and resumeProducing methods in twisted's - FileBodyProducer so that they do not raise exceptions if the task has - already completed. - """ - - def pauseProducing(self): - try: - super(FileBodyProducer, self).pauseProducing() - except task.TaskDone: - # task has already completed - pass - - def resumeProducing(self): - try: - super(FileBodyProducer, self).resumeProducing() - except task.NotPaused: - # task was not paused (probably because it had already completed) - pass diff --git a/synapse/http/endpoint.py b/synapse/http/endpoint.py index b0c9369519..91025037a3 100644 --- a/synapse/http/endpoint.py +++ b/synapse/http/endpoint.py @@ -108,7 +108,7 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= Args: reactor: Twisted reactor. - destination (bytes): The name of the server to connect to. + destination (unicode): The name of the server to connect to. 
tls_client_options_factory (synapse.crypto.context_factory.ClientTLSOptionsFactory): Factory which generates TLS options for client connections. @@ -126,10 +126,17 @@ def matrix_federation_endpoint(reactor, destination, tls_client_options_factory= transport_endpoint = HostnameEndpoint default_port = 8008 else: + # the SNI string should be the same as the Host header, minus the port. + # as per https://github.com/matrix-org/synapse/issues/2525#issuecomment-336896777, + # the Host header and SNI should therefore be the server_name of the remote + # server. + tls_options = tls_client_options_factory.get_options(domain) + def transport_endpoint(reactor, host, port, timeout): return wrapClientTLS( - tls_client_options_factory.get_options(host), - HostnameEndpoint(reactor, host, port, timeout=timeout)) + tls_options, + HostnameEndpoint(reactor, host, port, timeout=timeout), + ) default_port = 8448 if port is None: diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b34bb8e31a..14b12cd1c4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -17,19 +17,22 @@ import cgi import logging import random import sys -import urllib +from io import BytesIO -from six import string_types -from six.moves.urllib import parse as urlparse +from six import PY3, string_types +from six.moves import urllib -from canonicaljson import encode_canonical_json, json +import attr +import treq +from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json -from twisted.internet import defer, protocol, reactor +from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError +from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, readBody +from twisted.web.client import Agent, FileBodyProducer, HTTPConnectionPool from twisted.web.http_headers import Headers import synapse.metrics @@ -40,14 +43,12 @@ from synapse.api.errors import ( HttpResponseException, SynapseError, ) -from synapse.http import cancelled_to_request_timed_out_error from synapse.http.endpoint import matrix_federation_endpoint -from synapse.util import logcontext -from synapse.util.async_helpers import add_timeout_to_deferred +from synapse.util.async_helpers import timeout_deferred from synapse.util.logcontext import make_deferred_yieldable +from synapse.util.metrics import Measure logger = logging.getLogger(__name__) -outbound_logger = logging.getLogger("synapse.http.outbound") outgoing_requests_counter = Counter("synapse_http_matrixfederationclient_requests", "", ["method"]) @@ -58,20 +59,119 @@ incoming_responses_counter = Counter("synapse_http_matrixfederationclient_respon MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 +if PY3: + MAXINT = sys.maxsize +else: + MAXINT = sys.maxint + class MatrixFederationEndpointFactory(object): def __init__(self, hs): + self.reactor = hs.get_reactor() self.tls_client_options_factory = hs.tls_client_options_factory def endpointForURI(self, uri): - destination = uri.netloc + destination = uri.netloc.decode('ascii') return matrix_federation_endpoint( - reactor, destination, timeout=10, + self.reactor, destination, timeout=10, tls_client_options_factory=self.tls_client_options_factory ) +_next_id = 1 + + +@attr.s +class MatrixFederationRequest(object): + method = attr.ib() + """HTTP method + :type: str + """ + + path = attr.ib() + """HTTP path 
+ :type: str + """ + + destination = attr.ib() + """The remote server to send the HTTP request to. + :type: str""" + + json = attr.ib(default=None) + """JSON to send in the body. + :type: dict|None + """ + + json_callback = attr.ib(default=None) + """A callback to generate the JSON. + :type: func|None + """ + + query = attr.ib(default=None) + """Query arguments. + :type: dict|None + """ + + txn_id = attr.ib(default=None) + """Unique ID for this request (for logging) + :type: str|None + """ + + def __attrs_post_init__(self): + global _next_id + self.txn_id = "%s-O-%s" % (self.method, _next_id) + _next_id = (_next_id + 1) % (MAXINT - 1) + + def get_json(self): + if self.json_callback: + return self.json_callback() + return self.json + + +@defer.inlineCallbacks +def _handle_json_response(reactor, timeout_sec, request, response): + """ + Reads the JSON body of a response, with a timeout + + Args: + reactor (IReactor): twisted reactor, for the timeout + timeout_sec (float): number of seconds to wait for response to complete + request (MatrixFederationRequest): the request that triggered the response + response (IResponse): response to the request + + Returns: + dict: parsed JSON response + """ + try: + check_content_type_is_json(response.headers) + + d = treq.json_content(response) + d = timeout_deferred( + d, + timeout=timeout_sec, + reactor=reactor, + ) + + body = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%s] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) + raise + logger.info( + "{%s} [%s] Completed: %d %s", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) + defer.returnValue(body) + + class MatrixFederationHttpClient(object): """HTTP client used to talk to other homeservers over the federation protocol. Send client certificates and signs requests. @@ -85,7 +185,9 @@ class MatrixFederationHttpClient(object): self.hs = hs self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname + reactor = hs.get_reactor() pool = HTTPConnectionPool(reactor) + pool.retryAutomatically = False pool.maxPersistentPerHost = 5 pool.cachedConnectionTimeout = 2 * 60 self.agent = Agent.usingEndpointFactory( @@ -93,28 +195,36 @@ class MatrixFederationHttpClient(object): ) self.clock = hs.get_clock() self._store = hs.get_datastore() - self.version_string = hs.version_string - self._next_id = 1 + self.version_string = hs.version_string.encode('ascii') + self.default_timeout = 60 - def _create_url(self, destination, path_bytes, param_bytes, query_bytes): - return urlparse.urlunparse( - ("matrix", destination, path_bytes, param_bytes, query_bytes, "") - ) + def schedule(x): + reactor.callLater(_EPSILON, x) + + self._cooperator = Cooperator(scheduler=schedule) @defer.inlineCallbacks - def _request(self, destination, method, path, - body_callback, headers_dict={}, param_bytes=b"", - query_bytes=b"", retry_on_dns_fail=True, - timeout=None, long_retries=False, - ignore_backoff=False, - backoff_on_404=False): - """ Creates and sends a request to the given server + def _send_request( + self, + request, + retry_on_dns_fail=True, + timeout=None, + long_retries=False, + ignore_backoff=False, + backoff_on_404=False + ): + """ + Sends a request to the given server. + Args: - destination (str): The remote server to send the HTTP request to. 
- method (str): HTTP method - path (str): The HTTP path + request (MatrixFederationRequest): details of request to be sent + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server). 60s by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + backoff_on_404 (bool): Back off if we get a 404 Returns: @@ -132,38 +242,39 @@ class MatrixFederationHttpClient(object): (May also fail with plenty of other Exceptions for things like DNS failures, connection failures, SSL failures.) """ + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + if ( self.hs.config.federation_domain_whitelist is not None and - destination not in self.hs.config.federation_domain_whitelist + request.destination not in self.hs.config.federation_domain_whitelist ): - raise FederationDeniedError(destination) + raise FederationDeniedError(request.destination) limiter = yield synapse.util.retryutils.get_retry_limiter( - destination, + request.destination, self.clock, self._store, backoff_on_404=backoff_on_404, ignore_backoff=ignore_backoff, ) - destination = destination.encode("ascii") - path_bytes = path.encode("ascii") + method = request.method + destination = request.destination + path_bytes = request.path.encode("ascii") + if request.query: + query_bytes = encode_query_args(request.query) + else: + query_bytes = b"" + + headers_dict = { + "User-Agent": [self.version_string], + "Host": [request.destination], + } + with limiter: - headers_dict[b"User-Agent"] = [self.version_string] - headers_dict[b"Host"] = [destination] - - url_bytes = self._create_url( - destination, path_bytes, param_bytes, query_bytes - ) - - txn_id = "%s-O-%s" % (method, self._next_id) - self._next_id = (self._next_id + 1) % (sys.maxint - 1) - - outbound_logger.info( - "{%s} [%s] Sending request: %s %s", - txn_id, destination, method, url_bytes - ) - # XXX: Would be much nicer to retry only at the transaction-layer # (once we have reliable transactions in place) if long_retries: @@ -171,88 +282,119 @@ class MatrixFederationHttpClient(object): else: retries_left = MAX_SHORT_RETRIES - http_url_bytes = urlparse.urlunparse( - ("", "", path_bytes, param_bytes, query_bytes, "") - ) + url = urllib.parse.urlunparse(( + b"matrix", destination.encode("ascii"), + path_bytes, None, query_bytes, b"", + )).decode('ascii') - log_result = None - try: - while True: - producer = None - if body_callback: - producer = body_callback(method, http_url_bytes, headers_dict) + http_url = urllib.parse.urlunparse(( + b"", b"", + path_bytes, None, query_bytes, b"", + )).decode('ascii') - try: - request_deferred = self.agent.request( - method, - url_bytes, - Headers(headers_dict), - producer + while True: + try: + json = request.get_json() + if json: + data = encode_canonical_json(json) + headers_dict["Content-Type"] = ["application/json"] + self.sign_request( + destination, method, http_url, headers_dict, json ) - add_timeout_to_deferred( - request_deferred, - timeout / 1000. 
if timeout else 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, + else: + data = None + self.sign_request(destination, method, http_url, headers_dict) + + logger.info( + "{%s} [%s] Sending request: %s %s", + request.txn_id, destination, method, url + ) + + if data: + producer = FileBodyProducer( + BytesIO(data), + cooperator=self._cooperator ) + else: + producer = None + + request_deferred = treq.request( + method, + url, + headers=Headers(headers_dict), + data=producer, + agent=self.agent, + reactor=self.hs.get_reactor(), + unbuffered=True + ) + + request_deferred = timeout_deferred( + request_deferred, + timeout=_sec_timeout, + reactor=self.hs.get_reactor(), + ) + + with Measure(self.clock, "outbound_request"): response = yield make_deferred_yieldable( request_deferred, ) - log_result = "%d %s" % (response.code, response.phrase,) - break - except Exception as e: - if not retry_on_dns_fail and isinstance(e, DNSLookupError): - logger.warn( - "DNS Lookup failed to %s with %s", - destination, - e - ) - log_result = "DNS Lookup failed to %s with %s" % ( - destination, e - ) - raise + break + except Exception as e: + logger.warn( + "{%s} [%s] Request failed: %s %s: %s", + request.txn_id, + destination, + method, + url, + _flatten_response_never_received(e), + ) - logger.warn( - "{%s} Sending request failed to %s: %s %s: %s", - txn_id, + if not retry_on_dns_fail and isinstance(e, DNSLookupError): + raise + + if retries_left and not timeout: + if long_retries: + delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) + delay = min(delay, 60) + delay *= random.uniform(0.8, 1.4) + else: + delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) + delay = min(delay, 2) + delay *= random.uniform(0.8, 1.4) + + logger.debug( + "{%s} [%s] Waiting %ss before re-sending...", + request.txn_id, destination, - method, - url_bytes, - _flatten_response_never_received(e), + delay, ) - log_result = _flatten_response_never_received(e) + yield self.clock.sleep(delay) + retries_left -= 1 + else: + raise - if retries_left and not timeout: - if long_retries: - delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) - delay = min(delay, 60) - delay *= random.uniform(0.8, 1.4) - else: - delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) - delay = min(delay, 2) - delay *= random.uniform(0.8, 1.4) - - yield self.clock.sleep(delay) - retries_left -= 1 - else: - raise - finally: - outbound_logger.info( - "{%s} [%s] Result: %s", - txn_id, - destination, - log_result, - ) + logger.info( + "{%s} [%s] Got response headers: %d %s", + request.txn_id, + destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + ) if 200 <= response.code < 300: pass else: # :'( # Update transactions table? 
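# The retry loop above sleeps between attempts with a capped, jittered
# exponential delay; a standalone sketch of the same arithmetic, with the
# constants copied from this file:

import random

MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3

def retry_delay(retries_left, long_retries):
    if long_retries:
        delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
    else:
        delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
    # 0.8x-1.4x jitter, so many servers retrying one peer don't stampede it
    return delay * random.uniform(0.8, 1.4)

# first short retry: 0.5 * 2 ** 0 = 0.5s before jitter
assert 0.4 <= retry_delay(MAX_SHORT_RETRIES, long_retries=False) <= 0.7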
- with logcontext.PreserveLoggingContext(): - body = yield readBody(response) + d = treq.content(response) + d = timeout_deferred( + d, + timeout=_sec_timeout, + reactor=self.hs.get_reactor(), + ) + body = yield make_deferred_yieldable(d) raise HttpResponseException( response.code, response.phrase, body ) @@ -297,11 +439,11 @@ class MatrixFederationHttpClient(object): auth_headers = [] for key, sig in request["signatures"][self.server_name].items(): - auth_headers.append(bytes( + auth_headers.append(( "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( self.server_name, key, sig, - ) - )) + )).encode('ascii') + ) headers_dict[b"Authorization"] = auth_headers @@ -346,38 +488,27 @@ class MatrixFederationHttpClient(object): is not on our federation whitelist """ - if not json_data_callback: - def json_data_callback(): - return data + request = MatrixFederationRequest( + method="PUT", + destination=destination, + path=path, + query=args, + json_callback=json_data_callback, + json=data, + ) - def body_callback(method, url_bytes, headers_dict): - json_data = json_data_callback() - self.sign_request( - destination, method, url_bytes, headers_dict, json_data - ) - producer = _JsonProducer(json_data) - return producer - - response = yield self._request( - destination, - "PUT", - path, - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, - query_bytes=encode_query_args(args), + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, backoff_on_404=backoff_on_404, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - defer.returnValue(json.loads(body)) + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) + defer.returnValue(body) @defer.inlineCallbacks def post_json(self, destination, path, data={}, long_retries=False, @@ -411,32 +542,30 @@ class MatrixFederationHttpClient(object): is not on our federation whitelist """ - def body_callback(method, url_bytes, headers_dict): - self.sign_request( - destination, method, url_bytes, headers_dict, data - ) - return _JsonProducer(data) + request = MatrixFederationRequest( + method="POST", + destination=destination, + path=path, + query=args, + json=data, + ) - response = yield self._request( - destination, - "POST", - path, - query_bytes=encode_query_args(args), - body_callback=body_callback, - headers_dict={"Content-Type": ["application/json"]}, + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
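# A standalone rendering of the Authorization header built in the signing
# hunk above; the origin and signature values are placeholders, only the
# X-Matrix formatting and the ascii encoding mirror the patch:

def build_auth_headers(origin, signatures):
    """`signatures` maps a key id (e.g. "ed25519:a_key") to a base64 sig."""
    return [
        ("X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin, key, sig))
        .encode("ascii")
        for key, sig in signatures.items()
    ]

headers = build_auth_headers("origin.example", {"ed25519:a_key": "c2ln"})
assert headers == [b'X-Matrix origin=origin.example,key="ed25519:a_key",sig="c2ln"']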
- check_content_type_is_json(response.headers) + if timeout: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout - with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - - defer.returnValue(json.loads(body)) + body = yield _handle_json_response( + self.hs.get_reactor(), _sec_timeout, request, response, + ) + defer.returnValue(body) @defer.inlineCallbacks def get_json(self, destination, path, args=None, retry_on_dns_fail=True, @@ -471,29 +600,24 @@ class MatrixFederationHttpClient(object): logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, + query=args, + ) - response = yield self._request( - destination, - "GET", - path, - query_bytes=encode_query_args(args), - body_callback=body_callback, + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? - check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - - defer.returnValue(json.loads(body)) + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) + defer.returnValue(body) @defer.inlineCallbacks def delete_json(self, destination, path, long_retries=False, @@ -523,26 +647,24 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ + request = MatrixFederationRequest( + method="DELETE", + destination=destination, + path=path, + query=args, + ) - response = yield self._request( - destination, - "DELETE", - path, - query_bytes=encode_query_args(args), - headers_dict={"Content-Type": ["application/json"]}, + response = yield self._send_request( + request, long_retries=long_retries, timeout=timeout, ignore_backoff=ignore_backoff, ) - if 200 <= response.code < 300: - # We need to update the transactions table to say it was sent? 
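# Every JSON verb (PUT/POST/GET/DELETE) around this point now follows the
# same three steps; a schematic sketch with stand-in callables rather than
# the real _send_request/_handle_json_response:

import attr

@attr.s
class SketchRequest(object):  # shaped like MatrixFederationRequest above
    method = attr.ib()
    destination = attr.ib()
    path = attr.ib()
    query = attr.ib(default=None)
    json = attr.ib(default=None)

def fetch_json(send, handle, destination, path, args=None):
    request = SketchRequest(method="GET", destination=destination,
                            path=path, query=args)
    response = send(request)          # retries, signing, timeouts
    return handle(request, response)  # checked + parsed JSON body

send = lambda req: {"code": 200, "body": {"ok": True}}
handle = lambda req, resp: resp["body"]
assert fetch_json(send, handle, "remote.example", "/_matrix/key/v2/server") == {"ok": True}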
- check_content_type_is_json(response.headers) - - with logcontext.PreserveLoggingContext(): - body = yield readBody(response) - - defer.returnValue(json.loads(body)) + body = yield _handle_json_response( + self.hs.get_reactor(), self.default_timeout, request, response, + ) + defer.returnValue(body) @defer.inlineCallbacks def get_file(self, destination, path, output_stream, args={}, @@ -569,26 +691,15 @@ class MatrixFederationHttpClient(object): Fails with ``FederationDeniedError`` if this destination is not on our federation whitelist """ + request = MatrixFederationRequest( + method="GET", + destination=destination, + path=path, + query=args, + ) - encoded_args = {} - for k, vs in args.items(): - if isinstance(vs, string_types): - vs = [vs] - encoded_args[k] = [v.encode("UTF-8") for v in vs] - - query_bytes = urllib.urlencode(encoded_args, True) - logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail) - - def body_callback(method, url_bytes, headers_dict): - self.sign_request(destination, method, url_bytes, headers_dict) - return None - - response = yield self._request( - destination, - "GET", - path, - query_bytes=query_bytes, - body_callback=body_callback, + response = yield self._send_request( + request, retry_on_dns_fail=retry_on_dns_fail, ignore_backoff=ignore_backoff, ) @@ -596,14 +707,25 @@ class MatrixFederationHttpClient(object): headers = dict(response.headers.getAllRawHeaders()) try: - with logcontext.PreserveLoggingContext(): - length = yield _readBodyToFile( - response, output_stream, max_size - ) - except Exception: - logger.exception("Failed to download body") + d = _readBodyToFile(response, output_stream, max_size) + d.addTimeout(self.default_timeout, self.hs.get_reactor()) + length = yield make_deferred_yieldable(d) + except Exception as e: + logger.warn( + "{%s} [%s] Error reading response: %s", + request.txn_id, + request.destination, + e, + ) raise - + logger.info( + "{%s} [%s] Completed: %d %s [%d bytes]", + request.txn_id, + request.destination, + response.code, + response.phrase.decode('ascii', errors='replace'), + length, + ) defer.returnValue((length, headers)) @@ -639,30 +761,6 @@ def _readBodyToFile(response, stream, max_size): return d -class _JsonProducer(object): - """ Used by the twisted http client to create the HTTP body from json - """ - def __init__(self, jsn): - self.reset(jsn) - - def reset(self, jsn): - self.body = encode_canonical_json(jsn) - self.length = len(self.body) - - def startProducing(self, consumer): - consumer.write(self.body) - return defer.succeed(None) - - def pauseProducing(self): - pass - - def stopProducing(self): - pass - - def resumeProducing(self): - pass - - def _flatten_response_never_received(e): if hasattr(e, "reasons"): reasons = ", ".join( @@ -693,7 +791,7 @@ def check_content_type_is_json(headers): "No Content-Type header" ) - c_type = c_type[0] # only the first header + c_type = c_type[0].decode('ascii') # only the first header val, options = cgi.parse_header(c_type) if val != "application/json": raise RuntimeError( @@ -711,6 +809,6 @@ def encode_query_args(args): vs = [vs] encoded_args[k] = [v.encode("UTF-8") for v in vs] - query_bytes = urllib.urlencode(encoded_args, True) + query_bytes = urllib.parse.urlencode(encoded_args, True) - return query_bytes + return query_bytes.encode('utf8') diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 72c2654678..fedb4e6b18 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -162,7 +162,7 @@ 
class RequestMetrics(object): with _in_flight_requests_lock: _in_flight_requests.add(self) - def stop(self, time_sec, request): + def stop(self, time_sec, response_code, sent_bytes): with _in_flight_requests_lock: _in_flight_requests.discard(self) @@ -179,35 +179,35 @@ class RequestMetrics(object): ) return - response_code = str(request.code) + response_code = str(response_code) - outgoing_responses_counter.labels(request.method, response_code).inc() + outgoing_responses_counter.labels(self.method, response_code).inc() - response_count.labels(request.method, self.name, tag).inc() + response_count.labels(self.method, self.name, tag).inc() - response_timer.labels(request.method, self.name, tag, response_code).observe( + response_timer.labels(self.method, self.name, tag, response_code).observe( time_sec - self.start ) resource_usage = context.get_resource_usage() - response_ru_utime.labels(request.method, self.name, tag).inc( + response_ru_utime.labels(self.method, self.name, tag).inc( resource_usage.ru_utime, ) - response_ru_stime.labels(request.method, self.name, tag).inc( + response_ru_stime.labels(self.method, self.name, tag).inc( resource_usage.ru_stime, ) - response_db_txn_count.labels(request.method, self.name, tag).inc( + response_db_txn_count.labels(self.method, self.name, tag).inc( resource_usage.db_txn_count ) - response_db_txn_duration.labels(request.method, self.name, tag).inc( + response_db_txn_duration.labels(self.method, self.name, tag).inc( resource_usage.db_txn_duration_sec ) - response_db_sched_duration.labels(request.method, self.name, tag).inc( + response_db_sched_duration.labels(self.method, self.name, tag).inc( resource_usage.db_sched_duration_sec ) - response_size.labels(request.method, self.name, tag).inc(request.sentLength) + response_size.labels(self.method, self.name, tag).inc(sent_bytes) # We always call this at the end to ensure that we update the metrics # regardless of whether a call to /metrics while the request was in diff --git a/synapse/http/server.py b/synapse/http/server.py index 2d5c23e673..b4b25cab19 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -84,10 +84,21 @@ def wrap_json_request_handler(h): logger.info( "%s SynapseError: %s - %s", request, code, e.msg ) - respond_with_json( - request, code, e.error_dict(), send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - ) + + # Only respond with an error response if we haven't already started + # writing, otherwise lets just kill the connection + if request.startedWriting: + if request.transport: + try: + request.transport.abortConnection() + except Exception: + # abortConnection throws if the connection is already closed + pass + else: + respond_with_json( + request, code, e.error_dict(), send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + ) except Exception: # failure.Failure() fishes the original Failure out @@ -100,16 +111,26 @@ def wrap_json_request_handler(h): request, f.getTraceback().rstrip(), ) - respond_with_json( - request, - 500, - { - "error": "Internal server error", - "errcode": Codes.UNKNOWN, - }, - send_cors=True, - pretty_print=_request_user_agent_is_curl(request), - ) + # Only respond with an error response if we haven't already started + # writing, otherwise lets just kill the connection + if request.startedWriting: + if request.transport: + try: + request.transport.abortConnection() + except Exception: + # abortConnection throws if the connection is already closed + pass + else: + respond_with_json( + request, + 500, + { + "error": 
"Internal server error", + "errcode": Codes.UNKNOWN, + }, + send_cors=True, + pretty_print=_request_user_agent_is_curl(request), + ) return wrap_async_request_handler(wrapped_request_handler) diff --git a/synapse/http/site.py b/synapse/http/site.py index 88ed3714f9..e508c0bd4f 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -75,17 +75,35 @@ class SynapseRequest(Request): return '<%s at 0x%x method=%r uri=%r clientproto=%r site=%r>' % ( self.__class__.__name__, id(self), - self.method, + self.get_method(), self.get_redacted_uri(), - self.clientproto, + self.clientproto.decode('ascii', errors='replace'), self.site.site_tag, ) def get_request_id(self): - return "%s-%i" % (self.method, self.request_seq) + return "%s-%i" % (self.get_method(), self.request_seq) def get_redacted_uri(self): - return redact_uri(self.uri) + uri = self.uri + if isinstance(uri, bytes): + uri = self.uri.decode('ascii') + return redact_uri(uri) + + def get_method(self): + """Gets the method associated with the request (or placeholder if not + method has yet been received). + + Note: This is necessary as the placeholder value in twisted is str + rather than bytes, so we need to sanitise `self.method`. + + Returns: + str + """ + method = self.method + if isinstance(method, bytes): + method = self.method.decode('ascii') + return method def get_user_agent(self): return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] @@ -116,7 +134,7 @@ class SynapseRequest(Request): # dispatching to the handler, so that the handler # can update the servlet name in the request # metrics - requests_counter.labels(self.method, + requests_counter.labels(self.get_method(), self.request_metrics.name).inc() @contextlib.contextmanager @@ -204,14 +222,14 @@ class SynapseRequest(Request): self.start_time = time.time() self.request_metrics = RequestMetrics() self.request_metrics.start( - self.start_time, name=servlet_name, method=self.method, + self.start_time, name=servlet_name, method=self.get_method(), ) self.site.access_logger.info( "%s - %s - Received request: %s %s", self.getClientIP(), self.site.site_tag, - self.method, + self.get_method(), self.get_redacted_uri() ) @@ -277,15 +295,15 @@ class SynapseRequest(Request): int(usage.db_txn_count), self.sentLength, code, - self.method, + self.get_method(), self.get_redacted_uri(), - self.clientproto, + self.clientproto.decode('ascii', errors='replace'), user_agent, usage.evt_db_fetch_count, ) try: - self.request_metrics.stop(self.finish_time, self) + self.request_metrics.stop(self.finish_time, self.code, self.sentLength) except Exception as e: logger.warn("Failed to stop metrics: %r", e) @@ -305,7 +323,7 @@ class XForwardedForRequest(SynapseRequest): C{b"-"}. 
""" return self.requestHeaders.getRawHeaders( - b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip() + b"x-forwarded-for", [b"-"])[0].split(b",")[0].strip().decode('ascii') class SynapseRequestFactory(object): diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 550f8443f7..59900aa5d1 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -18,8 +18,11 @@ import gc import logging import os import platform +import threading import time +import six + import attr from prometheus_client import Counter, Gauge, Histogram from prometheus_client.core import REGISTRY, GaugeMetricFamily @@ -68,7 +71,7 @@ class LaterGauge(object): return if isinstance(calls, dict): - for k, v in calls.items(): + for k, v in six.iteritems(calls): g.add_metric(k, v) else: g.add_metric([], calls) @@ -87,6 +90,109 @@ class LaterGauge(object): all_gauges[self.name] = self +class InFlightGauge(object): + """Tracks number of things (e.g. requests, Measure blocks, etc) in flight + at any given time. + + Each InFlightGauge will create a metric called `_total` that counts + the number of in flight blocks, as well as a metrics for each item in the + given `sub_metrics` as `_` which will get updated by the + callbacks. + + Args: + name (str) + desc (str) + labels (list[str]) + sub_metrics (list[str]): A list of sub metrics that the callbacks + will update. + """ + + def __init__(self, name, desc, labels, sub_metrics): + self.name = name + self.desc = desc + self.labels = labels + self.sub_metrics = sub_metrics + + # Create a class which have the sub_metrics values as attributes, which + # default to 0 on initialization. Used to pass to registered callbacks. + self._metrics_class = attr.make_class( + "_MetricsEntry", + attrs={x: attr.ib(0) for x in sub_metrics}, + slots=True, + ) + + # Counts number of in flight blocks for a given set of label values + self._registrations = {} + + # Protects access to _registrations + self._lock = threading.Lock() + + self._register_with_collector() + + def register(self, key, callback): + """Registers that we've entered a new block with labels `key`. + + `callback` gets called each time the metrics are collected. The same + value must also be given to `unregister`. + + `callback` gets called with an object that has an attribute per + sub_metric, which should be updated with the necessary values. Note that + the metrics object is shared between all callbacks registered with the + same key. + + Note that `callback` may be called on a separate thread. + """ + with self._lock: + self._registrations.setdefault(key, set()).add(callback) + + def unregister(self, key, callback): + """Registers that we've exited a block with labels `key`. + """ + + with self._lock: + self._registrations.setdefault(key, set()).discard(callback) + + def collect(self): + """Called by prometheus client when it reads metrics. + + Note: may be called by a separate thread. 
+ """ + in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) + + metrics_by_key = {} + + # We copy so that we don't mutate the list while iterating + with self._lock: + keys = list(self._registrations) + + for key in keys: + with self._lock: + callbacks = set(self._registrations[key]) + + in_flight.add_metric(key, len(callbacks)) + + metrics = self._metrics_class() + metrics_by_key[key] = metrics + for callback in callbacks: + callback(metrics) + + yield in_flight + + for name in self.sub_metrics: + gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) + for key, metrics in six.iteritems(metrics_by_key): + gauge.add_metric(key, getattr(metrics, name)) + yield gauge + + def _register_with_collector(self): + if self.name in all_gauges.keys(): + logger.warning("%s already registered, reregistering" % (self.name,)) + REGISTRY.unregister(all_gauges.pop(self.name)) + + REGISTRY.register(self) + all_gauges[self.name] = self + + # # Detailed CPU metrics # diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 167167be0a..037f1c490e 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import threading import six @@ -23,6 +24,9 @@ from twisted.internet import defer from synapse.util.logcontext import LoggingContext, PreserveLoggingContext +logger = logging.getLogger(__name__) + + _background_process_start_count = Counter( "synapse_background_process_start_count", "Number of background processes started", @@ -97,9 +101,13 @@ class _Collector(object): labels=["name"], ) - # We copy the dict so that it doesn't change from underneath us + # We copy the dict so that it doesn't change from underneath us. 
+ # We also copy the process lists as that can also change with _bg_metrics_lock: - _background_processes_copy = dict(_background_processes) + _background_processes_copy = { + k: list(v) + for k, v in six.iteritems(_background_processes) + } for desc, processes in six.iteritems(_background_processes_copy): background_process_in_flight_count.add_metric( @@ -191,6 +199,8 @@ def run_as_background_process(desc, func, *args, **kwargs): try: yield func(*args, **kwargs) + except Exception: + logger.exception("Background process '%s' threw an exception", desc) finally: proc.update_metrics() diff --git a/synapse/notifier.py b/synapse/notifier.py index 82f391481c..340b16ce25 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -24,13 +24,10 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError from synapse.handlers.presence import format_user_presence_state from synapse.metrics import LaterGauge +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import StreamToken -from synapse.util.async_helpers import ( - DeferredTimeoutError, - ObservableDeferred, - add_timeout_to_deferred, -) -from synapse.util.logcontext import PreserveLoggingContext, run_in_background +from synapse.util.async_helpers import ObservableDeferred, timeout_deferred +from synapse.util.logcontext import PreserveLoggingContext from synapse.util.logutils import log_function from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -252,7 +249,10 @@ class Notifier(object): def _on_new_room_event(self, event, room_stream_id, extra_users=[]): """Notify any user streams that are interested in this room event""" # poke any interested application service. - run_in_background(self._notify_app_services, room_stream_id) + run_as_background_process( + "notify_app_services", + self._notify_app_services, room_stream_id, + ) if self.federation_sender: self.federation_sender.notify_new_events(room_stream_id) @@ -337,7 +337,7 @@ class Notifier(object): # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) - add_timeout_to_deferred( + listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000., self.hs.get_reactor(), @@ -354,7 +354,7 @@ class Notifier(object): # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break @@ -559,15 +559,16 @@ class Notifier(object): if end_time <= now: break - add_timeout_to_deferred( - listener.deferred.addTimeout, - (end_time - now) / 1000., - self.hs.get_reactor(), + listener.deferred = timeout_deferred( + listener.deferred, + timeout=(end_time - now) / 1000., + reactor=self.hs.get_reactor(), ) + try: with PreserveLoggingContext(): yield listener.deferred - except DeferredTimeoutError: + except defer.TimeoutError: break except defer.CancelledError: break diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 81e18bcf7d..48abd5e4d6 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -15,6 +15,8 @@ # limitations under the License. import logging +import six + from prometheus_client import Counter from twisted.internet import defer @@ -26,6 +28,9 @@ from synapse.util.metrics import Measure from . 
import push_rule_evaluator, push_tools +if six.PY3: + long = int + logger = logging.getLogger(__name__) http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "") @@ -96,7 +101,7 @@ class HttpPusher(object): @defer.inlineCallbacks def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) yield self._process() @defer.inlineCallbacks diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index bfa6df7b68..1a5a10d974 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -17,10 +17,11 @@ import email.mime.multipart import email.utils import logging import time -import urllib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from six.moves import urllib + import bleach import jinja2 @@ -440,7 +441,7 @@ class Mailer(object): def make_room_link(self, room_id): if self.hs.config.email_riot_base_url: - base_url = self.hs.config.email_riot_base_url + base_url = "%s/#/room" % (self.hs.config.email_riot_base_url) elif self.app_name == "Vector": # need /beta for Universal Links to work on iOS base_url = "https://vector.im/beta/#/room" @@ -474,7 +475,7 @@ class Mailer(object): # XXX: make r0 once API is stable return "%s_matrix/client/unstable/pushers/remove?%s" % ( self.hs.config.public_baseurl, - urllib.urlencode(params), + urllib.parse.urlencode(params), ) @@ -561,7 +562,7 @@ def _create_mxc_to_http_filter(config): return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( config.public_baseurl, serverAndMediaId, - urllib.urlencode(params), + urllib.parse.urlencode(params), fragment or "", ) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 9c55e79ef5..d4d983b00a 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -33,31 +33,38 @@ logger = logging.getLogger(__name__) # [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies REQUIREMENTS = { "jsonschema>=2.5.1": ["jsonschema>=2.5.1"], - "frozendict>=0.4": ["frozendict"], + "frozendict>=1": ["frozendict"], "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"], "canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"], "signedjson>=1.0.0": ["signedjson>=1.0.0"], "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], - "service_identity>=1.0.0": ["service_identity>=1.0.0"], + "service_identity>=16.0.0": ["service_identity>=16.0.0"], "Twisted>=17.1.0": ["twisted>=17.1.0"], + "treq>=15.1": ["treq>=15.1"], - # We use crypto.get_elliptic_curve which is only supported in >=0.15 - "pyopenssl>=0.15": ["OpenSSL>=0.15"], + # Twisted has required pyopenssl 16.0 since about Twisted 16.6. 
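# The pusher and mailer hunks above lean on six so the same code runs on
# Python 2 and 3; a condensed sketch of the two shims they use:

import six
from six.moves import urllib

if six.PY3:
    long = int  # py3 merged int/long; keeps old comparisons working

# six.moves.urllib resolves to urllib/urlparse on py2 and urllib.parse on
# py3, so one spelling of urlencode works everywhere:
qs = urllib.parse.urlencode({"limit": 10})
assert qs == "limit=10" and long(10) == 10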
+ "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], - "pyyaml": ["yaml"], - "pyasn1": ["pyasn1"], - "daemonize": ["daemonize"], - "bcrypt": ["bcrypt>=3.1.0"], - "pillow": ["PIL"], - "pydenticon": ["pydenticon"], - "sortedcontainers": ["sortedcontainers"], - "pysaml2>=3.0.0": ["saml2>=3.0.0"], - "pymacaroons-pynacl": ["pymacaroons"], + "pyyaml>=3.11": ["yaml"], + "pyasn1>=0.1.9": ["pyasn1"], + "pyasn1-modules>=0.0.7": ["pyasn1_modules"], + "daemonize>=2.3.1": ["daemonize"], + "bcrypt>=3.1.0": ["bcrypt>=3.1.0"], + "pillow>=3.1.2": ["PIL"], + "pydenticon>=0.2": ["pydenticon"], + "sortedcontainers>=1.4.4": ["sortedcontainers"], + "pysaml2>=3.0.0": ["saml2"], + "pymacaroons-pynacl>=0.9.3": ["pymacaroons"], "msgpack-python>=0.3.0": ["msgpack"], "phonenumbers>=8.2.0": ["phonenumbers"], - "six": ["six"], - "prometheus_client": ["prometheus_client"], - "attrs": ["attr"], + "six>=1.10": ["six"], + + # prometheus_client 0.4.0 changed the format of counter metrics + # (cf https://github.com/matrix-org/synapse/issues/4001) + "prometheus_client>=0.0.18,<0.4.0": ["prometheus_client"], + + # we use attr.s(slots), which arrived in 16.0.0 + "attrs>=16.0.0": ["attr>=16.0.0"], "netaddr>=0.7.18": ["netaddr"], } @@ -78,6 +85,9 @@ CONDITIONAL_REQUIREMENTS = { "affinity": { "affinity": ["affinity"], }, + "postgres": { + "psycopg2>=2.6": ["psycopg2"] + } } diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 8206a988f7..21b8c468fa 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + from synapse.storage import DataStore from synapse.storage.end_to_end_keys import EndToEndKeyStore from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -21,6 +23,13 @@ from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker +def __func__(inp): + if six.PY3: + return inp + else: + return inp.__func__ + + class SlavedDeviceStore(BaseSlavedStore): def __init__(self, db_conn, hs): super(SlavedDeviceStore, self).__init__(db_conn, hs) @@ -38,14 +47,14 @@ class SlavedDeviceStore(BaseSlavedStore): "DeviceListFederationStreamChangeCache", device_list_max, ) - get_device_stream_token = DataStore.get_device_stream_token.__func__ - get_user_whose_devices_changed = DataStore.get_user_whose_devices_changed.__func__ - get_devices_by_remote = DataStore.get_devices_by_remote.__func__ - _get_devices_by_remote_txn = DataStore._get_devices_by_remote_txn.__func__ - _get_e2e_device_keys_txn = DataStore._get_e2e_device_keys_txn.__func__ - mark_as_sent_devices_by_remote = DataStore.mark_as_sent_devices_by_remote.__func__ + get_device_stream_token = __func__(DataStore.get_device_stream_token) + get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed) + get_devices_by_remote = __func__(DataStore.get_devices_by_remote) + _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn) + _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn) + mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote) _mark_as_sent_devices_by_remote_txn = ( - DataStore._mark_as_sent_devices_by_remote_txn.__func__ + __func__(DataStore._mark_as_sent_devices_by_remote_txn) ) count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"] diff --git a/synapse/replication/tcp/protocol.py 
b/synapse/replication/tcp/protocol.py index 74e892c104..5dc7b3fffc 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -590,9 +590,9 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol): pending_commands = LaterGauge( "synapse_replication_tcp_protocol_pending_commands", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections + (p.name,): len(p.pending_commands) for p in connected_connections }, ) @@ -607,9 +607,9 @@ def transport_buffer_size(protocol): transport_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections + (p.name,): transport_buffer_size(p) for p in connected_connections }, ) @@ -632,9 +632,9 @@ def transport_kernel_read_buffer_size(protocol, read=True): tcp_transport_kernel_send_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_send_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) + (p.name,): transport_kernel_read_buffer_size(p, False) for p in connected_connections }, ) @@ -643,9 +643,9 @@ tcp_transport_kernel_send_buffer = LaterGauge( tcp_transport_kernel_read_buffer = LaterGauge( "synapse_replication_tcp_protocol_transport_kernel_read_buffer", "", - ["name", "conn_id"], + ["name"], lambda: { - (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) + (p.name,): transport_kernel_read_buffer_size(p, True) for p in connected_connections }, ) @@ -654,9 +654,9 @@ tcp_transport_kernel_read_buffer = LaterGauge( tcp_inbound_commands = LaterGauge( "synapse_replication_tcp_protocol_inbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.inbound_commands_counter) }, @@ -665,9 +665,9 @@ tcp_inbound_commands = LaterGauge( tcp_outbound_commands = LaterGauge( "synapse_replication_tcp_protocol_outbound_commands", "", - ["command", "name", "conn_id"], + ["command", "name"], lambda: { - (k[0], p.name, p.conn_id): count + (k[0], p.name,): count for p in connected_connections for k, count in iteritems(p.outbound_commands_counter) }, diff --git a/synapse/replication/tcp/streams.py b/synapse/replication/tcp/streams.py index 55fe701c5c..c1e626be3f 100644 --- a/synapse/replication/tcp/streams.py +++ b/synapse/replication/tcp/streams.py @@ -196,7 +196,7 @@ class Stream(object): ) if len(rows) >= MAX_EVENTS_BEHIND: - raise Exception("stream %s has fallen behined" % (self.NAME)) + raise Exception("stream %s has fallen behind" % (self.NAME)) else: rows = yield self.update_function( from_token, current_token, diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index ad536ab570..41534b8c2a 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -101,7 +101,7 @@ class UserRegisterServlet(ClientV1RestServlet): nonce = self.hs.get_secrets().token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) - return (200, {"nonce": nonce.encode('ascii')}) + return (200, {"nonce": nonce}) @defer.inlineCallbacks def on_POST(self, request): @@ -164,7 +164,7 @@ class UserRegisterServlet(ClientV1RestServlet): key=self.hs.config.registration_shared_secret.encode(), digestmod=hashlib.sha1, ) - want_mac.update(nonce) + 
want_mac.update(nonce.encode('utf8')) want_mac.update(b"\x00") want_mac.update(username) want_mac.update(b"\x00") @@ -173,7 +173,10 @@ class UserRegisterServlet(ClientV1RestServlet): want_mac.update(b"admin" if admin else b"notadmin") want_mac = want_mac.hexdigest() - if not hmac.compare_digest(want_mac, got_mac.encode('ascii')): + if not hmac.compare_digest( + want_mac.encode('ascii'), + got_mac.encode('ascii') + ): raise SynapseError(403, "HMAC incorrect") # Reuse the parts of RegisterRestServlet to reduce code duplication diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 0f3a2e8b51..cd9b3bdbd1 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -45,20 +45,20 @@ class EventStreamRestServlet(ClientV1RestServlet): is_guest = requester.is_guest room_id = None if is_guest: - if "room_id" not in request.args: + if b"room_id" not in request.args: raise SynapseError(400, "Guest users must specify room_id param") - if "room_id" in request.args: - room_id = request.args["room_id"][0] + if b"room_id" in request.args: + room_id = request.args[b"room_id"][0].decode('ascii') pagin_config = PaginationConfig.from_request(request) timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS - if "timeout" in request.args: + if b"timeout" in request.args: try: - timeout = int(request.args["timeout"][0]) + timeout = int(request.args[b"timeout"][0]) except ValueError: raise SynapseError(400, "timeout must be in milliseconds.") - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args chunk = yield self.event_stream_handler.get_stream( requester.user.to_string(), diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index fd5f85b53e..3ead75cb77 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -32,7 +32,7 @@ class InitialSyncRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - as_client_event = "raw" not in request.args + as_client_event = b"raw" not in request.args pagination_config = PaginationConfig.from_request(request) include_archived = parse_boolean(request, "archived", default=False) content = yield self.initial_sync_handler.snapshot_all_rooms( diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index cb85fa1436..0010699d31 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -14,10 +14,9 @@ # limitations under the License. 
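The admin-register changes here are pure bytes/str discipline for Python 3: the nonce must be encoded before it is fed to the HMAC, and hmac.compare_digest raises TypeError when one side is str and the other bytes, so both sides get encoded. A self-contained sketch (the secret, nonce and username are made up):

    import hashlib
    import hmac

    secret = b"registration-shared-secret"  # illustrative value
    nonce = "abcd1234"                      # illustrative value

    mac = hmac.new(key=secret, digestmod=hashlib.sha1)
    mac.update(nonce.encode('utf8'))  # str must be encoded before hashing on py3
    mac.update(b"\x00")
    mac.update(b"alice")
    want_mac = mac.hexdigest()  # hexdigest() returns str

    got_mac = want_mac  # stands in for the value supplied by the client

    # encode both sides so compare_digest sees matching types
    assert hmac.compare_digest(want_mac.encode('ascii'), got_mac.encode('ascii'))
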
import logging -import urllib import xml.etree.ElementTree as ET -from six.moves.urllib import parse as urlparse +from six.moves import urllib from canonicaljson import json from saml2 import BINDING_HTTP_POST, config @@ -134,7 +133,7 @@ class LoginRestServlet(ClientV1RestServlet): LoginRestServlet.SAML2_TYPE): relay_state = "" if "relay_state" in login_submission: - relay_state = "&RelayState=" + urllib.quote( + relay_state = "&RelayState=" + urllib.parse.quote( login_submission["relay_state"]) result = { "uri": "%s%s" % (self.idp_redirect_url, relay_state) @@ -366,7 +365,7 @@ class SAML2RestServlet(ClientV1RestServlet): (user_id, token) = yield handler.register_saml2(username) # Forward to the RelayState callback along with ava if 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=authenticated&access_token=' + token + '&user_id=' + user_id + '&ava=' + @@ -377,7 +376,7 @@ class SAML2RestServlet(ClientV1RestServlet): "user_id": user_id, "token": token, "ava": saml2_auth.ava})) elif 'RelayState' in request.args: - request.redirect(urllib.unquote( + request.redirect(urllib.parse.unquote( request.args['RelayState'][0]) + '?status=not_authenticated') finish_request(request) @@ -390,21 +389,22 @@ class CasRedirectServlet(ClientV1RestServlet): def __init__(self, hs): super(CasRedirectServlet, self).__init__(hs) - self.cas_server_url = hs.config.cas_server_url - self.cas_service_url = hs.config.cas_service_url + self.cas_server_url = hs.config.cas_server_url.encode('ascii') + self.cas_service_url = hs.config.cas_service_url.encode('ascii') def on_GET(self, request): args = request.args - if "redirectUrl" not in args: + if b"redirectUrl" not in args: return (400, "Redirect URL not specified for CAS auth") - client_redirect_url_param = urllib.urlencode({ - "redirectUrl": args["redirectUrl"][0] - }) - hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket" - service_param = urllib.urlencode({ - "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) - }) - request.redirect("%s/login?%s" % (self.cas_server_url, service_param)) + client_redirect_url_param = urllib.parse.urlencode({ + b"redirectUrl": args[b"redirectUrl"][0] + }).encode('ascii') + hs_redirect_url = (self.cas_service_url + + b"/_matrix/client/api/v1/login/cas/ticket") + service_param = urllib.parse.urlencode({ + b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) + }).encode('ascii') + request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param)) finish_request(request) @@ -422,11 +422,11 @@ class CasTicketServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request): - client_redirect_url = request.args["redirectUrl"][0] + client_redirect_url = request.args[b"redirectUrl"][0] http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { - "ticket": request.args["ticket"], + "ticket": request.args[b"ticket"][0].decode('ascii'), "service": self.cas_service_url } try: @@ -471,11 +471,11 @@ class CasTicketServlet(ClientV1RestServlet): finish_request(request) def add_login_token_to_redirect_url(self, url, token): - url_parts = list(urlparse.urlparse(url)) - query = dict(urlparse.parse_qsl(url_parts[4])) + url_parts = list(urllib.parse.urlparse(url)) + query = dict(urllib.parse.parse_qsl(url_parts[4])) query.update({"loginToken": token}) - url_parts[4] = urllib.urlencode(query) - return urlparse.urlunparse(url_parts) + 
url_parts[4] = urllib.parse.urlencode(query).encode('ascii') + return urllib.parse.urlunparse(url_parts) def parse_cas_response(self, cas_response_body): user = None diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 6e95d9bec2..9382b1f124 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -46,7 +46,7 @@ class PushRuleRestServlet(ClientV1RestServlet): try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) requester = yield self.auth.get_user_by_req(request) @@ -73,7 +73,7 @@ class PushRuleRestServlet(ClientV1RestServlet): content, ) except InvalidRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) before = parse_string(request, "before") if before: @@ -95,9 +95,9 @@ class PushRuleRestServlet(ClientV1RestServlet): ) self.notify_user(user_id) except InconsistentRuleException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) except RuleNotFoundException as e: - raise SynapseError(400, e.message) + raise SynapseError(400, str(e)) defer.returnValue((200, {})) @@ -142,10 +142,10 @@ class PushRuleRestServlet(ClientV1RestServlet): PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR ) - if path[0] == '': + if path[0] == b'': defer.returnValue((200, rules)) - elif path[0] == 'global': - path = path[1:] + elif path[0] == b'global': + path = [x.decode('ascii') for x in path[1:]] result = _filter_ruleset_with_path(rules['global'], path) defer.returnValue((200, result)) else: @@ -192,10 +192,10 @@ class PushRuleRestServlet(ClientV1RestServlet): def _rule_spec_from_path(path): if len(path) < 2: raise UnrecognizedRequestError() - if path[0] != 'pushrules': + if path[0] != b'pushrules': raise UnrecognizedRequestError() - scope = path[1] + scope = path[1].decode('ascii') path = path[2:] if scope != 'global': raise UnrecognizedRequestError() @@ -203,13 +203,13 @@ def _rule_spec_from_path(path): if len(path) == 0: raise UnrecognizedRequestError() - template = path[0] + template = path[0].decode('ascii') path = path[1:] if len(path) == 0 or len(path[0]) == 0: raise UnrecognizedRequestError() - rule_id = path[0] + rule_id = path[0].decode('ascii') spec = { 'scope': scope, @@ -220,7 +220,7 @@ def _rule_spec_from_path(path): path = path[1:] if len(path) > 0 and len(path[0]) > 0: - spec['attr'] = path[0] + spec['attr'] = path[0].decode('ascii') return spec diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 182a68b1e2..b84f0260f2 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -59,7 +59,7 @@ class PushersRestServlet(ClientV1RestServlet): ] for p in pushers: - for k, v in p.items(): + for k, v in list(p.items()): if k not in allowed_keys: del p[k] @@ -126,7 +126,7 @@ class PushersSetRestServlet(ClientV1RestServlet): profile_tag=content.get('profile_tag', ""), ) except PusherConfigException as pce: - raise SynapseError(400, "Config Error: " + pce.message, + raise SynapseError(400, "Config Error: " + str(pce), errcode=Codes.MISSING_PARAM) self.notifier.on_new_replication_data() diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 976d98387d..663934efd0 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -207,7 +207,7 @@ class RoomSendEventRestServlet(ClientV1RestServlet): "sender": requester.user.to_string(), } - if 'ts' in 
request.args and requester.app_service: + if b'ts' in request.args and requester.app_service: event_dict['origin_server_ts'] = parse_integer(request, "ts", 0) event = yield self.event_creation_hander.create_and_send_nonmember_event( @@ -255,7 +255,9 @@ class JoinRoomAliasServlet(ClientV1RestServlet): if RoomID.is_valid(room_identifier): room_id = room_identifier try: - remote_room_hosts = request.args["server_name"] + remote_room_hosts = [ + x.decode('ascii') for x in request.args[b"server_name"] + ] except Exception: remote_room_hosts = None elif RoomAlias.is_valid(room_identifier): @@ -461,10 +463,10 @@ class RoomMessageListRestServlet(ClientV1RestServlet): pagination_config = PaginationConfig.from_request( request, default_limit=10, ) - as_client_event = "raw" not in request.args - filter_bytes = parse_string(request, "filter") + as_client_event = b"raw" not in request.args + filter_bytes = parse_string(request, b"filter", encoding=None) if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) else: event_filter = None @@ -560,7 +562,7 @@ class RoomEventContextServlet(ClientV1RestServlet): # picking the API shape for symmetry with /messages filter_bytes = parse_string(request, "filter") if filter_bytes: - filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") + filter_json = urlparse.unquote(filter_bytes) event_filter = Filter(json.loads(filter_json)) else: event_filter = None diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 62f4c3d93e..53da905eea 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -42,7 +42,11 @@ class VoipRestServlet(ClientV1RestServlet): expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000 username = "%d:%s" % (expiry, requester.user.to_string()) - mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1) + mac = hmac.new( + turnSecret.encode(), + msg=username.encode(), + digestmod=hashlib.sha1 + ) # We need to use standard padded base64 encoding here # encode_base64 because we need to add the standard padding to get the # same result as the TURN server. 
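The voip.py change above applies the same rule to the TURN shared secret: on Python 3, hmac.new rejects str keys and messages, so both are encoded first. A sketch of the whole credential computation (the secret and user id are invented):

    import base64
    import hashlib
    import hmac
    import time

    turn_secret = "shared-turn-secret"  # illustrative value
    user_id = "@alice:example.com"      # illustrative value
    user_lifetime = 86400 * 1000

    expiry = (int(time.time() * 1000) + user_lifetime) // 1000
    username = "%d:%s" % (expiry, user_id)

    mac = hmac.new(
        turn_secret.encode(),   # the key must be bytes on py3
        msg=username.encode(),  # and so must the message
        digestmod=hashlib.sha1,
    )
    # TURN servers expect standard *padded* base64 of the raw digest
    password = base64.b64encode(mac.digest()).decode('ascii')
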
diff --git a/synapse/rest/client/v1_only/register.py b/synapse/rest/client/v1_only/register.py index 5e99cffbcb..dadb376b02 100644 --- a/synapse/rest/client/v1_only/register.py +++ b/synapse/rest/client/v1_only/register.py @@ -23,6 +23,7 @@ from twisted.internet import defer import synapse.util.stringutils as stringutils from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError +from synapse.config.server import is_threepid_reserved from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.rest.client.v1.base import ClientV1RestServlet from synapse.types import create_requester @@ -281,12 +282,20 @@ class RegisterRestServlet(ClientV1RestServlet): register_json["user"].encode("utf-8") if "user" in register_json else None ) + threepid = None + if session.get(LoginType.EMAIL_IDENTITY): + threepid = session["threepidCreds"] handler = self.handlers.registration_handler (user_id, token) = yield handler.register( localpart=desired_user_id, - password=password + password=password, + threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(user_id) if session[LoginType.EMAIL_IDENTITY]: logger.debug("Binding emails %s to %s" % ( diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 372648cafd..37b32dd37b 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -53,7 +53,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -89,7 +91,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -241,7 +245,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( @@ -276,7 +282,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Account phone numbers are not authorized on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.datastore.get_user_id_by_threepid( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 2f64155d13..192f52e462 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -26,6 +26,7 @@ import synapse import synapse.types from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError +from synapse.config.server import 
is_threepid_reserved from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -74,7 +75,9 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", body['email']): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Your email domain is not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -114,7 +117,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( - 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, + 403, + "Phone numbers are not authorized to register on this server", + Codes.THREEPID_DENIED, ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( @@ -372,7 +377,9 @@ class RegisterRestServlet(RestServlet): if not check_3pid_allowed(self.hs, medium, address): raise SynapseError( - 403, "Third party identifier is not allowed", + 403, + "Third party identifiers (email/phone numbers)" + + " are not authorized on this server", Codes.THREEPID_DENIED, ) @@ -395,12 +402,21 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: desired_username = desired_username.lower() + threepid = None + if auth_result: + threepid = auth_result.get(LoginType.EMAIL_IDENTITY) + (registered_user_id, _) = yield self.registration_handler.register( localpart=desired_username, password=new_password, guest_access_token=guest_access_token, generate_token=False, + threepid=threepid, ) + # Necessary due to auth checks prior to the threepid being + # written to the db + if is_threepid_reserved(self.hs.config, threepid): + yield self.store.upsert_monthly_active_user(registered_user_id) # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 1275baa1ba..0251146722 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -25,6 +25,7 @@ from synapse.api.errors import SynapseError from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection from synapse.events.utils import ( format_event_for_client_v2_without_room_id, + format_event_raw, serialize_event, ) from synapse.handlers.presence import format_user_presence_state @@ -88,7 +89,7 @@ class SyncRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): - if "from" in request.args: + if b"from" in request.args: # /events used to use 'from', but /sync uses 'since'. # Lets be helpful and whine if we see a 'from'. 
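The repeated e.message to str(e) substitutions in push_rule.py and pusher.py above are another Python 3 requirement: BaseException.message was removed in Python 3, while str(e) yields the message text on both versions. For instance:

    class InvalidRuleException(Exception):
        pass

    try:
        raise InvalidRuleException("no scope specified")
    except InvalidRuleException as e:
        detail = str(e)  # e.message would raise AttributeError on py3

    assert detail == "no scope specified"
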
raise SynapseError( @@ -175,17 +176,28 @@ class SyncRestServlet(RestServlet): @staticmethod def encode_response(time_now, sync_result, access_token_id, filter): + if filter.event_format == 'client': + event_formatter = format_event_for_client_v2_without_room_id + elif filter.event_format == 'federation': + event_formatter = format_event_raw + else: + raise Exception("Unknown event format %s" % (filter.event_format, )) + joined = SyncRestServlet.encode_joined( - sync_result.joined, time_now, access_token_id, filter.event_fields + sync_result.joined, time_now, access_token_id, + filter.event_fields, + event_formatter, ) invited = SyncRestServlet.encode_invited( sync_result.invited, time_now, access_token_id, + event_formatter, ) archived = SyncRestServlet.encode_archived( sync_result.archived, time_now, access_token_id, filter.event_fields, + event_formatter, ) return { @@ -228,7 +240,7 @@ class SyncRestServlet(RestServlet): } @staticmethod - def encode_joined(rooms, time_now, token_id, event_fields): + def encode_joined(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the joined rooms in a sync result @@ -240,7 +252,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned. + all fields will be returned. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the joined rooms list, in our response format @@ -248,13 +262,14 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, only_fields=event_fields + room, time_now, token_id, joined=True, only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_invited(rooms, time_now, token_id): + def encode_invited(rooms, time_now, token_id, event_formatter): """ Encode the invited rooms in a sync result @@ -264,7 +279,9 @@ class SyncRestServlet(RestServlet): time_now(int): current time - used as a baseline for age calculations token_id(int): ID of the user's auth token - used for namespacing - of transaction IDs + of transaction IDs + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: the invited rooms list, in our @@ -274,7 +291,7 @@ class SyncRestServlet(RestServlet): for room in rooms: invite = serialize_event( room.invite, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, is_invite=True, ) unsigned = dict(invite.get("unsigned", {})) @@ -288,7 +305,7 @@ class SyncRestServlet(RestServlet): return invited @staticmethod - def encode_archived(rooms, time_now, token_id, event_fields): + def encode_archived(rooms, time_now, token_id, event_fields, event_formatter): """ Encode the archived rooms in a sync result @@ -300,7 +317,9 @@ class SyncRestServlet(RestServlet): token_id(int): ID of the user's auth token - used for namespacing of transaction IDs event_fields(list): List of event fields to include. If empty, - all fields will be returned. + all fields will be returned. 
+ event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, dict[str, object]]: The invited rooms list, in our response format @@ -308,13 +327,18 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = SyncRestServlet.encode_room( - room, time_now, token_id, joined=False, only_fields=event_fields + room, time_now, token_id, joined=False, + only_fields=event_fields, + event_formatter=event_formatter, ) return joined @staticmethod - def encode_room(room, time_now, token_id, joined=True, only_fields=None): + def encode_room( + room, time_now, token_id, joined, + only_fields, event_formatter, + ): """ Args: room (JoinedSyncResult|ArchivedSyncResult): sync result for a @@ -326,14 +350,15 @@ class SyncRestServlet(RestServlet): joined (bool): True if the user is joined to this room - will mean we handle ephemeral events only_fields(list): Optional. The list of event fields to include. + event_formatter (func[dict]): function to convert from federation format + to client format Returns: dict[str, object]: the room, encoded in our response format """ def serialize(event): - # TODO(mjark): Respect formatting requirements in the filter. return serialize_event( event, time_now, token_id=token_id, - event_format=format_event_for_client_v2_without_room_id, + event_format=event_formatter, only_event_fields=only_fields, ) diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index d9d379182e..b9b5d07677 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -79,7 +79,7 @@ class ThirdPartyUserServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.USER, protocol, fields @@ -102,7 +102,7 @@ class ThirdPartyLocationServlet(RestServlet): yield self.auth.get_user_by_req(request, allow_guest=True) fields = request.args - fields.pop("access_token", None) + fields.pop(b"access_token", None) results = yield self.appservice_handler.query_3pe( ThirdPartyEntityKind.LOCATION, protocol, fields diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py index b9ee6e1c13..38eb2ee23f 100644 --- a/synapse/rest/key/v1/server_key_resource.py +++ b/synapse/rest/key/v1/server_key_resource.py @@ -88,5 +88,5 @@ class LocalKey(Resource): ) def getChild(self, name, request): - if name == '': + if name == b'': return self diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py index 3491fd2118..cb5abcf826 100644 --- a/synapse/rest/key/v2/__init__.py +++ b/synapse/rest/key/v2/__init__.py @@ -22,5 +22,5 @@ from .remote_key_resource import RemoteKey class KeyApiV2Resource(Resource): def __init__(self, hs): Resource.__init__(self) - self.putChild("server", LocalKey(hs)) - self.putChild("query", RemoteKey(hs)) + self.putChild(b"server", LocalKey(hs)) + self.putChild(b"query", RemoteKey(hs)) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 7d67e4b064..eb8782aa6e 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -103,7 +103,7 @@ class RemoteKey(Resource): def async_render_GET(self, request): if len(request.postpath) == 1: server, = request.postpath - query = {server: {}} + query = 
{server.decode('ascii'): {}} elif len(request.postpath) == 2: server, key_id = request.postpath minimum_valid_until_ts = parse_integer( @@ -112,11 +112,12 @@ class RemoteKey(Resource): arguments = {} if minimum_valid_until_ts is not None: arguments["minimum_valid_until_ts"] = minimum_valid_until_ts - query = {server: {key_id: arguments}} + query = {server.decode('ascii'): {key_id.decode('ascii'): arguments}} else: raise SynapseError( 404, "Not found %r" % request.postpath, Codes.NOT_FOUND ) + yield self.query_keys(request, query, query_remote_on_cache_miss=True) def render_POST(self, request): @@ -135,6 +136,7 @@ class RemoteKey(Resource): @defer.inlineCallbacks def query_keys(self, request, query, query_remote_on_cache_miss=False): logger.info("Handling query for keys %r", query) + store_queries = [] for server_name, key_ids in query.items(): if ( diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py index f255f2883f..5a426ff2f6 100644 --- a/synapse/rest/media/v0/content_repository.py +++ b/synapse/rest/media/v0/content_repository.py @@ -56,7 +56,7 @@ class ContentRepoResource(resource.Resource): # servers. # TODO: A little crude here, we could do this better. - filename = request.path.split('/')[-1] + filename = request.path.decode('ascii').split('/')[-1] # be paranoid filename = re.sub("[^0-9A-z.-_]", "", filename) @@ -78,7 +78,7 @@ class ContentRepoResource(resource.Resource): # select private. don't bother setting Expires as all our matrix # clients are smart enough to be happy with Cache-Control (right?) request.setHeader( - "Cache-Control", "public,max-age=86400,s-maxage=86400" + b"Cache-Control", b"public,max-age=86400,s-maxage=86400" ) d = FileSender().beginFileTransfer(f, request) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 65f4bd2910..76e479afa3 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -15,9 +15,8 @@ import logging import os -import urllib -from six.moves.urllib import parse as urlparse +from six.moves import urllib from twisted.internet import defer from twisted.protocols.basic import FileSender @@ -35,10 +34,15 @@ def parse_media_id(request): # This allows users to append e.g. /test.png to the URL. Useful for # clients that parse the URL to see content type. server_name, media_id = request.postpath[:2] + + if isinstance(server_name, bytes): + server_name = server_name.decode('utf-8') + media_id = media_id.decode('utf8') + file_name = None if len(request.postpath) > 2: try: - file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8") + file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8")) except UnicodeDecodeError: pass return server_name, media_id, file_name @@ -93,22 +97,18 @@ def add_file_headers(request, media_type, file_size, upload_name): file_size (int): Size in bytes of the media, if known. upload_name (str): The name of the requested file, if any. 
""" + def _quote(x): + return urllib.parse.quote(x.encode("utf-8")) + request.setHeader(b"Content-Type", media_type.encode("UTF-8")) if upload_name: if is_ascii(upload_name): - request.setHeader( - b"Content-Disposition", - b"inline; filename=%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii") else: - request.setHeader( - b"Content-Disposition", - b"inline; filename*=utf-8''%s" % ( - urllib.quote(upload_name.encode("utf-8")), - ), - ) + disposition = ( + "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii") + + request.setHeader(b"Content-Disposition", disposition) # cache for at least a day. # XXX: we might want to turn this off for data we don't want to diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index fbfa85f74f..f911b120b1 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -47,12 +47,13 @@ class DownloadResource(Resource): def _async_render_GET(self, request): set_cors_headers(request) request.setHeader( - "Content-Security-Policy", - "default-src 'none';" - " script-src 'none';" - " plugin-types application/pdf;" - " style-src 'unsafe-inline';" - " object-src 'self';" + b"Content-Security-Policy", + b"default-src 'none';" + b" script-src 'none';" + b" plugin-types application/pdf;" + b" style-src 'unsafe-inline';" + b" media-src 'self';" + b" object-src 'self';" ) server_name, media_id, name = parse_media_id(request) if server_name == self.server_name: diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 241c972070..a828ff4438 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -20,7 +20,7 @@ import logging import os import shutil -from six import iteritems +from six import PY3, iteritems from six.moves.urllib import parse as urlparse import twisted.internet.error @@ -397,13 +397,13 @@ class MediaRepository(object): yield finish() - media_type = headers["Content-Type"][0] + media_type = headers[b"Content-Type"][0].decode('ascii') time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: - _, params = cgi.parse_header(content_disposition[0],) + _, params = cgi.parse_header(content_disposition[0].decode('ascii'),) upload_name = None # First check if there is a valid UTF-8 filename @@ -419,9 +419,13 @@ class MediaRepository(object): upload_name = upload_name_ascii if upload_name: - upload_name = urlparse.unquote(upload_name) + if PY3: + upload_name = urlparse.unquote(upload_name) + else: + upload_name = urlparse.unquote(upload_name.encode('ascii')) try: - upload_name = upload_name.decode("utf-8") + if isinstance(upload_name, bytes): + upload_name = upload_name.decode("utf-8") except UnicodeDecodeError: upload_name = None else: @@ -755,14 +759,15 @@ class MediaRepositoryResource(Resource): Resource.__init__(self) media_repo = hs.get_media_repository() - self.putChild("upload", UploadResource(hs, media_repo)) - self.putChild("download", DownloadResource(hs, media_repo)) - self.putChild("thumbnail", ThumbnailResource( + + self.putChild(b"upload", UploadResource(hs, media_repo)) + self.putChild(b"download", DownloadResource(hs, media_repo)) + self.putChild(b"thumbnail", ThumbnailResource( hs, media_repo, media_repo.media_storage, )) - 
self.putChild("identicon", IdenticonResource()) + self.putChild(b"identicon", IdenticonResource()) if hs.config.url_preview_enabled: - self.putChild("preview_url", PreviewUrlResource( + self.putChild(b"preview_url", PreviewUrlResource( hs, media_repo, media_repo.media_storage, )) - self.putChild("config", MediaConfigResource(hs)) + self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 778ef97337..af01040a38 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -79,7 +79,6 @@ class PreviewUrlResource(Resource): # don't spider URLs more often than once an hour expiry_ms=60 * 60 * 1000, ) - self._cache.start() self._cleaner_loop = self.clock.looping_call( self._start_expire_url_cache_data, 10 * 1000, @@ -261,7 +260,7 @@ class PreviewUrlResource(Resource): logger.debug("Calculated OG for %s as %s" % (url, og)) - jsonog = json.dumps(og) + jsonog = json.dumps(og).encode('utf8') # store OG in history-aware DB cache yield self.store.store_url_cache( @@ -301,20 +300,20 @@ class PreviewUrlResource(Resource): logger.warn("Error downloading %s: %r", url, e) raise SynapseError( 500, "Failed to download content: %s" % ( - traceback.format_exception_only(sys.exc_type, e), + traceback.format_exception_only(sys.exc_info()[0], e), ), Codes.UNKNOWN, ) yield finish() try: - if "Content-Type" in headers: - media_type = headers["Content-Type"][0] + if b"Content-Type" in headers: + media_type = headers[b"Content-Type"][0].decode('ascii') else: media_type = "application/octet-stream" time_now_ms = self.clock.time_msec() - content_disposition = headers.get("Content-Disposition", None) + content_disposition = headers.get(b"Content-Disposition", None) if content_disposition: _, params = cgi.parse_header(content_disposition[0],) download_name = None diff --git a/synapse/server.py b/synapse/server.py index a795643add..3e9d3d8256 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -19,6 +19,7 @@ # partial one for unit test mocking. 
# Imports required for the default HomeServer() implementation +import abc import logging from twisted.enterprise import adbapi @@ -82,7 +83,6 @@ from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import WorkerServerNoticesSender from synapse.state import StateHandler, StateResolutionHandler -from synapse.storage import DataStore from synapse.streams.events import EventSources from synapse.util import Clock from synapse.util.distributor import Distributor @@ -112,6 +112,8 @@ class HomeServer(object): config (synapse.config.homeserver.HomeserverConfig): """ + __metaclass__ = abc.ABCMeta + DEPENDENCIES = [ 'http_client', 'db_pool', @@ -174,6 +176,11 @@ class HomeServer(object): 'room_context_handler', ] + # This is overridden in derived application classes + # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be + # instantiated during setup() for future return by get_datastore() + DATASTORE_CLASS = abc.abstractproperty() + def __init__(self, hostname, reactor=None, **kwargs): """ Args: @@ -190,13 +197,16 @@ class HomeServer(object): self.distributor = Distributor() self.ratelimiter = Ratelimiter() + self.datastore = None + # Other kwargs are explicit dependencies for depname in kwargs: setattr(self, depname, kwargs[depname]) def setup(self): logger.info("Setting up.") - self.datastore = DataStore(self.get_db_conn(), self) + with self.get_db_conn() as conn: + self.datastore = self.DATASTORE_CLASS(conn, self) logger.info("Finished setting up.") def get_reactor(self): diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 893b9001cd..af15cba0ee 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -118,7 +118,7 @@ class ResourceLimitsServerNotices(object): 'body': event_content, 'msgtype': ServerNoticeMsgType, 'server_notice_type': ServerNoticeLimitReached, - 'admin_uri': self._config.admin_uri, + 'admin_contact': self._config.admin_contact, 'limit_type': event_limit_type } event = yield self._server_notices_manager.send_notice( @@ -146,11 +146,10 @@ class ResourceLimitsServerNotices(object): user_id(str): the user in question room_id(str): the server notices room for that user """ - tags = yield self._store.get_tags_for_user(user_id) - server_notices_tags = tags.get(room_id) + tags = yield self._store.get_tags_for_room(user_id, room_id) need_to_set_tag = True - if server_notices_tags: - if server_notices_tags.get(SERVER_NOTICE_ROOM_TAG): + if tags: + if SERVER_NOTICE_ROOM_TAG in tags: # tag already present, nothing to do here need_to_set_tag = False if need_to_set_tag: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b34970e4d1..b22495c1f9 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -95,10 +95,6 @@ class StateHandler(object): self.hs = hs self._state_resolution_handler = hs.get_state_resolution_handler() - def start_caching(self): - # TODO: remove this shim - self._state_resolution_handler.start_caching() - @defer.inlineCallbacks def get_current_state(self, room_id, event_type=None, state_key="", latest_event_ids=None): @@ -385,6 +381,7 @@ class StateHandler(object): ev_ids, get_prev_content=False, check_redacted=False, ) + @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, 
event): logger.info( "Resolving state for %s with %d groups", event.room_id, len(state_sets) @@ -401,15 +398,17 @@ class StateHandler(object): } with Measure(self.clock, "state._resolve_events"): - new_state = resolve_events_with_state_map( - room_version, state_set_ids, state_map, + new_state = yield resolve_events_with_factory( + room_version, state_set_ids, + event_map=state_map, + state_map_factory=self._state_map_factory ) new_state = { key: state_map[ev_id] for key, ev_id in iteritems(new_state) } - return new_state + defer.returnValue(new_state) class StateResolutionHandler(object): @@ -425,9 +424,6 @@ class StateResolutionHandler(object): self._state_cache = None self.resolve_linearizer = Linearizer(name="state_resolve_lock") - def start_caching(self): - logger.debug("start_caching") - self._state_cache = ExpiringCache( cache_name="state_cache", clock=self.clock, @@ -437,8 +433,6 @@ class StateResolutionHandler(object): reset_expiry_on_get=True, ) - self._state_cache.start() - @defer.inlineCallbacks @log_function def resolve_state_groups( @@ -589,31 +583,6 @@ def _make_state_cache_entry( ) -def resolve_events_with_state_map(room_version, state_sets, state_map): - """ - Args: - room_version(str): Version of the room - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if room_version in (RoomVersions.V1, RoomVersions.VDH_TEST,): - return v1.resolve_events_with_state_map( - state_sets, state_map, - ) - else: - # This should only happen if we added a version but forgot to add it to - # the list above. - raise Exception( - "No state resolution algorithm defined for version %r" % (room_version,) - ) - - def resolve_events_with_factory(room_version, state_sets, event_map, state_map_factory): """ Args: diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 3a1f7054a1..7a7157b352 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,34 +30,6 @@ logger = logging.getLogger(__name__) POWER_KEY = (EventTypes.PowerLevels, "") -def resolve_events_with_state_map(state_sets, state_map): - """ - Args: - state_sets(list): List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. - state_map(dict): a dict from event_id to event, for all events in - state_sets. - - Returns - dict[(str, str), str]: - a map from (type, state_key) to event_id. - """ - if len(state_sets) == 1: - return state_sets[0] - - unconflicted_state, conflicted_state = _seperate( - state_sets, - ) - - auth_events = _create_auth_events_from_maps( - unconflicted_state, conflicted_state, state_map - ) - - return _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) - - @defer.inlineCallbacks def resolve_events_with_factory(state_sets, event_map, state_map_factory): """ @@ -93,10 +65,15 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): for event_ids in itervalues(conflicted_state) for event_id in event_ids ) + needed_event_count = len(needed_events) if event_map is not None: needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d conflicted events", len(needed_events)) + logger.info( + "Asking for %d/%d conflicted events", + len(needed_events), + needed_event_count, + ) # dict[str, FrozenEvent]: a map from state event id to event. 
Only includes # the state events which are in conflict (and those in event_map) @@ -113,11 +90,16 @@ def resolve_events_with_factory(state_sets, event_map, state_map_factory): ) new_needed_events = set(itervalues(auth_events)) + new_needed_event_count = len(new_needed_events) new_needed_events -= needed_events if event_map is not None: new_needed_events -= set(iterkeys(event_map)) - logger.info("Asking for %d auth events", len(new_needed_events)) + logger.info( + "Asking for %d/%d auth events", + len(new_needed_events), + new_needed_event_count, + ) state_map_new = yield state_map_factory(new_needed_events) state_map.update(state_map_new) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 08dffd774f..be61147b9b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -17,9 +17,10 @@ import sys import threading import time -from six import iteritems, iterkeys, itervalues +from six import PY2, iteritems, iterkeys, itervalues from six.moves import intern, range +from canonicaljson import json from prometheus_client import Histogram from twisted.internet import defer @@ -1216,3 +1217,32 @@ class _RollbackButIsFineException(Exception): something went wrong. """ pass + + +def db_to_json(db_content): + """ + Take some data from a database row and return a JSON-decoded object. + + Args: + db_content (memoryview|buffer|bytes|bytearray|unicode) + """ + # psycopg2 on Python 3 returns memoryview objects, which we need to + # cast to bytes to decode + if isinstance(db_content, memoryview): + db_content = db_content.tobytes() + + # psycopg2 on Python 2 returns buffer objects, which we need to cast to + # bytes to decode + if PY2 and isinstance(db_content, buffer): + db_content = bytes(db_content) + + # Decode it to a Unicode string before feeding it to json.loads, so we + # consistently get a Unicode-containing object out.
+ if isinstance(db_content, (bytes, bytearray)): + db_content = db_content.decode('utf8') + + try: + return json.loads(db_content) + except Exception: + logging.warning("Tried to decode '%r' as JSON and failed", db_content) + raise diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index 8fc678fa67..9ad17b7c25 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -119,21 +119,25 @@ class ClientIpStore(background_updates.BackgroundUpdateStore): for entry in iteritems(to_update): (user_id, access_token, ip), (user_agent, device_id, last_seen) = entry - self._simple_upsert_txn( - txn, - table="user_ips", - keyvalues={ - "user_id": user_id, - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - "device_id": device_id, - }, - values={ - "last_seen": last_seen, - }, - lock=False, - ) + try: + self._simple_upsert_txn( + txn, + table="user_ips", + keyvalues={ + "user_id": user_id, + "access_token": access_token, + "ip": ip, + "user_agent": user_agent, + "device_id": device_id, + }, + values={ + "last_seen": last_seen, + }, + lock=False, + ) + except Exception as e: + # Failed to upsert, log and continue + logger.error("Failed to insert client IP %r: %r", entry, e) @defer.inlineCallbacks def get_last_client_ip_by_device(self, user_id, device_id): diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 73646da025..e06b0bc56d 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -169,7 +169,7 @@ class DeviceInboxStore(BackgroundUpdateStore): local_by_user_then_device = {} for user_id, messages_by_device in messages_by_user_then_device.items(): messages_json_for_user = {} - devices = messages_by_device.keys() + devices = list(messages_by_device.keys()) if len(devices) == 1 and devices[0] == "*": # Handle wildcard device_ids. 
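To see why the db_to_json helper introduced in storage/_base.py above is needed: psycopg2 hands back memoryview objects for bytea columns on Python 3 (and buffer objects on Python 2), and json.loads accepts neither directly. A toy round-trip, as a condensed Python-3-only mirror of that helper:

    from canonicaljson import json

    def db_to_json_sketch(db_content):
        if isinstance(db_content, memoryview):
            db_content = db_content.tobytes()        # memoryview -> bytes
        if isinstance(db_content, (bytes, bytearray)):
            db_content = db_content.decode('utf8')   # bytes -> unicode
        return json.loads(db_content)

    row = memoryview(b'{"keys": {"ed25519:abc": "sig"}}')  # fake bytea value
    assert db_to_json_sketch(row)["keys"]["ed25519:abc"] == "sig"
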
sql = ( diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index c0943ecf91..d10ff9e4b9 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -24,7 +24,7 @@ from synapse.api.errors import StoreError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList -from ._base import Cache, SQLBaseStore +from ._base import Cache, SQLBaseStore, db_to_json logger = logging.getLogger(__name__) @@ -411,7 +411,7 @@ class DeviceStore(SQLBaseStore): if device is not None: key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name @@ -466,7 +466,7 @@ class DeviceStore(SQLBaseStore): retcol="content", desc="_get_cached_user_device", ) - defer.returnValue(json.loads(content)) + defer.returnValue(db_to_json(content)) @cachedInlineCallbacks() def _get_cached_devices_for_user(self, user_id): @@ -479,7 +479,7 @@ class DeviceStore(SQLBaseStore): desc="_get_cached_devices_for_user", ) defer.returnValue({ - device["device_id"]: json.loads(device["content"]) + device["device_id"]: db_to_json(device["content"]) for device in devices }) @@ -511,7 +511,7 @@ class DeviceStore(SQLBaseStore): key_json = device.get("key_json", None) if key_json: - result["keys"] = json.loads(key_json) + result["keys"] = db_to_json(key_json) device_display_name = device.get("device_display_name", None) if device_display_name: result["device_display_name"] = device_display_name diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 808194236a..cfb687cb53 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -75,7 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore): }, retcol="creator", desc="get_room_alias_creator", - allow_none=True ) @cached(max_entries=5000) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 523b4360c3..1f1721e820 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -14,13 +14,13 @@ # limitations under the License. from six import iteritems -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.util.caches.descriptors import cached -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class EndToEndKeyStore(SQLBaseStore): @@ -90,7 +90,7 @@ class EndToEndKeyStore(SQLBaseStore): for user_id, device_keys in iteritems(results): for device_id, device_info in iteritems(device_keys): - device_info["keys"] = json.loads(device_info.pop("key_json")) + device_info["keys"] = db_to_json(device_info.pop("key_json")) defer.returnValue(results) diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 8a0386c1a4..42225f8a2a 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -41,13 +41,18 @@ class PostgresEngine(object): db_conn.set_isolation_level( self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ ) + + # Set the bytea output to escape, vs the default of hex + cursor = db_conn.cursor() + cursor.execute("SET bytea_output TO escape") + # Asynchronous commit, don't wait for the server to call fsync before # ending the transaction. 
# https://www.postgresql.org/docs/current/static/wal-async-commit.html if not self.synchronous_commit: - cursor = db_conn.cursor() cursor.execute("SET synchronous_commit TO OFF") - cursor.close() + + cursor.close() def is_deadlock(self, error): if isinstance(error, self.module.DatabaseError): diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f39c8c8461..03cedf3a75 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -19,7 +19,7 @@ import logging from collections import OrderedDict, deque, namedtuple from functools import wraps -from six import iteritems +from six import iteritems, text_type from six.moves import range from canonicaljson import json @@ -38,6 +38,7 @@ from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id +from synapse.util import batch_iter from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks from synapse.util.frozenutils import frozendict_json_encoder @@ -386,12 +387,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) for room_id, ev_ctx_rm in iteritems(events_by_room): - # Work out new extremities by recursively adding and removing - # the new events. latest_event_ids = yield self.get_latest_event_ids_in_room( room_id ) - new_latest_event_ids = yield self._calculate_new_extremeties( + new_latest_event_ids = yield self._calculate_new_extremities( room_id, ev_ctx_rm, latest_event_ids ) @@ -400,6 +399,12 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore # No change in extremities, so no change in state continue + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + new_forward_extremeties[room_id] = new_latest_event_ids len_1 = ( @@ -517,44 +522,79 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ) @defer.inlineCallbacks - def _calculate_new_extremeties(self, room_id, event_contexts, latest_event_ids): - """Calculates the new forward extremeties for a room given events to + def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): + """Calculates the new forward extremities for a room given events to persist. Assumes that we are only persisting events for one room at a time. """ - new_latest_event_ids = set(latest_event_ids) - # First, add all the new events to the list - new_latest_event_ids.update( - event.event_id for event, ctx in event_contexts + + # we're only interested in new events which aren't outliers and which aren't + # being rejected. 
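The extremity recomputation that follows is plain set algebra over the event graph: start from the current forward extremities, add the new events, then discard anything that is a prev_event of a new event (and, via a later DB query, of an existing event). A toy version with invented event ids:

    # toy DAG: event id -> prev_event_ids
    prev_events = {"C": ["A"], "D": ["B", "C"]}

    existing_extremities = {"A", "B"}  # current forward extremities
    new_events = {"C", "D"}            # non-outlier, non-rejected arrivals

    result = set(existing_extremities)
    result.update(new_events)                      # add the new events
    for ev in new_events:
        result.difference_update(prev_events[ev])  # drop their prev_events

    # (the real code also queries the DB for events that are prev_events
    # of *existing* events; omitted here)
    assert result == {"D"}
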
+ new_events = [ + event for event, ctx in event_contexts if not event.internal_metadata.is_outlier() and not ctx.rejected + ] + + # start with the existing forward extremities + result = set(latest_event_ids) + + # add all the new events to the list + result.update( + event.event_id for event in new_events ) - # Now remove all events that are referenced by the to-be-added events - new_latest_event_ids.difference_update( + + # Now remove all events which are prev_events of any of the new events + result.difference_update( e_id - for event, ctx in event_contexts + for event in new_events for e_id, _ in event.prev_events - if not event.internal_metadata.is_outlier() and not ctx.rejected ) - # And finally remove any events that are referenced by previously added - # events. - rows = yield self._simple_select_many_batch( - table="event_edges", - column="prev_event_id", - iterable=list(new_latest_event_ids), - retcols=["prev_event_id"], - keyvalues={ - "is_state": False, - }, - desc="_calculate_new_extremeties", - ) + # Finally, remove any events which are prev_events of any existing events. + existing_prevs = yield self._get_events_which_are_prevs(result) + result.difference_update(existing_prevs) - new_latest_event_ids.difference_update( - row["prev_event_id"] for row in rows - ) + defer.returnValue(result) - defer.returnValue(new_latest_event_ids) + @defer.inlineCallbacks + def _get_events_which_are_prevs(self, event_ids): + """Filter the supplied list of event_ids to get those which are prev_events of + existing (non-outlier/rejected) events. + + Args: + event_ids (Iterable[str]): event ids to filter + + Returns: + Deferred[List[str]]: filtered event ids + """ + results = [] + + def _get_events(txn, batch): + sql = """ + SELECT prev_event_id + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + AND rejections.event_id IS NULL + """ % ( + ",".join("?" for _ in batch), + ) + + txn.execute(sql, batch) + results.extend(r[0] for r in txn) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events, + chunk, + ) + + defer.returnValue(results) @defer.inlineCallbacks def _get_new_state_after_events(self, room_id, events_context, old_latest_event_ids, @@ -586,10 +626,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore the new current state is only returned if we've already calculated it. 
""" - - if not new_latest_event_ids: - return - # map from state_group to ((type, key) -> event_id) state map state_groups_map = {} @@ -929,6 +965,10 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore txn, self.get_users_in_room, (room_id,) ) + self._invalidate_cache_and_stream( + txn, self.get_room_summary, (room_id,) + ) + self._invalidate_cache_and_stream( txn, self.get_current_state_ids, (room_id,) ) @@ -1220,7 +1260,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore "sender": event.sender, "contains_url": ( "url" in event.content - and isinstance(event.content["url"], basestring) + and isinstance(event.content["url"], text_type) ), } for event, _ in events_and_contexts @@ -1529,7 +1569,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore contains_url = "url" in content if contains_url: - contains_url &= isinstance(content["url"], basestring) + contains_url &= isinstance(content["url"], text_type) except (KeyError, AttributeError): # If the event is missing a necessary field then # skip over it. @@ -1886,6 +1926,57 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore ")" ) + # First ensure that we're not about to delete all the forward extremeties + txn.execute( + "SELECT e.event_id, e.depth FROM events as e " + "INNER JOIN event_forward_extremities as f " + "ON e.event_id = f.event_id " + "AND e.room_id = f.room_id " + "WHERE f.room_id = ?", + (room_id,) + ) + rows = txn.fetchall() + max_depth = max(row[1] for row in rows) + + if max_depth < token.topological: + # We need to ensure we don't delete all the events from the database + # otherwise we wouldn't be able to send any events (due to not + # having any backwards extremeties) + raise SynapseError( + 400, "topological_ordering is greater than forward extremeties" + ) + + logger.info("[purge] looking for events to delete") + + should_delete_expr = "state_key IS NULL" + should_delete_params = () + if not delete_local_events: + should_delete_expr += " AND event_id NOT LIKE ?" + + # We include the parameter twice since we use the expression twice + should_delete_params += ( + "%:" + self.hs.hostname, + "%:" + self.hs.hostname, + ) + + should_delete_params += (room_id, token.topological) + + # Note that we insert events that are outliers and aren't going to be + # deleted, as nothing will happen to them. + txn.execute( + "INSERT INTO events_to_purge" + " SELECT event_id, %s" + " FROM events AS e LEFT JOIN state_events USING (event_id)" + " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?" + % ( + should_delete_expr, + should_delete_expr, + ), + should_delete_params, + ) + + # We create the indices *after* insertion as that's a lot faster. 
+ # create an index on should_delete because later we'll be looking for # the should_delete / shouldn't_delete subsets txn.execute( @@ -1900,45 +1991,6 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore " ON events_to_purge(event_id)", ) - # First ensure that we're not about to delete all the forward extremeties - txn.execute( - "SELECT e.event_id, e.depth FROM events as e " - "INNER JOIN event_forward_extremities as f " - "ON e.event_id = f.event_id " - "AND e.room_id = f.room_id " - "WHERE f.room_id = ?", - (room_id,) - ) - rows = txn.fetchall() - max_depth = max(row[0] for row in rows) - - if max_depth <= token.topological: - # We need to ensure we don't delete all the events from the database - # otherwise we wouldn't be able to send any events (due to not - # having any backwards extremeties) - raise SynapseError( - 400, "topological_ordering is greater than forward extremeties" - ) - - logger.info("[purge] looking for events to delete") - - should_delete_expr = "state_key IS NULL" - should_delete_params = () - if not delete_local_events: - should_delete_expr += " AND event_id NOT LIKE ?" - should_delete_params += ("%:" + self.hs.hostname, ) - - should_delete_params += (room_id, token.topological) - - txn.execute( - "INSERT INTO events_to_purge" - " SELECT event_id, %s" - " FROM events AS e LEFT JOIN state_events USING (event_id)" - " WHERE e.room_id = ? AND topological_ordering < ?" % ( - should_delete_expr, - ), - should_delete_params, - ) txn.execute( "SELECT event_id, should_delete FROM events_to_purge" ) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 59822178ff..a8326f5296 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import itertools import logging from collections import namedtuple @@ -265,7 +266,7 @@ class EventsWorkerStore(SQLBaseStore): """ with Measure(self._clock, "_fetch_event_list"): try: - event_id_lists = zip(*event_list)[0] + event_id_lists = list(zip(*event_list))[0] event_ids = [ item for sublist in event_id_lists for item in sublist ] @@ -299,14 +300,14 @@ class EventsWorkerStore(SQLBaseStore): logger.exception("do_fetch") # We only want to resolve deferreds from the main thread - def fire(evs): + def fire(evs, exc): for _, d in evs: if not d.called: with PreserveLoggingContext(): - d.errback(e) + d.errback(exc) with PreserveLoggingContext(): - self.hs.get_reactor().callFromThread(fire, event_list) + self.hs.get_reactor().callFromThread(fire, event_list, e) @defer.inlineCallbacks def _enqueue_events(self, events, check_redacted=True, allow_rejected=False): diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index 2d5896c5b4..6ddcc909bf 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -13,14 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
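The fire(evs, exc) change in events_worker.py above is a Python 3 correctness fix rather than a cosmetic one: PEP 3110 unbinds the except-clause variable as soon as the except block exits, so a callback scheduled with callFromThread can no longer close over e by the time the reactor runs it. A standalone demonstration of the trap and the fix, independent of Twisted:

```python
def broken():
    try:
        raise ValueError("boom")
    except ValueError as e:
        # On Python 3, `e` is deleted when this except block exits, so
        # the closure raises NameError when it is eventually called.
        # (On Python 2 this worked, which is how the bug crept in.)
        return lambda: print("handling", e)


def fixed():
    try:
        raise ValueError("boom")
    except ValueError as e:
        # Bind the exception to a fresh name before the block exits,
        # just as the diff passes `e` into fire() explicitly.
        exc = e
        return lambda: print("handling", exc)


fixed()()  # prints: handling boom
try:
    broken()()
except NameError as err:
    print("broken closure:", err)
```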
-from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import Codes, SynapseError from synapse.util.caches.descriptors import cachedInlineCallbacks -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json class FilteringStore(SQLBaseStore): @@ -44,7 +44,7 @@ class FilteringStore(SQLBaseStore): desc="get_user_filter", ) - defer.returnValue(json.loads(bytes(def_json).decode("utf-8"))) + defer.returnValue(db_to_json(def_json)) def add_user_filter(self, user_localpart, user_filter): def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index f547977600..a1331c1a61 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -134,6 +134,7 @@ class KeyStore(SQLBaseStore): """ key_id = "%s:%s" % (verify_key.alg, verify_key.version) + # XXX fix this to not need a lock (#3819) def _txn(txn): self._simple_upsert_txn( txn, diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index d178f5c5ba..0fe8c8e24c 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -36,7 +36,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def initialise_reserved_users(self, threepids): - # TODO Why can't I do this in init? store = self.hs.get_datastore() reserved_user_list = [] @@ -147,6 +146,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): return count return self.runInteraction("count_users", _count_users) + @defer.inlineCallbacks + def get_registered_reserved_users_count(self): + """Of the reserved threepids defined in config, how many are associated + with registered users? + + Returns: + Deferred[int]: Number of real reserved users + """ + count = 0 + for tp in self.hs.config.mau_limits_reserved_threepids: + user_id = yield self.hs.get_datastore().get_user_id_by_threepid( + tp["medium"], tp["address"] + ) + if user_id: + count = count + 1 + defer.returnValue(count) + @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): """ @@ -156,6 +172,10 @@ class MonthlyActiveUsersStore(SQLBaseStore): Deferred[bool]: True if a new entry was created, False if an existing one was updated. """ + # Am consciously deciding to lock the table on the basis that it ought + # never be a big table and alternative approaches (batching multiple + # upserts into a single txn) introduced a lot of extra complexity. + # See https://github.com/matrix-org/synapse/issues/3854 for more is_insert = yield self._simple_upsert( desc="upsert_monthly_active_user", table="monthly_active_users", @@ -165,7 +185,6 @@ class MonthlyActiveUsersStore(SQLBaseStore): values={ "timestamp": int(self._clock.time_msec()), }, - lock=False, ) if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) @@ -200,10 +219,14 @@ class MonthlyActiveUsersStore(SQLBaseStore): Args: user_id(str): the user_id to query """ + if self.hs.config.limit_usage_by_mau: + # Trial users and guests should not be included as part of the MAU group + is_guest = yield self.is_guest(user_id) + if is_guest: + return is_trial = yield self.is_trial_user(user_id) if is_trial: - # we don't track trial users in the MAU table.
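Dropping lock=False in upsert_monthly_active_user above means the upsert runs with the storage layer's default table lock, under which a naive update-then-insert cannot race a concurrent writer. A sketch of that general shape against any DB-API cursor — an illustration of the trade-off, not Synapse's actual _simple_upsert:

```python
def simple_upsert_txn(txn, table, keyvalues, values):
    # Assumes the caller has locked `table` (or the engine otherwise
    # serialises this transaction with concurrent writers).
    sql = "UPDATE %s SET %s WHERE %s" % (
        table,
        ", ".join("%s = ?" % k for k in values),
        " AND ".join("%s = ?" % k for k in keyvalues),
    )
    txn.execute(sql, list(values.values()) + list(keyvalues.values()))
    if txn.rowcount > 0:
        return False  # updated an existing row

    # No row matched, so insert a fresh one; safe only because nobody
    # else can have inserted the same key in the meantime.
    allvalues = dict(keyvalues)
    allvalues.update(values)
    sql = "INSERT INTO %s (%s) VALUES (%s)" % (
        table,
        ", ".join(allvalues),
        ", ".join("?" for _ in allvalues),
    )
    txn.execute(sql, list(allvalues.values()))
    return True  # inserted a new row
```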
return last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 8443bd4c1b..c7987bfcdd 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -15,7 +15,8 @@ # limitations under the License. import logging -import types + +import six from canonicaljson import encode_canonical_json, json @@ -27,6 +28,11 @@ from ._base import SQLBaseStore logger = logging.getLogger(__name__) +if six.PY2: + db_binary_type = buffer +else: + db_binary_type = memoryview + class PusherWorkerStore(SQLBaseStore): def _decode_pushers_rows(self, rows): @@ -34,18 +40,18 @@ class PusherWorkerStore(SQLBaseStore): dataJson = r['data'] r['data'] = None try: - if isinstance(dataJson, types.BufferType): + if isinstance(dataJson, db_binary_type): dataJson = str(dataJson).decode("UTF8") r['data'] = json.loads(dataJson) except Exception as e: logger.warn( "Invalid JSON in data for pusher %d: %s, %s", - r['id'], dataJson, e.message, + r['id'], dataJson, e.args[0], ) pass - if isinstance(r['pushkey'], types.BufferType): + if isinstance(r['pushkey'], db_binary_type): r['pushkey'] = str(r['pushkey']).decode("UTF8") return rows diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 9b4e6d6aa8..0707f9a86a 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -51,6 +51,12 @@ ProfileInfo = namedtuple( "ProfileInfo", ("avatar_url", "display_name") ) +# "members" points to a truncated list of (user_id, event_id) tuples for users of +# a given membership type, suitable for use in calculating heroes for a room. +# "count" points to the total number of users of a given membership type. +MemberSummary = namedtuple( + "MemberSummary", ("members", "count") +) _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" @@ -82,6 +88,65 @@ class RoomMemberWorkerStore(EventsWorkerStore): return [to_ascii(r[0]) for r in txn] return self.runInteraction("get_users_in_room", f) + @cached(max_entries=100000) + def get_room_summary(self, room_id): + """ Get the details of a room roughly suitable for use by the room + summary extension to /sync. Useful when lazy loading room members. + Args: + room_id (str): The room ID to query + Returns: + Deferred[dict[str, MemberSummary]]: + dict of membership states, pointing to a MemberSummary named tuple. + """ + + def _get_room_summary_txn(txn): + # first get counts. + # We do this all in one transaction to keep the cache small. + # FIXME: get rid of this when we have room_stats + sql = """ + SELECT count(*), m.membership FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? + GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + res = {} + for count, membership in txn: + summary = res.setdefault(to_ascii(membership), MemberSummary([], count)) + + # we order by membership and then fairly arbitrarily by event_id so + # heroes are consistent + sql = """ + SELECT m.user_id, m.membership, m.event_id + FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? + ORDER BY + CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + m.event_id ASC + LIMIT ? + """ + + # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
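The db_binary_type alias added to pusher.py above picks buffer on Python 2 and memoryview on Python 3. The pre-existing str(...).decode("UTF8") lines in that hunk rely on Python 2's str(buffer) returning the raw contents; on Python 3 the portable spelling is bytes(), as this small illustration (Python 3 only) shows:

```python
# Python 3: drivers such as sqlite3/psycopg2 may hand binary columns
# back as memoryview rather than bytes.
raw = memoryview(b'{"url": "http://example.com"}')

# str() on a memoryview is just its repr ("...memory at 0x..."), so it
# cannot be used to recover the payload...
assert "memory at" in str(raw)

# ...whereas bytes() copies the underlying buffer out, after which the
# usual decode applies.
decoded = bytes(raw).decode("UTF8")
assert decoded == '{"url": "http://example.com"}'
```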
+ txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) + for user_id, membership, event_id in txn: + summary = res[to_ascii(membership)] + # we will always have a summary for this membership type at this + # point given the summary currently contains the counts. + members = summary.members + members.append((to_ascii(user_id), to_ascii(event_id))) + + return res + + return self.runInteraction("get_room_summary", _get_room_summary_txn) + @cached() def get_invited_rooms_for_user(self, user_id): """ Get all the rooms the user is invited to diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 4b971efdba..3f4cbd61c4 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -255,7 +255,17 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) @defer.inlineCallbacks - def get_state_groups_ids(self, room_id, event_ids): + def get_state_groups_ids(self, _room_id, event_ids): + """Get the event IDs of all the state for the state groups for the given events + + Args: + _room_id (str): id of the room for these events + event_ids (iterable[str]): ids of the events + + Returns: + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) + """ if not event_ids: defer.returnValue({}) @@ -270,7 +280,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): - """Get the state IDs for the given state group + """Get the event IDs of all the state in the given state group Args: state_group (int) @@ -286,7 +296,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def get_state_groups(self, room_id, event_ids): """ Get the state groups for the given list of event_ids - The return value is a dict mapping group names to lists of events. + Returns: + Deferred[dict[int, list[EventBase]]]: + dict of state_group_id -> list of state events. """ if not event_ids: defer.returnValue({}) @@ -324,7 +336,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): member events (if True), or to exclude member events (if False) Returns: - dictionary state_group -> (dict of (type, state_key) -> event id) + Returns: + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ results = {} @@ -732,8 +746,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): If None, `types` filtering is applied to all events. Returns: - Deferred[dict[int, dict[(type, state_key), EventBase]]] - a dictionary mapping from state group to state dictionary. + Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ if types is not None: non_member_types = [t for t in types if t[0] != EventTypes.Member] @@ -788,8 +802,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): If None, `types` filtering is applied to all events. Returns: - Deferred[dict[int, dict[(type, state_key), EventBase]]] - a dictionary mapping from state group to state dictionary. 
+ Deferred[dict[int, dict[tuple[str, str], str]]]: + dict of state_group_id -> (dict of (type, state_key) -> event id) """ if types: types = frozenset(types) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index 428e7fa36e..a3032cdce9 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -18,14 +18,14 @@ from collections import namedtuple import six -from canonicaljson import encode_canonical_json, json +from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.util.caches.descriptors import cached +from synapse.util.caches.expiringcache import ExpiringCache -from ._base import SQLBaseStore +from ._base import SQLBaseStore, db_to_json # py2 sqlite has buffer hardcoded as only binary type, so we must use it, # despite being deprecated and removed in favor of memoryview @@ -50,6 +50,8 @@ _UpdateTransactionRow = namedtuple( ) ) +SENTINEL = object() + class TransactionStore(SQLBaseStore): """A collection of queries for handling PDUs. @@ -60,6 +62,12 @@ class TransactionStore(SQLBaseStore): self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) + self._destination_retry_cache = ExpiringCache( + cache_name="get_destination_retry_timings", + clock=self._clock, + expiry_ms=5 * 60 * 1000, + ) + def get_received_txn_response(self, transaction_id, origin): """For an incoming transaction from a given origin, check if we have already responded to it. If so, return the response code and response @@ -95,7 +103,8 @@ class TransactionStore(SQLBaseStore): ) if result and result["response_code"]: - return result["response_code"], json.loads(str(result["response_json"])) + return result["response_code"], db_to_json(result["response_json"]) + else: return None @@ -155,7 +164,7 @@ class TransactionStore(SQLBaseStore): """ pass - @cached(max_entries=10000) + @defer.inlineCallbacks def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. @@ -166,10 +175,20 @@ class TransactionStore(SQLBaseStore): None if not retrying Otherwise a dict for the retry scheme """ - return self.runInteraction( + + result = self._destination_retry_cache.get(destination, SENTINEL) + if result is not SENTINEL: + defer.returnValue(result) + + result = yield self.runInteraction( "get_destination_retry_timings", self._get_destination_retry_timings, destination) + # We don't hugely care about race conditions between getting and + # invalidating the cache, since we time out fairly quickly anyway. 
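Swapping @cached for a hand-rolled ExpiringCache in get_destination_retry_timings above turns the retry table into a read-through cache with time-based expiry plus explicit invalidation on write, which sidesteps the cross-worker cache invalidation the old decorator needed. The shape of that pattern, with a plain dict of (value, expiry) pairs standing in for ExpiringCache:

```python
import time

SENTINEL = object()


class RetryTimingsCache(object):
    """Sketch of the read-through pattern; a dict with per-entry expiry
    stands in for synapse's ExpiringCache."""

    def __init__(self, load_from_db, expiry_seconds=300):
        self._load_from_db = load_from_db
        self._expiry = expiry_seconds
        self._cache = {}

    def get_destination_retry_timings(self, destination):
        entry = self._cache.get(destination, SENTINEL)
        if entry is not SENTINEL and entry[1] > time.time():
            return entry[0]

        result = self._load_from_db(destination)
        # Races with invalidation are tolerated: a stale entry simply
        # lives until it expires, a few minutes at most.
        self._cache[destination] = (result, time.time() + self._expiry)
        return result

    def set_destination_retry_timings(self, destination, timings):
        # Drop the cached entry before writing through to the database.
        self._cache.pop(destination, None)
        # ... persist `timings` to the database here ...


cache = RetryTimingsCache(load_from_db=lambda dest: {"retry_interval": 0})
cache.get_destination_retry_timings("remote.example.com")
cache.set_destination_retry_timings("remote.example.com", {"retry_interval": 5000})
```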
+ self._destination_retry_cache[destination] = result + defer.returnValue(result) + def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( txn, @@ -197,8 +216,7 @@ class TransactionStore(SQLBaseStore): retry_interval (int) - how long until next retry in ms """ - # XXX: we could chose to not bother persisting this if our cache thinks - # this is a NOOP + self._destination_retry_cache.pop(destination, None) return self.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings, @@ -211,10 +229,6 @@ class TransactionStore(SQLBaseStore): retry_last_ts, retry_interval): self.database_engine.lock_table(txn, "destinations") - self._invalidate_cache_and_stream( - txn, self.get_destination_retry_timings, (destination,) - ) - # We need to be careful here as the data may have changed from under us # due to a worker setting the timings. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 9b3f2f4b96..ec7b2c9672 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -374,29 +374,25 @@ class ReadWriteLock(object): defer.returnValue(_ctx_manager()) -class DeferredTimeoutError(Exception): - """ - This error is raised by default when a L{Deferred} times out. - """ +def _cancelled_to_timed_out_error(value, timeout): + if isinstance(value, failure.Failure): + value.trap(CancelledError) + raise defer.TimeoutError(timeout, "Deferred") + return value -def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): - """ - Add a timeout to a deferred by scheduling it to be cancelled after - timeout seconds. +def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): + """The built-in twisted `Deferred.addTimeout` fails to time out deferreds + that have a canceller that throws exceptions. This method creates a new + deferred that wraps and times out the given deferred, correctly handling + the case where the given deferred's canceller throws. - This is essentially a backport of deferred.addTimeout, which was introduced - in twisted 16.5. - - If the deferred gets timed out, it errbacks with a DeferredTimeoutError, - unless a cancelable function was passed to its initialization or unless - a different on_timeout_cancel callable is provided. + NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred Args: - deferred (defer.Deferred): deferred to be timed out - timeout (Number): seconds to time out after - reactor (twisted.internet.reactor): the Twisted reactor to use - + deferred (Deferred) + timeout (float): Timeout in seconds + reactor (twisted.internet.reactor): The twisted reactor to use on_timeout_cancel (callable): A callable which is called immediately after the deferred times out, and not if this deferred is otherwise cancelled before the timeout. @@ -406,13 +402,26 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): the timeout. The default callable (if none is provided) will translate a - CancelledError Failure into a DeferredTimeoutError. + CancelledError Failure into a defer.TimeoutError.
+ + Returns: + Deferred """ + + new_d = defer.Deferred() + timed_out = [False] def time_it_out(): timed_out[0] = True - deferred.cancel() + + try: + deferred.cancel() + except: # noqa: E722, if we throw any exception it'll break time outs + logger.exception("Canceller failed during timeout") + + if not new_d.called: + new_d.errback(defer.TimeoutError(timeout, "Deferred")) delayed_call = reactor.callLater(timeout, time_it_out) @@ -432,9 +441,14 @@ def add_timeout_to_deferred(deferred, timeout, reactor, on_timeout_cancel=None): deferred.addBoth(cancel_timeout) + def success_cb(val): + if not new_d.called: + new_d.callback(val) -def _cancelled_to_timed_out_error(value, timeout): - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise DeferredTimeoutError(timeout, "Deferred") - return value + def failure_cb(val): + if not new_d.called: + new_d.errback(val) + + deferred.addCallbacks(success_cb, failure_cb) + + return new_d diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 7b065b195e..f37d5bec08 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import os import six @@ -20,6 +21,8 @@ from six.moves import intern from prometheus_client.core import REGISTRY, Gauge, GaugeMetricFamily +logger = logging.getLogger(__name__) + CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.5)) @@ -76,16 +79,20 @@ def register_cache(cache_type, cache_name, cache): return [] def collect(self): - if cache_type == "response_cache": - response_cache_size.labels(cache_name).set(len(cache)) - response_cache_hits.labels(cache_name).set(self.hits) - response_cache_evicted.labels(cache_name).set(self.evicted_size) - response_cache_total.labels(cache_name).set(self.hits + self.misses) - else: - cache_size.labels(cache_name).set(len(cache)) - cache_hits.labels(cache_name).set(self.hits) - cache_evicted.labels(cache_name).set(self.evicted_size) - cache_total.labels(cache_name).set(self.hits + self.misses) + try: + if cache_type == "response_cache": + response_cache_size.labels(cache_name).set(len(cache)) + response_cache_hits.labels(cache_name).set(self.hits) + response_cache_evicted.labels(cache_name).set(self.evicted_size) + response_cache_total.labels(cache_name).set(self.hits + self.misses) + else: + cache_size.labels(cache_name).set(len(cache)) + cache_hits.labels(cache_name).set(self.hits) + cache_evicted.labels(cache_name).set(self.evicted_size) + cache_total.labels(cache_name).set(self.hits + self.misses) + except Exception as e: + logger.warn("Error calculating metrics for %s: %s", cache_name, e) + raise yield GaugeMetricFamily("__unused", "") diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index ce85b2ae11..f369780277 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -16,12 +16,17 @@ import logging from collections import OrderedDict +from six import iteritems, itervalues + from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.caches import register_cache logger = logging.getLogger(__name__) +SENTINEL = object() + + class ExpiringCache(object): def __init__(self, cache_name, clock, max_len=0, expiry_ms=0, reset_expiry_on_get=False, iterable=False): @@ -54,11 +59,8 @@ class ExpiringCache(object): self.iterable = iterable - self._size_estimate = 0 - 
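A hypothetical way to exercise the timeout_deferred helper above (assuming it is importable as synapse.util.async_helpers.timeout_deferred): wrap a deferred that will never fire, and trap the defer.TimeoutError that the returned wrapper errbacks with:

```python
from twisted.internet import defer, reactor

from synapse.util.async_helpers import timeout_deferred


def main():
    never_fires = defer.Deferred()

    # Unlike Deferred.addTimeout, this hands back a *new* deferred; the
    # original is cancelled when the timer expires.
    d = timeout_deferred(never_fires, timeout=2.0, reactor=reactor)

    def on_timeout(failure):
        failure.trap(defer.TimeoutError)
        print("timed out as expected")

    d.addErrback(on_timeout)
    d.addBoth(lambda _: reactor.stop())


reactor.callWhenRunning(main)
reactor.run()
```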
self.metrics = register_cache("expiring", cache_name, self) - def start(self): if not self._expiry_ms: # Don't bother starting the loop if things never expire return @@ -75,16 +77,11 @@ class ExpiringCache(object): now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) - if self.iterable: - self._size_estimate += len(value) - # Evict if there are now too many items while self._max_len and len(self) > self._max_len: _key, value = self._cache.popitem(last=False) if self.iterable: - removed_len = len(value.value) - self.metrics.inc_evictions(removed_len) - self._size_estimate -= removed_len + self.metrics.inc_evictions(len(value.value)) else: self.metrics.inc_evictions() @@ -101,6 +98,21 @@ class ExpiringCache(object): return entry.value + def pop(self, key, default=SENTINEL): + """Removes and returns the value with the given key from the cache. + + If the key isn't in the cache then `default` will be returned if + specified, otherwise `KeyError` will get raised. + + Identical functionality to `dict.pop(..)`. + """ + + value = self._cache.pop(key, default) + if value is SENTINEL: + raise KeyError(key) + + return value + def __contains__(self, key): return key in self._cache @@ -128,14 +140,16 @@ class ExpiringCache(object): keys_to_delete = set() - for key, cache_entry in self._cache.items(): + for key, cache_entry in iteritems(self._cache): if now - cache_entry.time > self._expiry_ms: keys_to_delete.add(key) for k in keys_to_delete: value = self._cache.pop(k) if self.iterable: - self._size_estimate -= len(value.value) + self.metrics.inc_evictions(len(value.value)) + else: + self.metrics.inc_evictions() logger.debug( "[%s] _prune_cache before: %d, after len: %d", @@ -144,12 +158,14 @@ class ExpiringCache(object): def __len__(self): if self.iterable: - return self._size_estimate + return sum(len(entry.value) for entry in itervalues(self._cache)) else: return len(self._cache) class _CacheEntry(object): + __slots__ = ["time", "value"] + def __init__(self, time, value): self.time = time self.value = value diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index a0c2d37610..89224b26cc 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -200,7 +200,7 @@ class LoggingContext(object): sentinel = Sentinel() - def __init__(self, name=None, parent_context=None): + def __init__(self, name=None, parent_context=None, request=None): self.previous_context = LoggingContext.current_context() self.name = name @@ -218,6 +218,13 @@ class LoggingContext(object): self.parent_context = parent_context + if self.parent_context is not None: + self.parent_context.copy_to(self) + + if request is not None: + # the request param overrides the request from the parent context + self.request = request + def __str__(self): return "%s@%x" % (self.name, id(self)) @@ -256,9 +263,6 @@ class LoggingContext(object): ) self.alive = True - if self.parent_context is not None: - self.parent_context.copy_to(self) - return self def __exit__(self, type, value, traceback): @@ -439,6 +443,35 @@ class PreserveLoggingContext(object): ) +def nested_logging_context(suffix, parent_context=None): + """Creates a new logging context as a child of another. + + The nested logging context will have a 'request' made up of the parent context's + request, plus the given suffix. + + CPU/db usage stats will be added to the parent context's on exit. + + Normal usage looks like: + + with nested_logging_context(suffix): + # ... 
do stuff + + Args: + suffix (str): suffix to add to the parent context's 'request'. + parent_context (LoggingContext|None): parent context. Will use the current context + if None. + + Returns: + LoggingContext: new logging context. + """ + if parent_context is None: + parent_context = LoggingContext.current_context() + return LoggingContext( + parent_context=parent_context, + request=parent_context.request + "-" + suffix, + ) + + def preserve_fn(f): """Function decorator which wraps the function with run_in_background""" def g(*args, **kwargs): diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 14be3c7396..8d0f2a8918 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -19,22 +19,40 @@ from twisted.conch.ssh.keys import Key from twisted.cred import checkers, portal PUBLIC_KEY = ( - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az" - "64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJS" - "kbh/C+BR3utDS555mV" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5" + "XALqeK+7385NlLja3DE/DO9mGhnd9+bAy39EKT3sTV6+WXQ4yD0TvEEyUEMtjWkSEm6U32+C" + "DaS3TW/vPBUMeJQwq+Ydcif1UlnpXrDDTamD0AU9VaEvHq+3HAkipqn0TGpKON6aqk4vauDx" + "oXSsV5TXBVrxP/y7HpMOpU4GUWsaaacBTKKNnUaQB4UflvydaPJUuwdaCUJGTMjbhWrjVfK+" + "jslseSPxU6XvrkZMyCr4znxvuDxjMk1RGIdO7v+rbBMLEgqtSMNqJbYeVCnj2CFgc3fcTcld" + "X2uOJDrJb/WRlHulthCh" ) PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY----- -MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW -4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw -vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb -Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 -xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 -PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 -gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu -DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML -pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP -EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== +MIIEpQIBAAKCAQEAx4RgE2luCoRNt/u56x+Ixcd8i6vTo2hLOVwC6nivu9/OTZS4 +2twxPwzvZhoZ3ffmwMt/RCk97E1evll0OMg9E7xBMlBDLY1pEhJulN9vgg2kt01v +7zwVDHiUMKvmHXIn9VJZ6V6ww02pg9AFPVWhLx6vtxwJIqap9ExqSjjemqpOL2rg +8aF0rFeU1wVa8T/8ux6TDqVOBlFrGmmnAUyijZ1GkAeFH5b8nWjyVLsHWglCRkzI +24Vq41Xyvo7JbHkj8VOl765GTMgq+M58b7g8YzJNURiHTu7/q2wTCxIKrUjDaiW2 +HlQp49ghYHN33E3JXV9rjiQ6yW/1kZR7pbYQoQIDAQABAoIBAQC8KJ0q8Wzzwh5B +esa1dQHZ8+4DEsL/Amae66VcVwD0X3cCN1W2IZ7X5W0Ij2kBqr8V51RYhcR+S+Ek +BtzSiBUBvbKGrqcMGKaUgomDIMzai99hd0gvCCyZnEW1OQhFkNkaRNXCfqiZJ27M +fqvSUiU2eOwh9fCvmxoA6Of8o3FbzcJ+1GMcobWRllDtLmj6lgVbDzuA+0jC5daB +9Tj1pBzu3wn3ufxiS+gBnJ+7NcXH3E73lqCcPa2ufbZ1haxfiGCnRIhFXuQDgxFX +vKdEfDgtvas6r1ahGbc+b/q8E8fZT7cABuIU4yfOORK+MhpyWbvoyyzuVGKj3PKt +KSPJu5CZAoGBAOkoJfAVyYteqKcmGTanGqQnAY43CaYf6GdSPX/jg+JmKZg0zqMC +jWZUtPb93i+jnOInbrnuHOiHAxI8wmhEPed28H2lC/LU8PzlqFkZXKFZ4vLOhhRB +/HeHCFIDosPFlohWi3b+GAjD7sXgnIuGmnXWe2ea/TS3yersifDEoKKjAoGBANsQ +gJX2cJv1c3jhdgcs8vAt5zIOKcCLTOr/QPmVf/kxjNgndswcKHwsxE/voTO9q+TF +v/6yCSTxAdjuKz1oIYWgi/dZo82bBKWxNRpgrGviU3/zwxiHlyIXUhzQu78q3VS/ +7S1XVbc7qMV++XkYKHPVD+nVG/gGzFxumX7MLXfrAoGBAJit9cn2OnjNj9uFE1W6 +r7N254ndeLAUjPe73xH0RtTm2a4WRopwjW/JYIetTuYbWgyujc+robqTTuuOZjAp +H/CG7o0Ym251CypQqaFO/l2aowclPp/dZhpPjp9GSjuxFBZLtiBB3DNBOwbRQzIK +/vLTdRQvZkgzYkI4i0vjNt3JAoGBANP8HSKBLymMlShlrSx2b8TB9tc2Y2riohVJ +2ttqs0M2kt/dGJWdrgOz4mikL+983Olt/0P9juHDoxEEMK2kpcPEv40lnmBpYU7h +s8yJvnBLvJe2EJYdJ8AipyAhUX1FgpbvfxmASP8eaUxsegeXvBWTGWojAoS6N2o+ 
+0KSl+l3vAoGAFqm0gO9f/Q1Se60YQd4l2PZeMnJFv0slpgHHUwegmd6wJhOD7zJ1 +CkZcXwiv7Nog7AI9qKJEUXLjoqL+vJskBzSOqU3tcd670YQMi1aXSXJqYE202K7o +EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs= -----END RSA PRIVATE KEY-----""" diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 97f1267380..4b4ac5f6c7 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -20,6 +20,7 @@ from prometheus_client import Counter from twisted.internet import defer +from synapse.metrics import InFlightGauge from synapse.util.logcontext import LoggingContext logger = logging.getLogger(__name__) @@ -45,6 +46,13 @@ block_db_txn_duration = Counter( block_db_sched_duration = Counter( "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) +# Tracks the number of blocks currently active +in_flight = InFlightGauge( + "synapse_util_metrics_block_in_flight", "", + labels=["block_name"], + sub_metrics=["real_time_max", "real_time_sum"], +) + def measure_func(name): def wrapper(func): @@ -82,10 +90,14 @@ class Measure(object): self.start_usage = self.start_context.get_resource_usage() + in_flight.register((self.name,), self._update_in_flight) + def __exit__(self, exc_type, exc_val, exc_tb): if isinstance(exc_type, Exception) or not self.start_context: return + in_flight.unregister((self.name,), self._update_in_flight) + duration = self.clock.time() - self.start block_counter.labels(self.name).inc() @@ -120,3 +132,13 @@ class Measure(object): if self.created_context: self.start_context.__exit__(exc_type, exc_val, exc_tb) + + def _update_in_flight(self, metrics): + """Gets called when processing in flight metrics + """ + duration = self.clock.time() - self.start + + metrics.real_time_max = max(metrics.real_time_max, duration) + metrics.real_time_sum += duration + + # TODO: Add other in flight metrics. diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 8a3a06fd74..26cce7d197 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -188,7 +188,7 @@ class RetryDestinationLimiter(object): else: self.retry_interval = self.min_retry_interval - logger.debug( + logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", self.destination, exc_type, exc_val, self.retry_interval ) diff --git a/synapse/visibility.py b/synapse/visibility.py index d4680863d3..c64ad2144c 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -324,14 +324,13 @@ def filter_events_for_server(store, server_name, events): # server's domain. # # event_to_state_ids contains lots of duplicates, so it turns out to be - # cheaper to build a complete set of unique - # ((type, state_key), event_id) tuples, and then filter out the ones we - # don't want. 
+ # cheaper to build a complete event_id => (type, state_key) dict, and then + # filter out the ones we don't want # - state_key_to_event_id_set = { - e + event_id_to_state_key = { + event_id: key for key_to_eid in itervalues(event_to_state_ids) - for e in key_to_eid.items() + for key, event_id in iteritems(key_to_eid) } def include(typ, state_key): @@ -346,7 +345,7 @@ def filter_events_for_server(store, server_name, events): event_map = yield store.get_events([ e_id - for key, e_id in state_key_to_event_id_set + for e_id, key in iteritems(event_id_to_state_key) if include(key[0], key[1]) ]) diff --git a/synctl b/synctl deleted file mode 120000 index 1bdceda20a..0000000000 --- a/synctl +++ /dev/null @@ -1 +0,0 @@ -./synapse/app/synctl.py \ No newline at end of file diff --git a/synctl b/synctl new file mode 100755 index 0000000000..356e5cb6a7 --- /dev/null +++ b/synctl @@ -0,0 +1,294 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import collections +import errno +import glob +import os +import os.path +import signal +import subprocess +import sys +import time + +from six import iteritems + +import yaml + +SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] + +GREEN = "\x1b[1;32m" +YELLOW = "\x1b[1;33m" +RED = "\x1b[1;31m" +NORMAL = "\x1b[m" + + +def pid_running(pid): + try: + os.kill(pid, 0) + return True + except OSError as err: + if err.errno == errno.EPERM: + return True + return False + + +def write(message, colour=NORMAL, stream=sys.stdout): + if colour == NORMAL: + stream.write(message + "\n") + else: + stream.write(colour + message + NORMAL + "\n") + + +def abort(message, colour=RED, stream=sys.stderr): + write(message, colour, stream) + sys.exit(1) + + +def start(configfile): + write("Starting ...") + args = SYNAPSE + args.extend(["--daemonize", "-c", configfile]) + + try: + subprocess.check_call(args) + write("started synapse.app.homeserver(%r)" % + (configfile,), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting (exit code: %d); see above for logs" % e.returncode, + colour=RED, + ) + + +def start_worker(app, configfile, worker_configfile): + args = [ + "python", "-B", + "-m", app, + "-c", configfile, + "-c", worker_configfile + ] + + try: + subprocess.check_call(args) + write("started %s(%r)" % (app, worker_configfile), colour=GREEN) + except subprocess.CalledProcessError as e: + write( + "error starting %s(%r) (exit code: %d); see above for logs" % ( + app, worker_configfile, e.returncode, + ), + colour=RED, + ) + + +def stop(pidfile, app): + if os.path.exists(pidfile): + pid = int(open(pidfile).read()) + try: + os.kill(pid, signal.SIGTERM) + write("stopped %s" % (app,), colour=GREEN) + except OSError as err: + if err.errno == errno.ESRCH: + write("%s not running" % (app,), colour=YELLOW) + elif err.errno == errno.EPERM: + abort("Cannot stop %s: Operation not permitted" % (app,)) + else: + abort("Cannot stop %s: Unknown error" 
% (app,)) + + +Worker = collections.namedtuple("Worker", [ + "app", "configfile", "pidfile", "cache_factor", "cache_factors", +]) + + +def main(): + + parser = argparse.ArgumentParser() + + parser.add_argument( + "action", + choices=["start", "stop", "restart"], + help="whether to start, stop or restart the synapse", + ) + parser.add_argument( + "configfile", + nargs="?", + default="homeserver.yaml", + help="the homeserver config file, defaults to homeserver.yaml", + ) + parser.add_argument( + "-w", "--worker", + metavar="WORKERCONFIG", + help="start or stop a single worker", + ) + parser.add_argument( + "-a", "--all-processes", + metavar="WORKERCONFIGDIR", + help="start or stop all the workers in the given directory" + " and the main synapse process", + ) + + options = parser.parse_args() + + if options.worker and options.all_processes: + write( + 'Cannot use "--worker" with "--all-processes"', + stream=sys.stderr + ) + sys.exit(1) + + configfile = options.configfile + + if not os.path.exists(configfile): + write( + "No config file found\n" + "To generate a config file, run '%s -c %s --generate-config" + " --server-name='\n" % ( + " ".join(SYNAPSE), options.configfile + ), + stream=sys.stderr, + ) + sys.exit(1) + + with open(configfile) as stream: + config = yaml.load(stream) + + pidfile = config["pid_file"] + cache_factor = config.get("synctl_cache_factor") + start_stop_synapse = True + + if cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) + + cache_factors = config.get("synctl_cache_factors", {}) + for cache_name, factor in iteritems(cache_factors): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + worker_configfiles = [] + if options.worker: + start_stop_synapse = False + worker_configfile = options.worker + if not os.path.exists(worker_configfile): + write( + "No worker config found at %r" % (worker_configfile,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.append(worker_configfile) + + if options.all_processes: + # To start the main synapse with -a you need to add a worker file + # with worker_app == "synapse.app.homeserver" + start_stop_synapse = False + worker_configdir = options.all_processes + if not os.path.isdir(worker_configdir): + write( + "No worker config directory found at %r" % (worker_configdir,), + stream=sys.stderr, + ) + sys.exit(1) + worker_configfiles.extend(sorted(glob.glob( + os.path.join(worker_configdir, "*.yaml") + ))) + + workers = [] + for worker_configfile in worker_configfiles: + with open(worker_configfile) as stream: + worker_config = yaml.load(stream) + worker_app = worker_config["worker_app"] + if worker_app == "synapse.app.homeserver": + # We need to special case all of this to pick up options that may + # be set in the main config file or in this worker config file. + worker_pidfile = ( + worker_config.get("pid_file") + or pidfile + ) + worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor + worker_cache_factors = ( + worker_config.get("synctl_cache_factors") + or cache_factors + ) + daemonize = worker_config.get("daemonize") or config.get("daemonize") + assert daemonize, "Main process must have daemonize set to true" + + # The master process doesn't support using worker_* config. 
+ for key in worker_config: + if key == "worker_app": # But we allow worker_app + continue + assert not key.startswith("worker_"), \ + "Main process cannot use worker_* config" + else: + worker_pidfile = worker_config["worker_pid_file"] + worker_daemonize = worker_config["worker_daemonize"] + assert worker_daemonize, "In config %r: expected '%s' to be True" % ( + worker_configfile, "worker_daemonize") + worker_cache_factor = worker_config.get("synctl_cache_factor") + worker_cache_factors = worker_config.get("synctl_cache_factors", {}) + workers.append(Worker( + worker_app, worker_configfile, worker_pidfile, worker_cache_factor, + worker_cache_factors, + )) + + action = options.action + + if action == "stop" or action == "restart": + for worker in workers: + stop(worker.pidfile, worker.app) + + if start_stop_synapse: + stop(pidfile, "synapse.app.homeserver") + + # Wait for synapse to actually shut down before starting it again + if action == "restart": + running_pids = [] + if start_stop_synapse and os.path.exists(pidfile): + running_pids.append(int(open(pidfile).read())) + for worker in workers: + if os.path.exists(worker.pidfile): + running_pids.append(int(open(worker.pidfile).read())) + if len(running_pids) > 0: + write("Waiting for process to exit before restarting...") + for running_pid in running_pids: + while pid_running(running_pid): + time.sleep(0.2) + write("All processes exited; now restarting...") + + if action == "start" or action == "restart": + if start_stop_synapse: + # Check if synapse is already running + if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): + abort("synapse.app.homeserver already running") + start(configfile) + + for worker in workers: + env = os.environ.copy() + + if worker.cache_factor: + os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) + + for cache_name, factor in iteritems(worker.cache_factors): + os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) + + start_worker(worker.app, configfile, worker.configfile) + + # Reset env back to the original + os.environ.clear() + os.environ.update(env) + + +if __name__ == "__main__": + main() diff --git a/test_postgresql.sh b/test_postgresql.sh new file mode 100755 index 0000000000..1ffcaabd31 --- /dev/null +++ b/test_postgresql.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# This script builds the Docker image to run the PostgreSQL tests, and then runs +# the tests.
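The worker-start loop above snapshots os.environ, mutates it for each child, and restores it afterwards. The same dance wrapped up as a context manager, purely as an illustration (not something synctl itself defines):

```python
import os
from contextlib import contextmanager


@contextmanager
def patched_environ(**overrides):
    """Temporarily apply environment overrides, restoring the previous
    environment on exit -- the snapshot/restore pattern the worker loop
    above performs by hand."""
    saved = os.environ.copy()
    try:
        os.environ.update(overrides)
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)


with patched_environ(SYNAPSE_CACHE_FACTOR="2.0"):
    assert os.environ["SYNAPSE_CACHE_FACTOR"] == "2.0"
```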
+ +set -e + +# Build, and tag +docker build docker/ -f docker/Dockerfile-pgtests -t synapsepgtests + +# Run, mounting the current directory into /src +docker run --rm -it -v $(pwd)\:/src synapsepgtests diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ed960090c4..379e9c4ab1 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -457,7 +457,7 @@ class AuthTestCase(unittest.TestCase): with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() - self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) + self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact) self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) @@ -467,13 +467,31 @@ class AuthTestCase(unittest.TestCase): ) yield self.auth.check_auth_blocking() + @defer.inlineCallbacks + def test_reserved_threepid(self): + self.hs.config.limit_usage_by_mau = True + self.hs.config.max_mau_value = 1 + self.store.get_monthly_active_count = lambda: defer.succeed(2) + threepid = {'medium': 'email', 'address': 'reserved@server.com'} + unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'} + self.hs.config.mau_limits_reserved_threepids = [threepid] + + yield self.store.register(user_id='user1', token="123", password_hash=None) + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking() + + with self.assertRaises(ResourceLimitError): + yield self.auth.check_auth_blocking(threepid=unknown_threepid) + + yield self.auth.check_auth_blocking(threepid=threepid) + @defer.inlineCallbacks def test_hs_disabled(self): self.hs.config.hs_disabled = True self.hs.config.hs_disabled_message = "Reason for being disabled" with self.assertRaises(ResourceLimitError) as e: yield self.auth.check_auth_blocking() - self.assertEquals(e.exception.admin_uri, self.hs.config.admin_uri) + self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact) self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEquals(e.exception.code, 403) diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index 76b5090fff..a83f567ebd 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -47,7 +47,7 @@ class FrontendProxyTests(HomeserverTestCase): self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] self.resource = ( - site.resource.children["_matrix"].children["client"].children["r0"] + site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] ) request, channel = self.make_request("PUT", "presence/a/status") @@ -77,7 +77,7 @@ class FrontendProxyTests(HomeserverTestCase): self.assertEqual(len(self.reactor.tcpServers), 1) site = self.reactor.tcpServers[0][1] self.resource = ( - site.resource.children["_matrix"].children["client"].children["r0"] + site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] ) request, channel = self.make_request("PUT", "presence/a/status") diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 56e7acd37c..a3aa0a1cf2 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -13,79 +14,79 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - import synapse.api.errors import synapse.handlers.device import synapse.storage -from tests import unittest, utils +from tests import unittest user1 = "@boris:aaa" user2 = "@theresa:bbb" -class DeviceTestCase(unittest.TestCase): - def __init__(self, *args, **kwargs): - super(DeviceTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore - self.handler = None # type: synapse.handlers.device.DeviceHandler - self.clock = None # type: utils.MockClock - - @defer.inlineCallbacks - def setUp(self): - hs = yield utils.setup_test_homeserver(self.addCleanup) +class DeviceTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) self.handler = hs.get_device_handler() self.store = hs.get_datastore() - self.clock = hs.get_clock() + return hs + + def prepare(self, reactor, clock, hs): + # These tests assume that it starts 1000 seconds in. + self.reactor.advance(1000) - @defer.inlineCallbacks def test_device_is_created_if_doesnt_exist(self): - res = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="display name", + res = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="display name", + ) ) self.assertEqual(res, "fco") - dev = yield self.handler.store.get_device("@boris:foo", "fco") + dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - @defer.inlineCallbacks def test_device_is_preserved_if_exists(self): - res1 = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="display name", + res1 = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="display name", + ) ) self.assertEqual(res1, "fco") - res2 = yield self.handler.check_device_registered( - user_id="@boris:foo", - device_id="fco", - initial_device_display_name="new display name", + res2 = self.get_success( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="fco", + initial_device_display_name="new display name", + ) ) self.assertEqual(res2, "fco") - dev = yield self.handler.store.get_device("@boris:foo", "fco") + dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - @defer.inlineCallbacks def test_device_id_is_made_up_if_unspecified(self): - device_id = yield self.handler.check_device_registered( - user_id="@theresa:foo", - device_id=None, - initial_device_display_name="display", + device_id = self.get_success( + self.handler.check_device_registered( + user_id="@theresa:foo", + device_id=None, + initial_device_display_name="display", + ) ) - dev = yield self.handler.store.get_device("@theresa:foo", device_id) + dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id)) self.assertEqual(dev["display_name"], "display") - @defer.inlineCallbacks def test_get_devices_by_user(self): - yield self._record_users() + self._record_users() + + res = self.get_success(self.handler.get_devices_by_user(user1)) - res = yield self.handler.get_devices_by_user(user1) self.assertEqual(3, len(res)) 
device_map = {d["device_id"]: d for d in res} self.assertDictContainsSubset( @@ -119,11 +120,10 @@ class DeviceTestCase(unittest.TestCase): device_map["abc"], ) - @defer.inlineCallbacks def test_get_device(self): - yield self._record_users() + self._record_users() - res = yield self.handler.get_device(user1, "abc") + res = self.get_success(self.handler.get_device(user1, "abc")) self.assertDictContainsSubset( { "user_id": user1, @@ -135,59 +135,66 @@ class DeviceTestCase(unittest.TestCase): res, ) - @defer.inlineCallbacks def test_delete_device(self): - yield self._record_users() + self._record_users() # delete the device - yield self.handler.delete_device(user1, "abc") + self.get_success(self.handler.delete_device(user1, "abc")) # check the device was deleted - with self.assertRaises(synapse.api.errors.NotFoundError): - yield self.handler.get_device(user1, "abc") + res = self.handler.get_device(user1, "abc") + self.pump() + self.assertIsInstance( + self.failureResultOf(res).value, synapse.api.errors.NotFoundError + ) # we'd like to check the access token was invalidated, but that's a # bit of a PITA. - @defer.inlineCallbacks def test_update_device(self): - yield self._record_users() + self._record_users() update = {"display_name": "new display"} - yield self.handler.update_device(user1, "abc", update) + self.get_success(self.handler.update_device(user1, "abc", update)) - res = yield self.handler.get_device(user1, "abc") + res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") - @defer.inlineCallbacks def test_update_unknown_device(self): update = {"display_name": "new_display"} - with self.assertRaises(synapse.api.errors.NotFoundError): - yield self.handler.update_device("user_id", "unknown_device_id", update) + res = self.handler.update_device("user_id", "unknown_device_id", update) + self.pump() + self.assertIsInstance( + self.failureResultOf(res).value, synapse.api.errors.NotFoundError + ) - @defer.inlineCallbacks def _record_users(self): # check this works for both devices which have a recorded client_ip, # and those which don't. 
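The test conversion above follows one mechanical recipe: each yield inside an @defer.inlineCallbacks test becomes self.get_success(...) in a plain method, and each assertRaises around a yield becomes pump() plus failureResultOf. Those helpers are built on the same idea as Twisted's stock SynchronousTestCase assertions, which this self-contained sketch uses:

```python
from twisted.internet import defer
from twisted.trial.unittest import SynchronousTestCase


class ResultOfExample(SynchronousTestCase):
    def test_success(self):
        # was: res = yield some_deferred
        d = defer.succeed({"display_name": "display 2"})
        self.assertEqual(self.successResultOf(d)["display_name"], "display 2")

    def test_failure(self):
        # was: with self.assertRaises(...): yield some_deferred
        d = defer.fail(ValueError("no such device"))
        self.assertIsInstance(self.failureResultOf(d).value, ValueError)
```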
- yield self._record_user(user1, "xyz", "display 0") - yield self._record_user(user1, "fco", "display 1", "token1", "ip1") - yield self._record_user(user1, "abc", "display 2", "token2", "ip2") - yield self._record_user(user1, "abc", "display 2", "token3", "ip3") + self._record_user(user1, "xyz", "display 0") + self._record_user(user1, "fco", "display 1", "token1", "ip1") + self._record_user(user1, "abc", "display 2", "token2", "ip2") + self._record_user(user1, "abc", "display 2", "token3", "ip3") - yield self._record_user(user2, "def", "dispkay", "token4", "ip4") + self._record_user(user2, "def", "dispkay", "token4", "ip4") + + self.reactor.advance(10000) - @defer.inlineCallbacks def _record_user( self, user_id, device_id, display_name, access_token=None, ip=None ): - device_id = yield self.handler.check_device_registered( - user_id=user_id, - device_id=device_id, - initial_device_display_name=display_name, + device_id = self.get_success( + self.handler.check_device_registered( + user_id=user_id, + device_id=device_id, + initial_device_display_name=display_name, + ) ) if ip is not None: - yield self.store.insert_client_ip( - user_id, access_token, ip, "user_agent", device_id + self.get_success( + self.store.insert_client_ip( + user_id, access_token, ip, "user_agent", device_id + ) ) - self.clock.advance_time(1000) + self.reactor.advance(1000) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index ad58073a14..36e136cded 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -33,7 +33,7 @@ from ..utils import ( ) -def _expect_edu(destination, edu_type, content, origin="test"): +def _expect_edu_transaction(edu_type, content, origin="test"): return { "origin": origin, "origin_server_ts": 1000000, @@ -42,10 +42,8 @@ def _expect_edu(destination, edu_type, content, origin="test"): } -def _make_edu_json(origin, edu_type, content): - return json.dumps(_expect_edu("test", edu_type, content, origin=origin)).encode( - 'utf8' - ) +def _make_edu_transaction_json(edu_type, content): + return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8') class TypingNotificationsTestCase(unittest.TestCase): @@ -190,8 +188,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, @@ -221,11 +218,10 @@ class TypingNotificationsTestCase(unittest.TestCase): self.assertEquals(self.event_source.get_current_key(), 0) - yield self.mock_federation_resource.trigger( + (code, response) = yield self.mock_federation_resource.trigger( "PUT", "/_matrix/federation/v1/send/1000000/", - _make_edu_json( - "farm", + _make_edu_transaction_json( "m.typing", content={ "room_id": self.room_id, @@ -233,7 +229,7 @@ class TypingNotificationsTestCase(unittest.TestCase): "typing": True, }, ), - federation_auth=True, + federation_auth_origin=b'farm', ) self.on_new_event.assert_has_calls( @@ -264,8 +260,7 @@ class TypingNotificationsTestCase(unittest.TestCase): call( "farm", path="/_matrix/federation/v1/send/1000000/", - data=_expect_edu( - "farm", + data=_expect_edu_transaction( "m.typing", content={ "room_id": self.room_id, diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py new file mode 100644 index 0000000000..f3cb1423f0 --- /dev/null +++ b/tests/http/test_fedclient.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from twisted.internet.defer import TimeoutError +from twisted.internet.error import ConnectingCancelledError, DNSLookupError +from twisted.web.client import ResponseNeverReceived +from twisted.web.http import HTTPChannel + +from synapse.http.matrixfederationclient import ( + MatrixFederationHttpClient, + MatrixFederationRequest, +) + +from tests.server import FakeTransport +from tests.unittest import HomeserverTestCase + + +class FederationClientTests(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + + hs = self.setup_test_homeserver(reactor=reactor, clock=clock) + hs.tls_client_options_factory = None + return hs + + def prepare(self, reactor, clock, homeserver): + + self.cl = MatrixFederationHttpClient(self.hs) + self.reactor.lookups["testserv"] = "1.2.3.4" + + def test_dns_error(self): + """ + If the DNS lookup returns an error, it will bubble up. + """ + d = self.cl.get_json("testserv2:8008", "foo/bar", timeout=10000) + self.pump() + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, DNSLookupError) + + def test_client_never_connect(self): + """ + If the HTTP request never connects and then times out, it'll give a + ConnectingCancelledError or TimeoutError. + """ + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Advance the clock enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, (ConnectingCancelledError, TimeoutError)) + + def test_client_connect_no_response(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. + """ + d = self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) + + self.pump() + + # Nothing happened yet + self.assertFalse(d.called) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], '1.2.3.4') + self.assertEqual(clients[0][1], 8008) + + conn = Mock() + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred is still without a result + self.assertFalse(d.called) + + # Advance the clock enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, ResponseNeverReceived) + + def test_client_gets_headers(self): + """ + Once the client gets the headers, _send_request returns successfully.
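+ (No response body is sent in this test; receiving the status line and + headers alone is enough for the returned deferred to resolve.)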
+ """ + request = MatrixFederationRequest( + method="GET", + destination="testserv:8008", + path="foo/bar", + ) + d = self.cl._send_request(request, timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n") + + # We should get a successful response + r = self.successResultOf(d) + self.assertEqual(r.code, 200) + + def test_client_headers_no_body(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. + """ + d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000) + + self.pump() + + conn = Mock() + clients = self.reactor.tcpClients + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred does not have a result + self.assertFalse(d.called) + + # Send it the HTTP response + client.dataReceived( + (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" + b"Server: Fake\r\n\r\n") + ) + + # Push by enough to time it out + self.reactor.advance(10.5) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, TimeoutError) + + def test_client_sends_body(self): + self.cl.post_json( + "testserv:8008", "foo/bar", timeout=10000, + data={"a": "b"} + ) + + self.pump() + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + client = clients[0][2].buildProtocol(None) + server = HTTPChannel() + + client.makeConnection(FakeTransport(server, self.reactor)) + server.makeConnection(FakeTransport(client, self.reactor)) + + self.pump(0.1) + + self.assertEqual(len(server.requests), 1) + request = server.requests[0] + content = request.content.read() + self.assertEqual(content, b'{"a":"b"}') diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 65df116efc..9e9fbbfe93 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -1,4 +1,5 @@ # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,89 +12,62 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import tempfile from mock import Mock, NonCallableMock -from twisted.internet import defer, reactor -from twisted.internet.defer import Deferred - from synapse.replication.tcp.client import ( ReplicationClientFactory, ReplicationClientHandler, ) from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory -from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable from tests import unittest -from tests.utils import setup_test_homeserver +from tests.server import FakeTransport -class TestReplicationClientHandler(ReplicationClientHandler): - """Overrides on_rdata so that we can wait for it to happen""" +class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): - def __init__(self, store): - super(TestReplicationClientHandler, self).__init__(store) - self._rdata_awaiters = [] - - def await_replication(self): - d = Deferred() - self._rdata_awaiters.append(d) - return make_deferred_yieldable(d) - - def on_rdata(self, stream_name, token, rows): - awaiters = self._rdata_awaiters - self._rdata_awaiters = [] - super(TestReplicationClientHandler, self).on_rdata(stream_name, token, rows) - with PreserveLoggingContext(): - for a in awaiters: - a.callback(None) - - -class BaseSlavedStoreTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - self.hs = yield setup_test_homeserver( - self.addCleanup, + hs = self.setup_test_homeserver( "blue", - http_client=None, federation_client=Mock(), ratelimiter=NonCallableMock(spec_set=["send_message"]), ) - self.hs.get_ratelimiter().send_message.return_value = (True, 0) + + hs.get_ratelimiter().send_message.return_value = (True, 0) + + return hs + + def prepare(self, reactor, clock, hs): self.master_store = self.hs.get_datastore() self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs) self.event_id = 0 server_factory = ReplicationStreamProtocolFactory(self.hs) - # XXX: mktemp is unsafe and should never be used. but we're just a test. - path = tempfile.mktemp(prefix="base_slaved_store_test_case_socket") - listener = reactor.listenUNIX(path, server_factory) - self.addCleanup(listener.stopListening) self.streamer = server_factory.streamer - self.replication_handler = TestReplicationClientHandler(self.slaved_store) + self.replication_handler = ReplicationClientHandler(self.slaved_store) client_factory = ReplicationClientFactory( self.hs, "client_name", self.replication_handler ) - client_connector = reactor.connectUNIX(path, client_factory) - self.addCleanup(client_factory.stopTrying) - self.addCleanup(client_connector.disconnect) + + server = server_factory.buildProtocol(None) + client = client_factory.buildProtocol(None) + + client.makeConnection(FakeTransport(server, reactor)) + server.makeConnection(FakeTransport(client, reactor)) def replicate(self): """Tell the master side of replication that something has happened, and then wait for the replication to occur. """ - # xxx: should we be more specific in what we wait for? 
- d = self.replication_handler.await_replication() self.streamer.on_notifier_poke() - return d + self.pump(0.1) - @defer.inlineCallbacks def check(self, method, args, expected_result=None): - master_result = yield getattr(self.master_store, method)(*args) - slaved_result = yield getattr(self.slaved_store, method)(*args) + master_result = self.get_success(getattr(self.master_store, method)(*args)) + slaved_result = self.get_success(getattr(self.slaved_store, method)(*args)) if expected_result is not None: self.assertEqual(master_result, expected_result) self.assertEqual(slaved_result, expected_result) diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py index 87cc2b2fba..43e3248703 100644 --- a/tests/replication/slave/storage/test_account_data.py +++ b/tests/replication/slave/storage/test_account_data.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - -from twisted.internet import defer - from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from ._base import BaseSlavedStoreTestCase @@ -27,16 +24,19 @@ class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedAccountDataStore - @defer.inlineCallbacks def test_user_account_data(self): - yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) - yield self.replicate() - yield self.check( + self.get_success( + self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) + ) + self.replicate() + self.check( "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 1} ) - yield self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) - yield self.replicate() - yield self.check( + self.get_success( + self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) + ) + self.replicate() + self.check( "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 2} ) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 2ba80ccdcf..41be5d5a1a 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
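# Why the dict_equals change below compares canonical JSON: encoding both
# events' underlying dicts with encode_canonical_json yields byte strings with
# sorted keys, so equality no longer depends on dict internals or key order.
# A small illustration (assuming canonicaljson's documented behaviour):
#
#     from canonicaljson import encode_canonical_json
#
#     a = encode_canonical_json({"b": 1, "a": 2})
#     b = encode_canonical_json({"a": 2, "b": 1})
#     assert a == b  # both encode to b'{"a":2,"b":1}'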
-from twisted.internet import defer +from canonicaljson import encode_canonical_json from synapse.events import FrozenEvent, _EventInternalMetadata from synapse.events.snapshot import EventContext @@ -28,7 +28,9 @@ ROOM_ID = "!room:blue" def dict_equals(self, other): - return self.__dict__ == other.__dict__ + me = encode_canonical_json(self._event_dict) + them = encode_canonical_json(other._event_dict) + return me == them def patch__eq__(cls): @@ -55,70 +57,66 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): def tearDown(self): [unpatch() for unpatch in self.unpatches] - @defer.inlineCallbacks def test_get_latest_event_ids_in_room(self): - create = yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.replicate() - yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) + create = self.persist(type="m.room.create", key="", creator=USER_ID) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) - join = yield self.persist( + join = self.persist( type="m.room.member", key=USER_ID, membership="join", prev_events=[(create.event_id, {})], ) - yield self.replicate() - yield self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) + self.replicate() + self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) - @defer.inlineCallbacks def test_redactions(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.member", key=USER_ID, membership="join") + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") - msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello") - yield self.replicate() - yield self.check("get_event", [msg.event_id], msg) + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) - redaction = yield self.persist(type="m.room.redaction", redacts=msg.event_id) - yield self.replicate() + redaction = self.persist(type="m.room.redaction", redacts=msg.event_id) + self.replicate() msg_dict = msg.get_dict() msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) - yield self.check("get_event", [msg.event_id], redacted) + self.check("get_event", [msg.event_id], redacted) - @defer.inlineCallbacks def test_backfilled_redactions(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.member", key=USER_ID, membership="join") + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.member", key=USER_ID, membership="join") - msg = yield self.persist(type="m.room.message", msgtype="m.text", body="Hello") - yield self.replicate() - yield self.check("get_event", [msg.event_id], msg) + msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello") + self.replicate() + self.check("get_event", [msg.event_id], msg) - redaction = yield self.persist( + redaction = self.persist( type="m.room.redaction", redacts=msg.event_id, backfill=True ) - yield self.replicate() + self.replicate() msg_dict = msg.get_dict() msg_dict["content"] = {} msg_dict["unsigned"]["redacted_by"] = redaction.event_id msg_dict["unsigned"]["redacted_because"] = redaction redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict()) - 
yield self.check("get_event", [msg.event_id], redacted) + self.check("get_event", [msg.event_id], redacted) - @defer.inlineCallbacks def test_invites(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.check("get_invited_rooms_for_user", [USER_ID_2], []) - event = yield self.persist( - type="m.room.member", key=USER_ID_2, membership="invite" - ) - yield self.replicate() - yield self.check( + self.persist(type="m.room.create", key="", creator=USER_ID) + self.check("get_invited_rooms_for_user", [USER_ID_2], []) + event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") + + self.replicate() + + self.check( "get_invited_rooms_for_user", [USER_ID_2], [ @@ -132,37 +130,34 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ], ) - @defer.inlineCallbacks def test_push_actions_for_user(self): - yield self.persist(type="m.room.create", key="", creator=USER_ID) - yield self.persist(type="m.room.join", key=USER_ID, membership="join") - yield self.persist( + self.persist(type="m.room.create", key="", creator=USER_ID) + self.persist(type="m.room.join", key=USER_ID, membership="join") + self.persist( type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join" ) - event1 = yield self.persist( - type="m.room.message", msgtype="m.text", body="hello" - ) - yield self.replicate() - yield self.check( + event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello") + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 0, "notify_count": 0}, ) - yield self.persist( + self.persist( type="m.room.message", msgtype="m.text", body="world", push_actions=[(USER_ID_2, ["notify"])], ) - yield self.replicate() - yield self.check( + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 0, "notify_count": 1}, ) - yield self.persist( + self.persist( type="m.room.message", msgtype="m.text", body="world", @@ -170,8 +165,8 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): (USER_ID_2, ["notify", {"set_tweak": "highlight", "value": True}]) ], ) - yield self.replicate() - yield self.check( + self.replicate() + self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], {"highlight_count": 1, "notify_count": 2}, @@ -179,7 +174,6 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): event_id = 0 - @defer.inlineCallbacks def persist( self, sender=USER_ID, @@ -206,8 +200,8 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): depth = self.event_id if not prev_events: - latest_event_ids = yield self.master_store.get_latest_event_ids_in_room( - room_id + latest_event_ids = self.get_success( + self.master_store.get_latest_event_ids_in_room(room_id) ) prev_events = [(ev_id, {}) for ev_id in latest_event_ids] @@ -240,19 +234,23 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ) else: state_handler = self.hs.get_state_handler() - context = yield state_handler.compute_event_context(event) + context = self.get_success(state_handler.compute_event_context(event)) - yield self.master_store.add_push_actions_to_staging( + self.master_store.add_push_actions_to_staging( event.event_id, {user_id: actions for user_id, actions in push_actions} ) ordering = None if backfill: - yield self.master_store.persist_events([(event, context)], backfilled=True) + self.get_success( + self.master_store.persist_events([(event, 
context)], backfilled=True) + ) else: - ordering, _ = yield self.master_store.persist_event(event, context) + ordering, _ = self.get_success( + self.master_store.persist_event(event, context) + ) if ordering: event.internal_metadata.stream_ordering = ordering - defer.returnValue(event) + return event diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/replication/slave/storage/test_receipts.py index ae1adeded1..f47d94f690 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/replication/slave/storage/test_receipts.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from ._base import BaseSlavedStoreTestCase @@ -27,13 +25,10 @@ class SlavedReceiptTestCase(BaseSlavedStoreTestCase): STORE_TYPE = SlavedReceiptsStore - @defer.inlineCallbacks def test_receipt(self): - yield self.check("get_receipts_for_user", [USER_ID, "m.read"], {}) - yield self.master_store.insert_receipt( - ROOM_ID, "m.read", USER_ID, [EVENT_ID], {} - ) - yield self.replicate() - yield self.check( - "get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID} + self.check("get_receipts_for_user", [USER_ID, "m.read"], {}) + self.get_success( + self.master_store.insert_receipt(ROOM_ID, "m.read", USER_ID, [EVENT_ID], {}) ) + self.replicate() + self.check("get_receipts_for_user", [USER_ID, "m.read"], {ROOM_ID: EVENT_ID}) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 9fe0760496..359f7777ff 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -22,39 +22,24 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer -import synapse.rest.client.v1.room from synapse.api.constants import Membership -from synapse.http.server import JsonResource -from synapse.types import UserID -from synapse.util import Clock +from synapse.rest.client.v1 import room from tests import unittest -from tests.server import ( - ThreadedMemoryReactorClock, - make_request, - render, - setup_test_homeserver, -) - -from .utils import RestHelper PATH_PREFIX = b"/_matrix/client/api/v1" -class RoomBase(unittest.TestCase): +class RoomBase(unittest.HomeserverTestCase): rmcreator_id = None - def setUp(self): + servlets = [room.register_servlets, room.register_deprecated_servlets] - self.clock = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.clock) + def make_homeserver(self, reactor, clock): - self.hs = setup_test_homeserver( - self.addCleanup, + self.hs = self.setup_test_homeserver( "red", http_client=None, - clock=self.hs_clock, - reactor=self.clock, federation_client=Mock(), ratelimiter=NonCallableMock(spec_set=["send_message"]), ) @@ -63,42 +48,21 @@ class RoomBase(unittest.TestCase): self.hs.get_federation_handler = Mock(return_value=Mock()) - def get_user_by_access_token(token=None, allow_guest=False): - return { - "user": UserID.from_string(self.helper.auth_user_id), - "token_id": 1, - "is_guest": False, - } - - def get_user_by_req(request, allow_guest=False, rights="access"): - return synapse.types.create_requester( - UserID.from_string(self.helper.auth_user_id), 1, False, None - ) - - self.hs.get_auth().get_user_by_req = get_user_by_req - self.hs.get_auth().get_user_by_access_token = get_user_by_access_token - self.hs.get_auth().get_access_token_from_request = Mock(return_value=b"1234") - def _insert_client_ip(*args, 
**kwargs): return defer.succeed(None) self.hs.get_datastore().insert_client_ip = _insert_client_ip - self.resource = JsonResource(self.hs) - synapse.rest.client.v1.room.register_servlets(self.hs, self.resource) - synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource) - self.helper = RestHelper(self.hs, self.resource, self.user_id) + return self.hs class RoomPermissionsTestCase(RoomBase): """ Tests room permissions. """ - user_id = b"@sid1:red" - rmcreator_id = b"@notme:red" + user_id = "@sid1:red" + rmcreator_id = "@notme:red" - def setUp(self): - - super(RoomPermissionsTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id @@ -114,22 +78,20 @@ class RoomPermissionsTestCase(RoomBase): self.created_rmid_msg_path = ( "rooms/%s/send/m.room.message/a1" % (self.created_rmid) ).encode('ascii') - request, channel = make_request( - b"PUT", - self.created_rmid_msg_path, - b'{"msgtype":"m.text","body":"test msg"}', + request, channel = self.make_request( + "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - render(request, self.resource, self.clock) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) # set topic for public room - request, channel = make_request( - b"PUT", + request, channel = self.make_request( + "PUT", ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode('ascii'), b'{"topic":"Public Room Topic"}', ) - render(request, self.resource, self.clock) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) # auth as user_id now self.helper.auth_user_id = self.user_id @@ -140,128 +102,128 @@ class RoomPermissionsTestCase(RoomBase): seq = iter(range(100)) def send_msg_path(): - return b"/rooms/%s/send/m.room.message/mid%s" % ( + return "/rooms/%s/send/m.room.message/mid%s" % ( self.created_rmid, - str(next(seq)).encode('ascii'), + str(next(seq)), ) # send message in uncreated room, expect 403 - request, channel = make_request( - b"PUT", - b"/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), + request, channel = self.make_request( + "PUT", + "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(403, channel.code, 
msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", send_msg_path(), msg_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", send_msg_path(), msg_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_topic_perms(self): topic_content = b'{"topic":"My Topic Name"}' - topic_path = b"/rooms/%s/state/m.room.topic" % self.created_rmid + topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid # set/get topic in uncreated room, expect 403 - request, channel = make_request( - b"PUT", b"/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content + request, channel = self.make_request( + "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request( - b"GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request( + "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(403, 
int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) - self.assert_dict(json.loads(topic_content), channel.json_body) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assert_dict(json.loads(topic_content.decode('utf8')), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) - request, channel = make_request(b"PUT", topic_path, topic_content) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) - request, channel = make_request(b"GET", topic_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", topic_path, topic_content) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) + request, channel = self.make_request("GET", topic_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 - request, channel = make_request( - b"GET", b"/rooms/%s/state/m.room.topic" % self.created_public_rmid + request, channel = self.make_request( + "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 - request, channel = make_request( - b"PUT", - b"/rooms/%s/state/m.room.topic" % self.created_public_rmid, + request, channel = self.make_request( + "PUT", + "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def _test_get_membership(self, room=None, members=[], expect_code=None): for member in members: - path = b"/rooms/%s/state/m.room.member/%s" % (room, member) - request, channel = make_request(b"GET", path) - render(request, self.resource, self.clock) - self.assertEquals(expect_code, int(channel.result["code"])) + path = "/rooms/%s/state/m.room.member/%s" % (room, member) + request, channel = self.make_request("GET", path) + self.render(request) + 
self.assertEquals(expect_code, channel.code) def test_membership_basic_room_perms(self): # === room does not exist === @@ -428,217 +390,211 @@ class RoomPermissionsTestCase(RoomBase): class RoomsMemberListTestCase(RoomBase): """ Tests /rooms/$room_id/members/list REST events.""" - user_id = b"@sid1:red" + user_id = "@sid1:red" def test_get_member_list(self): room_id = self.helper.create_room_as(self.user_id) - request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self): - request, channel = make_request(b"GET", b"/rooms/roomdoesnotexist/members") - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self): - room_id = self.helper.create_room_as(b"@some_other_guy:red") - request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + room_id = self.helper.create_room_as("@some_other_guy:red") + request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self): - room_creator = b"@some_other_guy:red" + room_creator = "@some_other_guy:red" room_id = self.helper.create_room_as(room_creator) - room_path = b"/rooms/%s/members" % room_id + room_path = "/rooms/%s/members" % room_id self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. - request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(403, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined - request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left - request, channel = make_request(b"GET", room_path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", room_path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) class RoomsCreateTestCase(RoomBase): """ Tests /rooms and /rooms/$room_id REST events. 
""" - user_id = b"@sid1:red" + user_id = "@sid1:red" def test_post_room_no_keys(self): # POST with no config keys, expect new room id - request, channel = make_request(b"POST", b"/createRoom", b"{}") + request, channel = self.make_request("POST", "/createRoom", "{}") - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), channel.result) + self.render(request) + self.assertEquals(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) def test_post_room_visibility_key(self): # POST with visibility config key, expect new room id - request, channel = make_request( - b"POST", b"/createRoom", b'{"visibility":"private"}' + request, channel = self.make_request( + "POST", "/createRoom", b'{"visibility":"private"}' ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_custom_key(self): # POST with custom config keys, expect new room id - request, channel = make_request(b"POST", b"/createRoom", b'{"custom":"stuff"}') - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + request, channel = self.make_request( + "POST", "/createRoom", b'{"custom":"stuff"}' + ) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_known_and_unknown_keys(self): # POST with custom + known config keys, expect new room id - request, channel = make_request( - b"POST", b"/createRoom", b'{"visibility":"private","custom":"things"}' + request, channel = self.make_request( + "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self): # POST with invalid content / paths, expect 400 - request, channel = make_request(b"POST", b"/createRoom", b'{"visibili') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"])) + request, channel = self.make_request("POST", "/createRoom", b'{"visibili') + self.render(request) + self.assertEquals(400, channel.code) - request, channel = make_request(b"POST", b"/createRoom", b'["hello"]') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"])) + request, channel = self.make_request("POST", "/createRoom", b'["hello"]') + self.render(request) + self.assertEquals(400, channel.code) class RoomTopicTestCase(RoomBase): """ Tests /rooms/$room_id/topic REST events. 
""" - user_id = b"@sid1:red" - - def setUp(self): - - super(RoomTopicTestCase, self).setUp() + user_id = "@sid1:red" + def prepare(self, reactor, clock, hs): # create the room self.room_id = self.helper.create_room_as(self.user_id) - self.path = b"/rooms/%s/state/m.room.topic" % (self.room_id,) + self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,) def test_invalid_puts(self): # missing keys or invalid json - request, channel = make_request(b"PUT", self.path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{"_name":"bo"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '{"nao') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( - b"PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( + "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, 'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", self.path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, '') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid key, wrong type content = '{"topic":["Topic name"]}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_topic(self): # nothing should be there - request, channel = make_request(b"GET", self.path) - render(request, self.resource, self.clock) - self.assertEquals(404, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(404, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, 
int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get - request, channel = make_request(b"GET", self.path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) def test_rooms_topic_with_extra_keys(self): # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' - request, channel = make_request(b"PUT", self.path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", self.path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # valid get - request, channel = make_request(b"GET", self.path) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", self.path) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) class RoomMemberStateTestCase(RoomBase): """ Tests /rooms/$room_id/members/$user_id/state REST events. """ - user_id = b"@sid1:red" + user_id = "@sid1:red" - def setUp(self): - - super(RoomMemberStateTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) - def tearDown(self): - pass - def test_invalid_puts(self): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json - request, channel = make_request(b"PUT", path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{"_name":"bob"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '{"nao') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( b"PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( "PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]),
msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, 'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, '') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # valid keys, wrong types content = '{"membership":["%s","%s","%s"]}' % ( @@ -646,9 +602,9 @@ class RoomMemberStateTestCase(RoomBase): Membership.JOIN, Membership.LEAVE, ) - request, channel = make_request(b"PUT", path, content.encode('ascii')) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content.encode('ascii')) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_members_self(self): path = "/rooms/%s/state/m.room.member/%s" % ( @@ -658,13 +614,13 @@ class RoomMemberStateTestCase(RoomBase): # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN - request, channel = make_request(b"PUT", path, content.encode('ascii')) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content.encode('ascii')) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEquals(expected_response, channel.json_body) @@ -678,13 +634,13 @@ class RoomMemberStateTestCase(RoomBase): # valid invite message content = '{"membership":"%s"}' % Membership.INVITE - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) def test_rooms_members_other_custom_keys(self): @@ -699,13 +655,13 @@ class RoomMemberStateTestCase(RoomBase): Membership.INVITE, "Join us!", ) - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"GET", path, None) - render(request, self.resource, self.clock) - 
self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("GET", path, None) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) self.assertEquals(json.loads(content), channel.json_body) @@ -714,60 +670,58 @@ class RoomMessagesTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomMessagesTestCase, self).setUp() - + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) def test_invalid_puts(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json - request, channel = make_request(b"PUT", path, '{}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"_name":"bob"}') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{"_name":"bob"}') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '{"nao') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'{"nao') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request( b"PUT", path, '[{"_name":"bob"},{"_name":"jill"}]' + request, channel = self.make_request( "PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]' ) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, 'text only') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'text only') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) - request, channel = make_request(b"PUT", path, '') - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + request, channel = self.make_request("PUT", path, b'') + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) def test_rooms_messages_sent(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) - content = '{"body":"test","msgtype":{"type":"a"}}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) + content = b'{"body":"test","msgtype":{"type":"a"}}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(400, channel.code, msg=channel.result["body"]) # custom message types - content = '{"body":"test","msgtype":"test.custom.text"}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]),
msg=channel.result["body"]) + content = b'{"body":"test","msgtype":"test.custom.text"}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id)) - content = '{"body":"test2","msgtype":"m.text"}' - request, channel = make_request(b"PUT", path, content) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) + content = b'{"body":"test2","msgtype":"m.text"}' + request, channel = self.make_request("PUT", path, content) + self.render(request) + self.assertEquals(200, channel.code, msg=channel.result["body"]) class RoomInitialSyncTestCase(RoomBase): @@ -775,16 +729,16 @@ class RoomInitialSyncTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomInitialSyncTestCase, self).setUp() - + def prepare(self, reactor, clock, hs): # create the room self.room_id = self.helper.create_room_as(self.user_id) def test_initial_sync(self): - request, channel = make_request(b"GET", "/rooms/%s/initialSync" % self.room_id) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + request, channel = self.make_request( + "GET", "/rooms/%s/initialSync" % self.room_id + ) + self.render(request) + self.assertEquals(200, channel.code) self.assertEquals(self.room_id, channel.json_body["room_id"]) self.assertEquals("join", channel.json_body["membership"]) @@ -819,17 +773,16 @@ class RoomMessageListTestCase(RoomBase): user_id = "@sid1:red" - def setUp(self): - super(RoomMessageListTestCase, self).setUp() + def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) def test_topo_token_is_accepted(self): token = "t1-0_0_0_0_0_0_0_0_0" - request, channel = make_request( - b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) + request, channel = self.make_request( + "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("start" in channel.json_body) self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) @@ -837,11 +790,11 @@ class RoomMessageListTestCase(RoomBase): def test_stream_token_is_accepted_for_fwd_pagianation(self): token = "s0_0_0_0_0_0_0_0_0" - request, channel = make_request( - b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) + request, channel = self.make_request( + "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - render(request, self.resource, self.clock) - self.assertEquals(200, int(channel.result["code"])) + self.render(request) + self.assertEquals(200, channel.code) self.assertTrue("start" in channel.json_body) self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 40dc4ea256..530dc8ba6d 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -240,7 +240,6 @@ class RestHelper(object): self.assertEquals(200, code) defer.returnValue(response) - @defer.inlineCallbacks def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200): if txn_id is None: txn_id = "m%s" % 
(str(time.time())) @@ -248,9 +247,16 @@ class RestHelper(object): body = "body_text_here" path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id) - content = '{"msgtype":"m.text","body":"%s"}' % body + content = {"msgtype": "m.text", "body": body} if tok: path = path + "?access_token=%s" % tok - (code, response) = yield self.mock_resource.trigger("PUT", path, content) - self.assertEquals(expect_code, code, msg=str(response)) + request, channel = make_request("PUT", path, json.dumps(content).encode('utf8')) + render(request, self.resource, self.hs.get_reactor()) + + assert int(channel.result["code"]) == expect_code, ( + "Expected: %d, got: %d, resp: %r" + % (expect_code, int(channel.result["code"]), channel.result["body"]) + ) + + return channel.json_body diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 560b1fba96..4c30c5f258 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -62,12 +62,6 @@ class FilterTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) self.assertTrue( set( - [ - "next_batch", - "rooms", - "account_data", - "to_device", - "device_lists", - ] + ["next_batch", "rooms", "account_data", "to_device", "device_lists"] ).issubset(set(channel.json_body.keys())) ) diff --git a/tests/server.py b/tests/server.py index 7dbdb7f8ea..7bee58dff1 100644 --- a/tests/server.py +++ b/tests/server.py @@ -4,9 +4,14 @@ from io import BytesIO from six import text_type import attr +from zope.interface import implementer -from twisted.internet import address, threads +from twisted.internet import address, threads, udp +from twisted.internet._resolver import HostResolution +from twisted.internet.address import IPv4Address from twisted.internet.defer import Deferred +from twisted.internet.error import DNSLookupError +from twisted.internet.interfaces import IReactorPluggableNameResolver from twisted.python.failure import Failure from twisted.test.proto_helpers import MemoryReactorClock @@ -65,7 +70,7 @@ class FakeChannel(object): def getPeer(self): # We give an address so that getClientIP returns a non null entry, # causing us to record the MAU - return address.IPv4Address(b"TCP", "127.0.0.1", 3423) + return address.IPv4Address("TCP", "127.0.0.1", 3423) def getHost(self): return None @@ -93,7 +98,7 @@ class FakeSite: return FakeLogger() -def make_request(method, path, content=b"", access_token=None): +def make_request(method, path, content=b"", access_token=None, request=SynapseRequest): """ Make a web request using the given method and path, feed it the content, and return the Request and the Channel underneath. @@ -115,14 +120,16 @@ def make_request(method, path, content=b"", access_token=None): site = FakeSite() channel = FakeChannel() - req = SynapseRequest(site, channel) + req = request(site, channel) req.process = lambda: b"" req.content = BytesIO(content) if access_token: req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) - req.requestHeaders.addRawHeader(b"X-Forwarded-For", b"127.0.0.1") + if content: + req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + req.requestReceived(method, path, b"1.1") return req, channel @@ -154,11 +161,46 @@ def render(request, resource, clock): wait_until_result(clock, request) +@implementer(IReactorPluggableNameResolver) class ThreadedMemoryReactorClock(MemoryReactorClock): """ A MemoryReactorClock that supports callFromThread. 
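It also plugs in a name resolver: hostname lookups are served from the `lookups` dict that tests populate (e.g. self.reactor.lookups["testserv"] = "1.2.3.4"), and resolving a hostname missing from it raises DNSLookupError, as the implementation below shows.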
""" + def __init__(self): + self._udp = [] + self.lookups = {} + + class Resolver(object): + def resolveHostName( + _self, + resolutionReceiver, + hostName, + portNumber=0, + addressTypes=None, + transportSemantics='TCP', + ): + + resolution = HostResolution(hostName) + resolutionReceiver.resolutionBegan(resolution) + if hostName not in self.lookups: + raise DNSLookupError("OH NO") + + resolutionReceiver.addressResolved( + IPv4Address('TCP', self.lookups[hostName], portNumber) + ) + resolutionReceiver.resolutionComplete() + return resolution + + self.nameResolver = Resolver() + super(ThreadedMemoryReactorClock, self).__init__() + + def listenUDP(self, port, protocol, interface='', maxPacketSize=8196): + p = udp.Port(port, protocol, interface, maxPacketSize, self) + p.startListening() + self._udp.append(p) + return p + def callFromThread(self, callback, *args, **kwargs): """ Make the callback fire in the next reactor iteration. @@ -232,6 +274,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): clock.threadpool = ThreadPool() pool.threadpool = ThreadPool() + pool.running = True return d @@ -239,3 +282,84 @@ def get_clock(): clock = ThreadedMemoryReactorClock() hs_clock = Clock(clock) return (clock, hs_clock) + + +@attr.s +class FakeTransport(object): + """ + A twisted.internet.interfaces.ITransport implementation which sends all its data + straight into an IProtocol object: it exists to connect two IProtocols together. + + To use it, instantiate it with the receiving IProtocol, and then pass it to the + sending IProtocol's makeConnection method: + + server = HTTPChannel() + client.makeConnection(FakeTransport(server, self.reactor)) + + If you want bidirectional communication, you'll need two instances. + """ + + other = attr.ib() + """The Protocol object which will receive any data written to this transport. + + :type: twisted.internet.interfaces.IProtocol + """ + + _reactor = attr.ib() + """Test reactor + + :type: twisted.internet.interfaces.IReactorTime + """ + + disconnecting = False + buffer = attr.ib(default=b'') + producer = attr.ib(default=None) + + def getPeer(self): + return None + + def getHost(self): + return None + + def loseConnection(self): + self.disconnecting = True + + def abortConnection(self): + self.disconnecting = True + + def pauseProducing(self): + self.producer.pauseProducing() + + def unregisterProducer(self): + if not self.producer: + return + + self.producer = None + + def registerProducer(self, producer, streaming): + self.producer = producer + self.producerStreaming = streaming + + def _produce(): + d = self.producer.resumeProducing() + d.addCallback(lambda x: self._reactor.callLater(0.1, _produce)) + + if not streaming: + self._reactor.callLater(0.0, _produce) + + def write(self, byt): + self.buffer = self.buffer + byt + + def _write(): + if getattr(self.other, "transport") is not None: + self.other.dataReceived(self.buffer) + self.buffer = b"" + return + + self._reactor.callLater(0.0, _write) + + _write() + + def writeSequence(self, seq): + for x in seq: + self.write(x) diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py new file mode 100644 index 0000000000..95badc985e --- /dev/null +++ b/tests/server_notices/test_consent.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v2_alpha import sync + +from tests import unittest + + +class ConsentNoticesTests(unittest.HomeserverTestCase): + + servlets = [ + sync.register_servlets, + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + self.consent_notice_message = "consent %(consent_uri)s" + config = self.default_config() + config.user_consent_version = "1" + config.user_consent_server_notice_content = { + "msgtype": "m.text", + "body": self.consent_notice_message, + } + config.public_baseurl = "https://example.com/" + config.form_secret = "123abc" + + config.server_notices_mxid = "@notices:test" + config.server_notices_mxid_display_name = "test display name" + config.server_notices_mxid_avatar_url = None + config.server_notices_room_name = "Server Notices" + + hs = self.setup_test_homeserver(config=config) + + return hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("bob", "abc123") + self.access_token = self.login("bob", "abc123") + + def test_get_sync_message(self): + """ + When user consent server notices are enabled, a sync will cause a notice + to fire (in a room which the user is invited to). The notice contains + the notice URL + an authentication code. + """ + # Initial sync, to get the user consent room invite + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the Room ID to join + room_id = list(channel.json_body["rooms"]["invite"].keys())[0] + + # Join the room + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/" + room_id + "/join", + access_token=self.access_token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Sync again, to get the message in the room + request, channel = self.make_request( + "GET", "/_matrix/client/r0/sync", access_token=self.access_token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # Get the message + room = channel.json_body["rooms"]["join"][room_id] + messages = [ + x for x in room["timeline"]["events"] if x["type"] == "m.room.message" + ] + + # One message, with the consent URL + self.assertEqual(len(messages), 1) + self.assertTrue( + messages[0]["content"]["body"].startswith( + "consent https://example.com/_matrix/consent" + ) + ) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index fede3f52a9..4701eedd45 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -55,7 +55,8 @@ class TestResourceLimitsServerNotices(unittest.TestCase): returnValue="" ) self._rlsn._store.add_tag_to_room = Mock() - self.hs.config.admin_uri = "mailto:user@test.com" + self._rlsn._store.get_tags_for_room = Mock(return_value={}) + self.hs.config.admin_contact = "mailto:user@test.com" @defer.inlineCallbacks def 
test_maybe_send_server_notice_to_user_flag_off(self): @@ -79,12 +80,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase): self._rlsn._auth.check_auth_blocking = Mock() mock_event = Mock( - type=EventTypes.Message, - content={"msgtype": ServerNoticeMsgType}, + type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} + ) + self._rlsn._store.get_events = Mock( + return_value=defer.succeed({"123": mock_event}) ) - self._rlsn._store.get_events = Mock(return_value=defer.succeed( - {"123": mock_event} - )) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) # Would be better to check the content, but once == remove blocking event @@ -98,12 +98,11 @@ class TestResourceLimitsServerNotices(unittest.TestCase): ) mock_event = Mock( - type=EventTypes.Message, - content={"msgtype": ServerNoticeMsgType}, + type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} + ) + self._rlsn._store.get_events = Mock( + return_value=defer.succeed({"123": mock_event}) ) - self._rlsn._store.get_events = Mock(return_value=defer.succeed( - {"123": mock_event} - )) yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) self._send_notice.assert_not_called() @@ -172,17 +171,13 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): self.user_id = "@user_id:test" - self.hs.config.admin_uri = "mailto:user@test.com" + self.hs.config.admin_contact = "mailto:user@test.com" @defer.inlineCallbacks def test_server_notice_only_sent_once(self): - self.store.get_monthly_active_count = Mock( - return_value=1000, - ) + self.store.get_monthly_active_count = Mock(return_value=1000) - self.store.user_last_seen_monthly_active = Mock( - return_value=1000, - ) + self.store.user_last_seen_monthly_active = Mock(return_value=1000) # Call the function multiple times to ensure we only send the notice once yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) @@ -192,12 +187,12 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase): # Now lets get the last load of messages in the service notice room and # check that there is only one server notice room_id = yield self.server_notices_manager.get_notice_room_for_user( - self.user_id, + self.user_id ) token = yield self.event_source.get_current_token() events, _ = yield self.store.get_recent_events_for_room( - room_id, limit=100, end_token=token.room_key, + room_id, limit=100, end_token=token.room_key ) count = 0 diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index c893990454..3f0083831b 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -37,18 +37,14 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.as_yaml_files = [] - config = Mock( - app_service_config_files=self.as_yaml_files, - event_cache_size=1, - password_providers=[], - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + hs.config.app_service_config_files = self.as_yaml_files + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + self.as_token = "token1" self.as_url = "some_url" self.as_id = "as1" @@ -58,7 +54,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): self._add_appservice("token2", "as2", "some_url", "some_hs_token", "bob") self._add_appservice("token3", "as3", "some_url", "some_hs_token", "bob") # must be done after inserts - self.store = 
ApplicationServiceStore(None, hs) + self.store = ApplicationServiceStore(hs.get_db_conn(), hs) def tearDown(self): # TODO: suboptimal that we need to create files for tests! @@ -105,18 +101,16 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): def setUp(self): self.as_yaml_files = [] - config = Mock( - app_service_config_files=self.as_yaml_files, - event_cache_size=1, - password_providers=[], - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + + hs.config.app_service_config_files = self.as_yaml_files + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + self.db_pool = hs.get_db_pool() + self.engine = hs.database_engine self.as_list = [ {"token": "token1", "url": "https://matrix-as.org", "id": "id_1"}, @@ -129,7 +123,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files = [] - self.store = TestTransactionStore(None, hs) + self.store = TestTransactionStore(hs.get_db_conn(), hs) def _add_service(self, url, as_token, id): as_yaml = dict( @@ -146,29 +140,35 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.as_yaml_files.append(as_token) def _set_state(self, id, state, txn=None): - return self.db_pool.runQuery( - "INSERT INTO application_services_state(as_id, state, last_txn) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_state(as_id, state, last_txn) " + "VALUES(?,?,?)" + ), (id, state, txn), ) def _insert_txn(self, as_id, txn_id, events): - return self.db_pool.runQuery( - "INSERT INTO application_services_txns(as_id, txn_id, event_ids) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_txns(as_id, txn_id, event_ids) " + "VALUES(?,?,?)" + ), (as_id, txn_id, json.dumps([e.event_id for e in events])), ) def _set_last_txn(self, as_id, txn_id): - return self.db_pool.runQuery( - "INSERT INTO application_services_state(as_id, last_txn, state) " - "VALUES(?,?,?)", + return self.db_pool.runOperation( + self.engine.convert_param_style( + "INSERT INTO application_services_state(as_id, last_txn, state) " + "VALUES(?,?,?)" + ), (as_id, txn_id, ApplicationServiceState.UP), ) @defer.inlineCallbacks def test_get_appservice_state_none(self): - service = Mock(id=999) + service = Mock(id="999") state = yield self.store.get_appservice_state(service) self.assertEquals(None, state) @@ -200,7 +200,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): service = Mock(id=self.as_list[1]["id"]) yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN) rows = yield self.db_pool.runQuery( - "SELECT as_id FROM application_services_state WHERE state=?", + self.engine.convert_param_style( + "SELECT as_id FROM application_services_state WHERE state=?" + ), (ApplicationServiceState.DOWN,), ) self.assertEquals(service.id, rows[0][0]) @@ -212,7 +214,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN) yield self.store.set_appservice_state(service, ApplicationServiceState.UP) rows = yield self.db_pool.runQuery( - "SELECT as_id FROM application_services_state WHERE state=?", + self.engine.convert_param_style( + "SELECT as_id FROM application_services_state WHERE state=?" 
+ ), (ApplicationServiceState.UP,), ) self.assertEquals(service.id, rows[0][0]) @@ -279,14 +283,19 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.complete_appservice_txn(txn_id=txn_id, service=service) res = yield self.db_pool.runQuery( - "SELECT last_txn FROM application_services_state WHERE as_id=?", + self.engine.convert_param_style( + "SELECT last_txn FROM application_services_state WHERE as_id=?" + ), (service.id,), ) self.assertEquals(1, len(res)) self.assertEquals(txn_id, res[0][0]) res = yield self.db_pool.runQuery( - "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,) + self.engine.convert_param_style( + "SELECT * FROM application_services_txns WHERE txn_id=?" + ), + (txn_id,), ) self.assertEquals(0, len(res)) @@ -300,7 +309,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): yield self.store.complete_appservice_txn(txn_id=txn_id, service=service) res = yield self.db_pool.runQuery( - "SELECT last_txn, state FROM application_services_state WHERE " "as_id=?", + self.engine.convert_param_style( + "SELECT last_txn, state FROM application_services_state WHERE as_id=?" + ), (service.id,), ) self.assertEquals(1, len(res)) @@ -308,7 +319,10 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): self.assertEquals(ApplicationServiceState.UP, res[0][1]) res = yield self.db_pool.runQuery( - "SELECT * FROM application_services_txns WHERE txn_id=?", (txn_id,) + self.engine.convert_param_style( + "SELECT * FROM application_services_txns WHERE txn_id=?" + ), + (txn_id,), ) self.assertEquals(0, len(res)) @@ -394,37 +408,31 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(suffix="1") f2 = self._write_config(suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) - ApplicationServiceStore(None, hs) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + + ApplicationServiceStore(hs.get_db_conn(), hs) @defer.inlineCallbacks def test_duplicate_ids(self): f1 = self._write_config(id="id", suffix="1") f2 = self._write_config(id="id", suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), federation_client=Mock() ) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(None, hs) + ApplicationServiceStore(hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) @@ -436,19 +444,16 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): f1 = self._write_config(as_token="as_token", suffix="1") f2 = self._write_config(as_token="as_token", suffix="2") - config = Mock( - app_service_config_files=[f1, f2], event_cache_size=1, password_providers=[] - ) hs = yield setup_test_homeserver( - self.addCleanup, - config=config, - datastore=Mock(), - federation_sender=Mock(), - federation_client=Mock(), + self.addCleanup, federation_sender=Mock(), 
federation_client=Mock() ) + hs.config.app_service_config_files = [f1, f2] + hs.config.event_cache_size = 1 + hs.config.password_providers = [] + with self.assertRaises(ConfigError) as cm: - ApplicationServiceStore(None, hs) + ApplicationServiceStore(hs.get_db_conn(), hs) e = cm.exception self.assertIn(f1, str(e)) diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 7cb5f0e4cf..829f47d2e8 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -20,11 +20,11 @@ from mock import Mock from twisted.internet import defer -from synapse.server import HomeServer from synapse.storage._base import SQLBaseStore from synapse.storage.engines import create_engine from tests import unittest +from tests.utils import TestHomeServer class SQLBaseStoreTestCase(unittest.TestCase): @@ -51,7 +51,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): config = Mock() config.event_cache_size = 1 config.database_config = {"name": "sqlite3"} - hs = HomeServer( + hs = TestHomeServer( "test", db_pool=self.db_pool, config=config, diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c2e88bdbaf..4577e9422b 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,35 +13,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from mock import Mock from twisted.internet import defer -import tests.unittest -import tests.utils +from synapse.http.site import XForwardedForRequest +from synapse.rest.client.v1 import admin, login + +from tests import unittest -class ClientIpStoreTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(ClientIpStoreTestCase, self).__init__(*args, **kwargs) - self.store = None # type: synapse.storage.DataStore - self.clock = None # type: tests.utils.MockClock +class ClientIpStoreTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs - @defer.inlineCallbacks - def setUp(self): - self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup) + def prepare(self, hs, reactor, clock): self.store = self.hs.get_datastore() - self.clock = self.hs.get_clock() - @defer.inlineCallbacks def test_insert_new_client_ip(self): - self.clock.now = 12345678 + self.reactor.advance(12345678) + user_id = "@user:id" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - result = yield self.store.get_last_client_ip_by_device(user_id, "device_id") + # Trigger the storage loop + self.reactor.advance(10) + + result = self.get_success( + self.store.get_last_client_ip_by_device(user_id, "device_id") + ) r = result[(user_id, "device_id")] self.assertDictContainsSubset( @@ -55,18 +62,18 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): r, ) - @defer.inlineCallbacks def test_disabled_monthly_active_user(self): self.hs.config.limit_usage_by_mau = False self.hs.config.max_mau_value = 50 user_id = "@user:server" - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", 
"device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_full(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 @@ -76,40 +83,119 @@ class ClientIpStoreTestCase(tests.unittest.TestCase): self.store.get_monthly_active_count = Mock( return_value=defer.succeed(lots_of_users) ) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - @defer.inlineCallbacks def test_adding_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) - @defer.inlineCallbacks def test_updating_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" + self.get_success( + self.store.register(user_id=user_id, token="123", password_hash=None) + ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" + # Trigger the saving loop + self.reactor.advance(10) + + self.get_success( + self.store.insert_client_ip( + user_id, "access_token", "ip", "user_agent", "device_id" + ) ) - yield self.store.insert_client_ip( - user_id, "access_token", "ip", "user_agent", "device_id" - ) - active = yield self.store.user_last_seen_monthly_active(user_id) + active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertTrue(active) + + +class ClientIpAuthTestCase(unittest.HomeserverTestCase): + + servlets = [admin.register_servlets, login.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def prepare(self, hs, reactor, clock): + self.store = self.hs.get_datastore() + self.user_id = self.register_user("bob", "abc123", True) + + def test_request_with_xforwarded(self): + """ + The IP in X-Forwarded-For is entered into the client IPs table. + """ + self._runtest( + {b"X-Forwarded-For": b"127.9.0.1"}, + "127.9.0.1", + {"request": XForwardedForRequest}, + ) + + def test_request_from_getPeer(self): + """ + The IP returned by getPeer is entered into the client IPs table, if + there's no X-Forwarded-For header. 
+ """ + self._runtest({}, "127.0.0.1", {}) + + def _runtest(self, headers, expected_ip, make_request_args): + device_id = "bleb" + + access_token = self.login("bob", "abc123", device_id=device_id) + + # Advance to a known time + self.reactor.advance(123456 - self.reactor.seconds()) + + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/admin/users/" + self.user_id, + access_token=access_token, + **make_request_args + ) + request.requestHeaders.addRawHeader(b"User-Agent", b"Mozzila pizza") + + # Add the optional headers + for h, v in headers.items(): + request.requestHeaders.addRawHeader(h, v) + self.render(request) + + # Advance so the save loop occurs + self.reactor.advance(100) + + result = self.get_success( + self.store.get_last_client_ip_by_device(self.user_id, device_id) + ) + r = result[(self.user_id, device_id)] + self.assertDictContainsSubset( + { + "user_id": self.user_id, + "device_id": device_id, + "ip": expected_ip, + "user_agent": "Mozzila pizza", + "last_seen": 123456100, + }, + r, + ) diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index b4510c1c8d..4e128e1047 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -16,7 +16,6 @@ from twisted.internet import defer -from synapse.storage.directory import DirectoryStore from synapse.types import RoomAlias, RoomID from tests import unittest @@ -28,7 +27,7 @@ class DirectoryStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(self.addCleanup) - self.store = DirectoryStore(None, hs) + self.store = hs.get_datastore() self.room = RoomID.from_string("!abcde:test") self.alias = RoomAlias.from_string("#my-room:test") diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 2fdf34fdf6..0d4e74d637 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -37,10 +37,10 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase): ( "INSERT INTO events (" " room_id, event_id, type, depth, topological_ordering," - " content, processed, outlier) " - "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?)" + " content, processed, outlier, stream_ordering) " + "VALUES (?, ?, 'm.test', ?, ?, 'test', ?, ?, ?)" ), - (room_id, event_id, i, i, True, False), + (room_id, event_id, i, i, True, False, i), ) txn.execute( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index f2ed866ae7..686f12a0dc 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -12,26 +12,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from mock import Mock from twisted.internet import defer -import tests.unittest -import tests.utils -from tests.utils import setup_test_homeserver +from tests.unittest import HomeserverTestCase FORTY_DAYS = 40 * 24 * 60 * 60 -class MonthlyActiveUsersTestCase(tests.unittest.TestCase): - def __init__(self, *args, **kwargs): - super(MonthlyActiveUsersTestCase, self).__init__(*args, **kwargs) +class MonthlyActiveUsersTestCase(HomeserverTestCase): + def make_homeserver(self, reactor, clock): - @defer.inlineCallbacks - def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = self.hs.get_datastore() + hs = self.setup_test_homeserver() + self.store = hs.get_datastore() + hs.config.limit_usage_by_mau = True + hs.config.max_mau_value = 50 + # Advance the clock a bit + reactor.advance(FORTY_DAYS) + + return hs - @defer.inlineCallbacks def test_initialise_reserved_users(self): self.hs.config.max_mau_value = 5 user1 = "@user1:server" @@ -44,88 +45,172 @@ class MonthlyActiveUsersTestCase(tests.unittest.TestCase): ] user_num = len(threepids) - yield self.store.register(user_id=user1, token="123", password_hash=None) - - yield self.store.register(user_id=user2, token="456", password_hash=None) + self.store.register(user_id=user1, token="123", password_hash=None) + self.store.register(user_id=user2, token="456", password_hash=None) + self.pump() now = int(self.hs.get_clock().time_msec()) - yield self.store.user_add_threepid(user1, "email", user1_email, now, now) - yield self.store.user_add_threepid(user2, "email", user2_email, now, now) - yield self.store.initialise_reserved_users(threepids) + self.store.user_add_threepid(user1, "email", user1_email, now, now) + self.store.user_add_threepid(user2, "email", user2_email, now, now) + self.store.initialise_reserved_users(threepids) + self.pump() - active_count = yield self.store.get_monthly_active_count() + active_count = self.store.get_monthly_active_count() # Test total counts - self.assertEquals(active_count, user_num) + self.assertEquals(self.get_success(active_count), user_num) # Test user is marked as active - - timestamp = yield self.store.user_last_seen_monthly_active(user1) - self.assertTrue(timestamp) - timestamp = yield self.store.user_last_seen_monthly_active(user2) - self.assertTrue(timestamp) + timestamp = self.store.user_last_seen_monthly_active(user1) + self.assertTrue(self.get_success(timestamp)) + timestamp = self.store.user_last_seen_monthly_active(user2) + self.assertTrue(self.get_success(timestamp)) # Test that users are never removed from the db. 
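The conversion pattern applied throughout this file is worth spelling out once, as a sketch (assuming self.store is the datastore of a HomeserverTestCase): instead of yield-ing Deferreds under @defer.inlineCallbacks, a test fires the storage call, pumps the fake reactor, and unwraps the result with get_success():

# Old style (inlineCallbacks): count = yield self.store.get_monthly_active_count()
# New style (HomeserverTestCase):
d = self.store.get_monthly_active_count()  # returns a Deferred
count = self.get_success(d)                # pump()s the reactor, then unwraps it
self.assertEquals(count, 0)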
self.hs.config.max_mau_value = 0 - self.hs.get_clock().advance_time(FORTY_DAYS) + self.reactor.advance(FORTY_DAYS) - yield self.store.reap_monthly_active_users() + self.store.reap_monthly_active_users() + self.pump() - active_count = yield self.store.get_monthly_active_count() - self.assertEquals(active_count, user_num) + active_count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(active_count), user_num) - # Test that regalar users are removed from the db + # Test that regular users are removed from the db ru_count = 2 - yield self.store.upsert_monthly_active_user("@ru1:server") - yield self.store.upsert_monthly_active_user("@ru2:server") - active_count = yield self.store.get_monthly_active_count() + self.store.upsert_monthly_active_user("@ru1:server") + self.store.upsert_monthly_active_user("@ru2:server") + self.pump() - self.assertEqual(active_count, user_num + ru_count) + active_count = self.store.get_monthly_active_count() + self.assertEqual(self.get_success(active_count), user_num + ru_count) self.hs.config.max_mau_value = user_num - yield self.store.reap_monthly_active_users() + self.store.reap_monthly_active_users() + self.pump() - active_count = yield self.store.get_monthly_active_count() - self.assertEquals(active_count, user_num) + active_count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(active_count), user_num) - @defer.inlineCallbacks def test_can_insert_and_count_mau(self): - count = yield self.store.get_monthly_active_count() - self.assertEqual(0, count) + count = self.store.get_monthly_active_count() + self.assertEqual(0, self.get_success(count)) - yield self.store.upsert_monthly_active_user("@user:server") - count = yield self.store.get_monthly_active_count() + self.store.upsert_monthly_active_user("@user:server") + self.pump() - self.assertEqual(1, count) + count = self.store.get_monthly_active_count() + self.assertEqual(1, self.get_success(count)) - @defer.inlineCallbacks def test_user_last_seen_monthly_active(self): user_id1 = "@user1:server" user_id2 = "@user2:server" user_id3 = "@user3:server" - result = yield self.store.user_last_seen_monthly_active(user_id1) - self.assertFalse(result == 0) - yield self.store.upsert_monthly_active_user(user_id1) - yield self.store.upsert_monthly_active_user(user_id2) - result = yield self.store.user_last_seen_monthly_active(user_id1) - self.assertTrue(result > 0) - result = yield self.store.user_last_seen_monthly_active(user_id3) - self.assertFalse(result == 0) + result = self.store.user_last_seen_monthly_active(user_id1) + self.assertFalse(self.get_success(result) == 0) + + self.store.upsert_monthly_active_user(user_id1) + self.store.upsert_monthly_active_user(user_id2) + self.pump() + + result = self.store.user_last_seen_monthly_active(user_id1) + self.assertGreater(self.get_success(result), 0) + + result = self.store.user_last_seen_monthly_active(user_id3) + self.assertNotEqual(self.get_success(result), 0) - @defer.inlineCallbacks def test_reap_monthly_active_users(self): self.hs.config.max_mau_value = 5 initial_users = 10 for i in range(initial_users): - yield self.store.upsert_monthly_active_user("@user%d:server" % i) - count = yield self.store.get_monthly_active_count() - self.assertTrue(count, initial_users) - yield self.store.reap_monthly_active_users() - count = yield self.store.get_monthly_active_count() - self.assertEquals(count, initial_users - self.hs.config.max_mau_value) + self.store.upsert_monthly_active_user("@user%d:server" % i) + self.pump() - 
self.hs.get_clock().advance_time(FORTY_DAYS) - yield self.store.reap_monthly_active_users() - count = yield self.store.get_monthly_active_count() - self.assertEquals(count, 0) + count = self.store.get_monthly_active_count() + self.assertTrue(self.get_success(count), initial_users) + + self.store.reap_monthly_active_users() + self.pump() + count = self.store.get_monthly_active_count() + self.assertEquals( + self.get_success(count), initial_users - self.hs.config.max_mau_value + ) + + self.reactor.advance(FORTY_DAYS) + self.store.reap_monthly_active_users() + self.pump() + + count = self.store.get_monthly_active_count() + self.assertEquals(self.get_success(count), 0) + + def test_populate_monthly_users_is_guest(self): + # Test that guest users are not added to mau list + user_id = "user_id" + self.store.register( + user_id=user_id, token="123", password_hash=None, make_guest=True + ) + self.store.upsert_monthly_active_user = Mock() + self.store.populate_monthly_active_users(user_id) + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() + + def test_populate_monthly_users_should_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed(None) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_called_once() + + def test_populate_monthly_users_should_not_update(self): + self.store.upsert_monthly_active_user = Mock() + + self.store.is_trial_user = Mock( + return_value=defer.succeed(False) + ) + self.store.user_last_seen_monthly_active = Mock( + return_value=defer.succeed( + self.hs.get_clock().time_msec() + ) + ) + self.store.populate_monthly_active_users('user_id') + self.pump() + self.store.upsert_monthly_active_user.assert_not_called() + + def test_get_reserved_real_user_account(self): + # Test no reserved users, or reserved threepids + count = self.store.get_registered_reserved_users_count() + self.assertEquals(self.get_success(count), 0) + # Test reserved users but no registered users + + user1 = '@user1:example.com' + user2 = '@user2:example.com' + user1_email = 'user1@example.com' + user2_email = 'user2@example.com' + threepids = [ + {'medium': 'email', 'address': user1_email}, + {'medium': 'email', 'address': user2_email}, + ] + self.hs.config.mau_limits_reserved_threepids = threepids + self.store.initialise_reserved_users(threepids) + self.pump() + count = self.store.get_registered_reserved_users_count() + self.assertEquals(self.get_success(count), 0) + + # Test reserved registered users + self.store.register(user_id=user1, token="123", password_hash=None) + self.store.register(user_id=user2, token="456", password_hash=None) + self.pump() + + now = int(self.hs.get_clock().time_msec()) + self.store.user_add_threepid(user1, "email", user1_email, now, now) + self.store.user_add_threepid(user2, "email", user2_email, now, now) + count = self.store.get_registered_reserved_users_count() + self.assertEquals(self.get_success(count), len(threepids)) diff --git a/tests/storage/test_presence.py b/tests/storage/test_presence.py index b5b58ff660..c7a63f39b9 100644 --- a/tests/storage/test_presence.py +++ b/tests/storage/test_presence.py @@ -16,19 +16,18 @@ from twisted.internet import defer -from synapse.storage.presence import PresenceStore from synapse.types import UserID from tests import unittest -from tests.utils import MockClock, 
setup_test_homeserver +from tests.utils import setup_test_homeserver class PresenceStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - hs = yield setup_test_homeserver(self.addCleanup, clock=MockClock()) + hs = yield setup_test_homeserver(self.addCleanup) - self.store = PresenceStore(None, hs) + self.store = hs.get_datastore() self.u_apple = UserID.from_string("@apple:test") self.u_banana = UserID.from_string("@banana:test") diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index a1f6618bf9..45824bd3b2 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -28,7 +28,7 @@ class ProfileStoreTestCase(unittest.TestCase): def setUp(self): hs = yield setup_test_homeserver(self.addCleanup) - self.store = ProfileStore(None, hs) + self.store = ProfileStore(hs.get_db_conn(), hs) self.u_frank = UserID.from_string("@frank:test") diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py new file mode 100644 index 0000000000..f671599cb8 --- /dev/null +++ b/tests/storage/test_purge.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest.client.v1 import room + +from tests.unittest import HomeserverTestCase + + +class PurgeTests(HomeserverTestCase): + + user_id = "@red:server" + servlets = [room.register_servlets] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + return hs + + def prepare(self, reactor, clock, hs): + self.room_id = self.helper.create_room_as(self.user_id) + + def test_purge(self): + """ + Purging a room will delete everything before the topological point. + """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Get the topological token + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + self.assertEqual(self.successResultOf(purge), None) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # 1-3 should fail and last will succeed, meaning that 1-3 are deleted + # and last is not. + self.failureResultOf(get_first) + self.failureResultOf(get_second) + self.failureResultOf(get_third) + self.successResultOf(get_last) + + def test_purge_wont_delete_extrems(self): + """ + Purging a room with a topological token past the forward extremities fails, and deletes nothing. 
+ """ + # Send four messages to the room + first = self.helper.send(self.room_id, body="test1") + second = self.helper.send(self.room_id, body="test2") + third = self.helper.send(self.room_id, body="test3") + last = self.helper.send(self.room_id, body="test4") + + storage = self.hs.get_datastore() + + # Set the topological token higher than it should be + event = storage.get_topological_token_for_event(last["event_id"]) + self.pump() + event = self.successResultOf(event) + event = "t{}-{}".format( + *list(map(lambda x: x + 1, map(int, event[1:].split("-")))) + ) + + # Purge everything before this topological token + purge = storage.purge_history(self.room_id, event, True) + self.pump() + f = self.failureResultOf(purge) + self.assertIn("greater than forward", f.value.args[0]) + + # Try and get the events + get_first = storage.get_event(first["event_id"]) + get_second = storage.get_event(second["event_id"]) + get_third = storage.get_event(third["event_id"]) + get_last = storage.get_event(last["event_id"]) + self.pump() + + # Nothing is deleted. + self.successResultOf(get_first) + self.successResultOf(get_second) + self.successResultOf(get_third) + self.successResultOf(get_last) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index d717b9f94e..b9c5b39d59 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -74,6 +74,45 @@ class StateStoreTestCase(tests.unittest.TestCase): self.assertEqual(s1[t].event_id, s2[t].event_id) self.assertEqual(len(s1), len(s2)) + @defer.inlineCallbacks + def test_get_state_groups_ids(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_map = list(state_group_map.values())[0] + self.assertDictEqual( + state_map, + { + (EventTypes.Create, ''): e1.event_id, + (EventTypes.Name, ''): e2.event_id, + }, + ) + + @defer.inlineCallbacks + def test_get_state_groups(self): + e1 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, '', {} + ) + e2 = yield self.inject_state_event( + self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} + ) + + state_group_map = yield self.store.get_state_groups( + self.room, [e2.event_id]) + self.assertEqual(len(state_group_map), 1) + state_list = list(state_group_map.values())[0] + + self.assertEqual( + {ev.event_id for ev in state_list}, + {e1.event_id, e2.event_id}, + ) + @defer.inlineCallbacks def test_get_state_for_event(self): @@ -185,8 +224,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, - group, [], filtered_types=[EventTypes.Member] + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) self.assertEqual(is_all, True) @@ -200,19 +238,20 @@ class StateStoreTestCase(tests.unittest.TestCase): (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [], filtered_types=[EventTypes.Member] + group, + [], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) - self.assertDictEqual( - {}, - state_dict, - ) + self.assertDictEqual({}, state_dict) # test _get_some_state_from_cache 
correctly filters in members with wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -226,7 +265,9 @@ class StateStoreTestCase(tests.unittest.TestCase): (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -264,18 +305,15 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) # test _get_some_state_from_cache correctly filters in members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, True) @@ -305,9 +343,7 @@ class StateStoreTestCase(tests.unittest.TestCase): key=group, value=state_dict_ids, # list fetched keys so it knows it's partial - fetched_keys=( - (e1.type, e1.state_key), - ), + fetched_keys=((e1.type, e1.state_key),), ) (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get( @@ -315,20 +351,8 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, False) - self.assertEqual( - known_absent, - set( - [ - (e1.type, e1.state_key), - ] - ), - ) - self.assertDictEqual( - state_dict_ids, - { - (e1.type, e1.state_key): e1.event_id, - }, - ) + self.assertEqual(known_absent, set([(e1.type, e1.state_key)])) + self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) ############################################ # test that things work with a partial cache @@ -336,8 +360,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters out members with types=[] room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, - group, [], filtered_types=[EventTypes.Member] + self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] ) self.assertEqual(is_all, False) @@ -346,7 +369,9 @@ class StateStoreTestCase(tests.unittest.TestCase): room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [], filtered_types=[EventTypes.Member] + group, + [], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -355,20 +380,19 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_some_state_from_cache correctly filters in members wildcard types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, False) - self.assertDictEqual( - { - (e1.type, e1.state_key): e1.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e1.type, e1.state_key): 
e1.event_id}, state_dict) (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] + group, + [(EventTypes.Member, None)], + filtered_types=[EventTypes.Member], ) self.assertEqual(is_all, True) @@ -389,12 +413,7 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, False) - self.assertDictEqual( - { - (e1.type, e1.state_key): e1.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, @@ -404,18 +423,15 @@ class StateStoreTestCase(tests.unittest.TestCase): ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) # test _get_some_state_from_cache correctly filters in members with specific types # and no filtered_types (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, False) @@ -423,13 +439,10 @@ (state_dict, is_all) = yield self.store._get_some_state_from_cache( self.store._state_group_members_cache, - group, [(EventTypes.Member, e5.state_key)], filtered_types=None + group, + [(EventTypes.Member, e5.state_key)], + filtered_types=None, ) self.assertEqual(is_all, True) - self.assertDictEqual( - { - (e5.type, e5.state_key): e5.event_id, - }, - state_dict, - ) + self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py new file mode 100644 index 0000000000..14169afa96 --- /dev/null +++ b/tests/storage/test_transactions.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from tests.unittest import HomeserverTestCase + + +class TransactionStoreTestCase(HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + + def test_get_set_transactions(self): + """Tests that we can successfully get a non-existent entry for + destination retries, as well as testing that we can set and get + correctly. 
+ """ + d = self.store.get_destination_retry_timings("example.com") + r = self.get_success(d) + self.assertIsNone(r) + + d = self.store.set_destination_retry_timings("example.com", 50, 100) + self.get_success(d) + + d = self.store.get_destination_retry_timings("example.com") + r = self.get_success(d) + + self.assert_dict({"retry_last_ts": 50, "retry_interval": 100}, r) + + def test_initial_set_transactions(self): + """Tests that we can successfully set the destination retries (there + was a bug around invalidating the cache that broke this) + """ + d = self.store.set_destination_retry_timings("example.com", 50, 100) + self.get_success(d) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index b46e0ea7e2..0dde1ab2fe 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -30,7 +30,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.hs = yield setup_test_homeserver(self.addCleanup) - self.store = UserDirectoryStore(None, self.hs) + self.store = UserDirectoryStore(self.hs.get_db_conn(), self.hs) # alice and bob are both in !room_id. bobby is not but shares # a homeserver with alice. diff --git a/tests/test_federation.py b/tests/test_federation.py index 2540604fcc..952a0a7b51 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -6,6 +6,7 @@ from twisted.internet.defer import maybeDeferred, succeed from synapse.events import FrozenEvent from synapse.types import Requester, UserID from synapse.util import Clock +from synapse.util.logcontext import LoggingContext from tests import unittest from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver @@ -117,9 +118,10 @@ class MessageAcceptTests(unittest.TestCase): } ) - d = self.handler.on_receive_pdu( - "test.serv", lying_event, sent_to_us_directly=True - ) + with LoggingContext(request="lying_event"): + d = self.handler.on_receive_pdu( + "test.serv", lying_event, sent_to_us_directly=True + ) # Step the reactor, so the database fetches come back self.reactor.advance(1) @@ -139,107 +141,3 @@ class MessageAcceptTests(unittest.TestCase): self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id ) self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv") - - def test_cant_hide_past_history(self): - """ - If you send a message, you must be able to provide the direct - prev_events that said event references. 
- """ - - def post_json(destination, path, data, headers=None, timeout=0): - if path.startswith("/_matrix/federation/v1/get_missing_events/"): - return { - "events": [ - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "three:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [("four:test.serv", {})], - } - ] - } - - self.http_client.post_json = post_json - - def get_json(destination, path, args, headers=None): - if path.startswith("/_matrix/federation/v1/state_ids/"): - d = self.successResultOf( - self.homeserver.datastore.get_state_ids_for_event("one:test.serv") - ) - - return succeed( - { - "pdu_ids": [ - y - for x, y in d.items() - if x == ("m.room.member", "@us:test") - ], - "auth_chain_ids": list(d.values()), - } - ) - - self.http_client.get_json = get_json - - # Figure out what the most recent event is - most_recent = self.successResultOf( - maybeDeferred( - self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id - ) - )[0] - - # Make a good event - good_event = FrozenEvent( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "one:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [(most_recent, {})], - } - ) - - d = self.handler.on_receive_pdu( - "test.serv", good_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - self.assertEqual(self.successResultOf(d), None) - - bad_event = FrozenEvent( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "two:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": "hewwo?", - "auth_events": [], - "prev_events": [("one:test.serv", {}), ("three:test.serv", {})], - } - ) - - d = self.handler.on_receive_pdu( - "test.serv", bad_event, sent_to_us_directly=True - ) - self.reactor.advance(1) - - extrem = maybeDeferred( - self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id - ) - self.assertEqual(self.successResultOf(extrem)[0], "two:test.serv") - - state = self.homeserver.get_state_handler().get_current_state_ids(self.room_id) - self.reactor.advance(1) - self.assertIn(("m.room.member", "@us:test"), self.successResultOf(state).keys()) diff --git a/tests/test_mau.py b/tests/test_mau.py index 0732615447..bdbacb8448 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -185,20 +185,20 @@ class TestMauLimit(unittest.TestCase): self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def create_user(self, localpart): - request_data = json.dumps({ - "username": localpart, - "password": "monkey", - "auth": {"type": LoginType.DUMMY}, - }) + request_data = json.dumps( + { + "username": localpart, + "password": "monkey", + "auth": {"type": LoginType.DUMMY}, + } + ) - request, channel = make_request(b"POST", b"/register", request_data) + request, channel = make_request("POST", "/register", request_data) render(request, self.resource, self.reactor) - if channel.result["code"] != b"200": + if channel.code != 200: raise HttpResponseException( - int(channel.result["code"]), - channel.result["reason"], - channel.result["body"], + channel.code, channel.result["reason"], channel.result["body"] ).to_synapse_error() access_token = channel.json_body["access_token"] @@ -206,12 +206,12 @@ class TestMauLimit(unittest.TestCase): return access_token def 
do_sync_for_user(self, token): - request, channel = make_request(b"GET", b"/sync", access_token=token) + request, channel = make_request( + "GET", "/sync", access_token=token.encode('ascii') + ) render(request, self.resource, self.reactor) - if channel.result["code"] != b"200": + if channel.code != 200: raise HttpResponseException( - int(channel.result["code"]), - channel.result["reason"], - channel.result["body"], + channel.code, channel.result["reason"], channel.result["body"] ).to_synapse_error() diff --git a/tests/test_metrics.py b/tests/test_metrics.py new file mode 100644 index 0000000000..17897711a1 --- /dev/null +++ b/tests/test_metrics.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from synapse.metrics import InFlightGauge + +from tests import unittest + + +class TestMauLimit(unittest.TestCase): + def test_basic(self): + gauge = InFlightGauge( + "test1", "", + labels=["test_label"], + sub_metrics=["foo", "bar"], + ) + + def handle1(metrics): + metrics.foo += 2 + metrics.bar = max(metrics.bar, 5) + + def handle2(metrics): + metrics.foo += 3 + metrics.bar = max(metrics.bar, 7) + + gauge.register(("key1",), handle1) + + self.assert_dict({ + "test1_total": {("key1",): 1}, + "test1_foo": {("key1",): 2}, + "test1_bar": {("key1",): 5}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.unregister(("key1",), handle1) + + self.assert_dict({ + "test1_total": {("key1",): 0}, + "test1_foo": {("key1",): 0}, + "test1_bar": {("key1",): 0}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.register(("key1",), handle1) + gauge.register(("key2",), handle2) + + self.assert_dict({ + "test1_total": {("key1",): 1, ("key2",): 1}, + "test1_foo": {("key1",): 2, ("key2",): 3}, + "test1_bar": {("key1",): 5, ("key2",): 7}, + }, self.get_metrics_from_gauge(gauge)) + + gauge.unregister(("key2",), handle2) + gauge.register(("key1",), handle2) + + self.assert_dict({ + "test1_total": {("key1",): 2, ("key2",): 0}, + "test1_foo": {("key1",): 5, ("key2",): 0}, + "test1_bar": {("key1",): 7, ("key2",): 0}, + }, self.get_metrics_from_gauge(gauge)) + + def get_metrics_from_gauge(self, gauge): + results = {} + + for r in gauge.collect(): + results[r.name] = { + tuple(labels[x] for x in gauge.labels): value + for _, labels, value in r.samples + } + + return results diff --git a/tests/test_server.py b/tests/test_server.py index ef74544e93..4045fdadc3 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -1,14 +1,35 @@ +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
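The InFlightGauge test above is dense; as a sketch of the underlying contract (inferred from the test's own calls, not from the metrics module itself): each registered callback is invoked at collection time to fill in the named sub-metrics for its key, and the _total series counts how many callbacks are currently registered per key:

from synapse.metrics import InFlightGauge

gauge = InFlightGauge("test1", "", labels=["test_label"], sub_metrics=["foo", "bar"])

def handle(metrics):
    # Runs on every collect(); mutates this key's counters in place.
    metrics.foo += 2
    metrics.bar = max(metrics.bar, 5)

gauge.register(("key1",), handle)    # test1_total{key1} -> 1, foo -> 2, bar -> 5
gauge.unregister(("key1",), handle)  # all of key1's series drop back to 0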
diff --git a/tests/test_server.py b/tests/test_server.py
index ef74544e93..4045fdadc3 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -1,14 +1,35 @@
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
 import re
 
+from six import StringIO
+
 from twisted.internet.defer import Deferred
-from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
 
 from synapse.api.errors import Codes, SynapseError
 from synapse.http.server import JsonResource
+from synapse.http.site import SynapseSite, logger
 from synapse.util import Clock
 
 from tests import unittest
-from tests.server import make_request, render, setup_test_homeserver
+from tests.server import FakeTransport, make_request, render, setup_test_homeserver
 
 
 class JsonResourceTests(unittest.TestCase):
@@ -121,3 +142,52 @@ class JsonResourceTests(unittest.TestCase):
         self.assertEqual(channel.result["code"], b'400')
         self.assertEqual(channel.json_body["error"], "Unrecognized request")
         self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
+
+
+class SiteTestCase(unittest.HomeserverTestCase):
+    def test_lose_connection(self):
+        """
+        When the connection is lost, we log the request URI with the access
+        token redacted.
+        """
+
+        class HangingResource(Resource):
+            """
+            A Resource that strategically hangs, as if it were processing an
+            answer.
+            """
+
+            def render(self, request):
+                return NOT_DONE_YET
+
+        # Set up a logging handler that we can inspect afterwards
+        output = StringIO()
+        handler = logging.StreamHandler(output)
+        logger.addHandler(handler)
+        old_level = logger.level
+        logger.setLevel(10)
+        self.addCleanup(logger.setLevel, old_level)
+        self.addCleanup(logger.removeHandler, handler)
+
+        # Make a resource and a Site; the resource will hang, allowing us to
+        # time out the request while it's 'processing'
+        base_resource = Resource()
+        base_resource.putChild(b'', HangingResource())
+        site = SynapseSite("test", "site_tag", {}, base_resource, "1.0")
+
+        server = site.buildProtocol(None)
+        client = AccumulatingProtocol()
+        client.makeConnection(FakeTransport(server, self.reactor))
+        server.makeConnection(FakeTransport(client, self.reactor))
+
+        # Send a request with an access token that will get redacted
+        server.dataReceived(b"GET /?access_token=bar HTTP/1.0\r\n\r\n")
+        self.pump()
+
+        # Lose the connection
+        e = Failure(Exception("Failed123"))
+        server.connectionLost(e)
+        handler.flush()
+
+        # Our access token is redacted and the failure reason is logged.
+        self.assertIn("/?access_token=<redacted>", output.getvalue())
+        self.assertIn("Failed123", output.getvalue())
diff --git a/tests/test_state.py b/tests/test_state.py
index 452a123c3a..e20c33322a 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -180,7 +180,7 @@ class StateTestCase(unittest.TestCase):
         graph = Graph(
             nodes={
                 "START": DictObj(
-                    type=EventTypes.Create, state_key="", content={}, depth=1,
+                    type=EventTypes.Create, state_key="", content={}, depth=1
                 ),
                 "A": DictObj(type=EventTypes.Message, depth=2),
                 "B": DictObj(type=EventTypes.Message, depth=3),
diff --git a/tests/test_types.py b/tests/test_types.py
index be072d402b..0f5c8bfaf9 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -14,12 +14,12 @@
 # limitations under the License.
 
 from synapse.api.errors import SynapseError
-from synapse.server import HomeServer
 from synapse.types import GroupID, RoomAlias, UserID
 
 from tests import unittest
+from tests.utils import TestHomeServer
 
-mock_homeserver = HomeServer(hostname="my.domain")
+mock_homeserver = TestHomeServer(hostname="my.domain")
 
 
 class UserIDTestCase(unittest.TestCase):
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 8d8ce0cab9..2eea3b098b 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -96,7 +96,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
             events_to_filter.append(evt)
 
         # the erasey user gets erased
-        self.hs.get_datastore().mark_user_erased("@erased:local_hs")
+        yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
 
         # ... and the filtering happens.
         filtered = yield filter_events_for_server(
diff --git a/tests/unittest.py b/tests/unittest.py
index d852e2465a..a59291cc60 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import hashlib
+import hmac
 import logging
 
 from mock import Mock
@@ -22,14 +24,17 @@ from canonicaljson import json
 
 import twisted
 import twisted.logger
+from twisted.internet.defer import Deferred
 from twisted.trial import unittest
 
 from synapse.http.server import JsonResource
+from synapse.http.site import SynapseRequest
 from synapse.server import HomeServer
 from synapse.types import UserID, create_requester
 from synapse.util.logcontext import LoggingContextFilter
 
 from tests.server import get_clock, make_request, render, setup_test_homeserver
+from tests.utils import default_config
 
 # Set up putting Synapse's logs into Trial's.
 rootLogger = logging.getLogger()
@@ -151,6 +156,7 @@ class HomeserverTestCase(TestCase):
         hijack_auth (bool): Whether to hijack auth to return the user specified
         in user_id.
     """
+
     servlets = []
     hijack_auth = True
 
@@ -217,7 +223,17 @@
 
         Function to be overridden in subclasses.
         """
-        raise NotImplementedError()
+        hs = self.setup_test_homeserver()
+        return hs
+
+    def default_config(self, name="test"):
+        """
+        Get a default HomeServer config object.
+
+        Args:
+            name (str): The homeserver name/domain.
+        """
+        return default_config(name)
 
     def prepare(self, reactor, clock, homeserver):
         """
@@ -234,7 +250,9 @@
 
         Function to optionally be overridden in subclasses.
         """
 
-    def make_request(self, method, path, content=b""):
+    def make_request(
+        self, method, path, content=b"", access_token=None, request=SynapseRequest
+    ):
         """
         Create a SynapseRequest at the path using the method and containing the
         given content.
@@ -252,7 +270,7 @@
         if isinstance(content, dict):
             content = json.dumps(content).encode('utf8')
 
-        return make_request(method, path, content)
+        return make_request(method, path, content, access_token, request)
 
     def render(self, request):
         """
@@ -279,3 +297,81 @@
         kwargs = dict(kwargs)
         kwargs.update(self._hs_args)
         return setup_test_homeserver(self.addCleanup, *args, **kwargs)
+
+    def pump(self, by=0.0):
+        """
+        Pump the reactor enough that Deferreds will fire.
+        """
+        self.reactor.pump([by] * 100)
+
+    def get_success(self, d):
+        if not isinstance(d, Deferred):
+            return d
+        self.pump()
+        return self.successResultOf(d)
+
+    def register_user(self, username, password, admin=False):
+        """
+        Register a user. Requires the Admin API be registered.
+
+        Args:
+            username (bytes/unicode): The user part of the new user.
+            password (bytes/unicode): The password of the new user.
+            admin (bool): Whether the user should be created as an admin
+            or not.
+
+        Returns:
+            The MXID of the new user (unicode).
+        """
+        self.hs.config.registration_shared_secret = u"shared"
+
+        # Create the user
+        request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
+        self.render(request)
+        nonce = channel.json_body["nonce"]
+
+        want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
+        nonce_str = b"\x00".join([username.encode('utf8'), password.encode('utf8')])
+        if admin:
+            nonce_str += b"\x00admin"
+        else:
+            nonce_str += b"\x00notadmin"
+        want_mac.update(nonce.encode('ascii') + b"\x00" + nonce_str)
+        want_mac = want_mac.hexdigest()
+
+        body = json.dumps(
+            {
+                "nonce": nonce,
+                "username": username,
+                "password": password,
+                "admin": admin,
+                "mac": want_mac,
+            }
+        )
+        request, channel = self.make_request(
+            "POST", "/_matrix/client/r0/admin/register", body.encode('utf8')
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200)
+
+        user_id = channel.json_body["user_id"]
+        return user_id
+
+    def login(self, username, password, device_id=None):
+        """
+        Log in a user, and get an access token. Requires the Login API be
+        registered.
+
+        """
+        body = {"type": "m.login.password", "user": username, "password": password}
+        if device_id:
+            body["device_id"] = device_id
+
+        request, channel = self.make_request(
+            "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8')
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200)
+
+        access_token = channel.json_body["access_token"].encode('ascii')
+        return access_token
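
Together these helpers let a test mint a user end-to-end over the stubbed HTTP layer. A hypothetical test case using them (the servlet modules named here are assumptions about where the admin and login APIs lived at this point; the test body is purely illustrative):

    from synapse.rest.client.v1 import admin, login

    class ExampleAuthTestCase(HomeserverTestCase):
        # register_user needs the Admin API and login needs the Login API,
        # per the docstrings above.
        servlets = [admin.register_servlets, login.register_servlets]

        def test_register_and_login(self):
            self.register_user("alice", "s3cret")
            token = self.login("alice", "s3cret")

            # login() has already asserted a 200; the token can now be fed to
            # self.make_request(..., access_token=token) for authenticated
            # calls.
            self.assertTrue(token)
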
diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py
index 5cbada4eda..50bc7702d2 100644
--- a/tests/util/test_expiring_cache.py
+++ b/tests/util/test_expiring_cache.py
@@ -65,7 +65,6 @@ class ExpiringCacheTestCase(unittest.TestCase):
     def test_time_eviction(self):
         clock = MockClock()
         cache = ExpiringCache("test", clock, expiry_ms=1000)
-        cache.start()
 
         cache["key"] = 1
         clock.advance_time(0.5)
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 4633db77b3..8adaee3c8d 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -159,6 +159,11 @@ class LoggingContextTestCase(unittest.TestCase):
             self.assertEqual(r, "bum")
             self._check_test_key("one")
 
+    def test_nested_logging_context(self):
+        with LoggingContext(request="foo"):
+            nested_context = logcontext.nested_logging_context(suffix="bar")
+            self.assertEqual(nested_context.request, "foo-bar")
+
     # a function which returns a deferred which has been "called", but
     # which had a function which returned another incomplete deferred on
diff --git a/tests/utils.py b/tests/utils.py
index 9f7ff94575..dd347a0c59 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -16,7 +16,9 @@
 import atexit
 import hashlib
 import os
+import time
 import uuid
+import warnings
 from inspect import getcallargs
 
 from mock import Mock, patch
@@ -26,11 +28,12 @@
 from twisted.internet import defer, reactor
 
 from synapse.api.constants import EventTypes
 from synapse.api.errors import CodeMessageException, cs_error
+from synapse.config.server import ServerConfig
 from synapse.federation.transport import server
 from synapse.http.server import HttpServer
 from synapse.server import HomeServer
-from synapse.storage import PostgresEngine
-from synapse.storage.engines import create_engine
+from synapse.storage import DataStore
+from synapse.storage.engines import PostgresEngine, create_engine
 from synapse.storage.prepare_database import (
     _get_or_create_schema_state,
     _setup_new_database,
@@ -41,6 +44,7 @@ from synapse.util.ratelimitutils import FederationRateLimiter
 
 # set this to True to run the tests against postgres instead of sqlite.
 USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
+LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False)
 POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres")
 POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
 
@@ -92,10 +96,77 @@ def setupdb():
     atexit.register(_cleanup)
 
 
+def default_config(name):
+    """
+    Create a reasonable test config.
+    """
+    config = Mock()
+    config.signing_key = [MockKey()]
+    config.event_cache_size = 1
+    config.enable_registration = True
+    config.macaroon_secret_key = "not even a little secret"
+    config.expire_access_token = False
+    config.server_name = name
+    config.trusted_third_party_id_servers = []
+    config.room_invite_state_types = []
+    config.password_providers = []
+    config.worker_replication_url = ""
+    config.worker_app = None
+    config.email_enable_notifs = False
+    config.block_non_admin_invites = False
+    config.federation_domain_whitelist = None
+    config.federation_rc_reject_limit = 10
+    config.federation_rc_sleep_limit = 10
+    config.federation_rc_sleep_delay = 100
+    config.federation_rc_concurrent = 10
+    config.filter_timeline_limit = 5000
+    config.user_directory_search_all_users = False
+    config.user_consent_server_notice_content = None
+    config.block_events_without_consent_error = None
+    config.media_storage_providers = []
+    config.auto_join_rooms = []
+    config.limit_usage_by_mau = False
+    config.hs_disabled = False
+    config.hs_disabled_message = ""
+    config.hs_disabled_limit_type = ""
+    config.max_mau_value = 50
+    config.mau_trial_days = 0
+    config.mau_limits_reserved_threepids = []
+    config.admin_contact = None
+    config.rc_messages_per_second = 10000
+    config.rc_message_burst_count = 10000
+
+    config.use_frozen_dicts = False
+
+    # we need a sane default_room_version, otherwise attempts to create rooms will
+    # fail.
+    config.default_room_version = "1"
+
+    # disable user directory updates, because they get done in the
+    # background, which upsets the test runner.
+    config.update_user_directory = False
+
+    def is_threepid_reserved(threepid):
+        return ServerConfig.is_threepid_reserved(config, threepid)
+
+    config.is_threepid_reserved.side_effect = is_threepid_reserved
+
+    return config
+
+
+class TestHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
+
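default_config() centralises what was previously an inline Mock blob (the deleted copy appears in the setup_test_homeserver hunk below), so a test can start from the shared defaults and override only what it needs. A hypothetical override via the make_homeserver hook added to HomeserverTestCase above:

    class MauLimitsTestCase(HomeserverTestCase):
        def make_homeserver(self, reactor, clock):
            # Start from the shared defaults, then flip just the MAU knobs.
            # The returned object is a Mock, so plain attribute assignment
            # is enough.
            config = self.default_config()
            config.limit_usage_by_mau = True
            config.max_mau_value = 5
            return self.setup_test_homeserver(config=config)
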
 @defer.inlineCallbacks
 def setup_test_homeserver(
-    cleanup_func, name="test", datastore=None, config=None, reactor=None,
-    homeserverToUse=HomeServer, **kargs
+    cleanup_func,
+    name="test",
+    datastore=None,
+    config=None,
+    reactor=None,
+    homeserverToUse=TestHomeServer,
+    **kargs
 ):
     """
     Setup a homeserver suitable for running tests against.
 
     Keyword arguments
@@ -111,48 +182,8 @@ def setup_test_homeserver(
         from twisted.internet import reactor
 
     if config is None:
-        config = Mock()
-        config.signing_key = [MockKey()]
-        config.event_cache_size = 1
-        config.enable_registration = True
-        config.macaroon_secret_key = "not even a little secret"
-        config.expire_access_token = False
-        config.server_name = name
-        config.trusted_third_party_id_servers = []
-        config.room_invite_state_types = []
-        config.password_providers = []
-        config.worker_replication_url = ""
-        config.worker_app = None
-        config.email_enable_notifs = False
-        config.block_non_admin_invites = False
-        config.federation_domain_whitelist = None
-        config.federation_rc_reject_limit = 10
-        config.federation_rc_sleep_limit = 10
-        config.federation_rc_sleep_delay = 100
-        config.federation_rc_concurrent = 10
-        config.filter_timeline_limit = 5000
-        config.user_directory_search_all_users = False
-        config.user_consent_server_notice_content = None
-        config.block_events_without_consent_error = None
-        config.media_storage_providers = []
-        config.auto_join_rooms = []
-        config.limit_usage_by_mau = False
-        config.hs_disabled = False
-        config.hs_disabled_message = ""
-        config.hs_disabled_limit_type = ""
-        config.max_mau_value = 50
-        config.mau_limits_reserved_threepids = []
-        config.admin_uri = None
+        config = default_config(name)
 
-        # we need a sane default_room_version, otherwise attempts to create rooms will
-        # fail.
-        config.default_room_version = "1"
-
-        # disable user directory updates, because they get done in the
-        # background, which upsets the test runner.
-        config.update_user_directory = False
-
-        config.use_frozen_dicts = True
     config.ldap_enabled = False
 
     if "clock" not in kargs:
@@ -218,22 +249,44 @@ def setup_test_homeserver(
     else:
         # We need to do cleanup on PostgreSQL
         def cleanup():
+            import psycopg2
+
             # Close all the db pools
             hs.get_db_pool().close()
 
+            dropped = False
+
             # Drop the test database
             db_conn = db_engine.module.connect(
                 database=POSTGRES_BASE_DB, user=POSTGRES_USER
             )
             db_conn.autocommit = True
             cur = db_conn.cursor()
-            cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
-            db_conn.commit()
+
+            # Try a few times to drop the DB. Some things may hold on to the
+            # database for a few more seconds due to flakiness, preventing
+            # us from dropping it when the test is over. If we can't drop
+            # it, warn and move on.
+            for x in range(5):
+                try:
+                    cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,))
+                    db_conn.commit()
+                    dropped = True
+                except psycopg2.OperationalError as e:
+                    warnings.warn(
+                        "Couldn't drop old db: " + str(e), category=UserWarning
+                    )
+                    time.sleep(0.5)
+
             cur.close()
             db_conn.close()
 
-        # Register the cleanup hook
-        cleanup_func(cleanup)
+            if not dropped:
+                warnings.warn("Failed to drop old DB.", category=UserWarning)
+
+        if not LEAVE_DB:
+            # Register the cleanup hook
+            cleanup_func(cleanup)
 
         hs.setup()
     else:
@@ -307,7 +360,9 @@ class MockHttpResource(HttpServer):
 
     @patch('twisted.web.http.Request')
     @defer.inlineCallbacks
-    def trigger(self, http_method, path, content, mock_request, federation_auth=False):
+    def trigger(
+        self, http_method, path, content, mock_request, federation_auth_origin=None
+    ):
         """ Fire an HTTP event.
 
         Args:
             http_method : The HTTP request's method
             path : The HTTP path
             content : The HTTP body
             mock_request : Mocked request to pass to the event so it can get
                 content.
+            federation_auth_origin (bytes|None): domain to authenticate as, for federation
 
         Returns:
             A tuple of (code, response)
         Raises:
@@ -336,8 +392,10 @@
         mock_request.getClientIP.return_value = "-"
 
         headers = {}
-        if federation_auth:
-            headers[b"Authorization"] = [b"X-Matrix origin=test,key=,sig="]
+        if federation_auth_origin is not None:
+            headers[b"Authorization"] = [
+                b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
+            ]
         mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers)
 
         # return the right path if the event requires it
@@ -556,16 +614,16 @@ def create_room(hs, room_id, creator_id):
     event_builder_factory = hs.get_event_builder_factory()
     event_creation_handler = hs.get_event_creation_handler()
 
-    builder = event_builder_factory.new({
-        "type": EventTypes.Create,
-        "state_key": "",
-        "sender": creator_id,
-        "room_id": room_id,
-        "content": {},
-    })
-
-    event, context = yield event_creation_handler.create_new_client_event(
-        builder
+    builder = event_builder_factory.new(
+        {
+            "type": EventTypes.Create,
+            "state_key": "",
+            "sender": creator_id,
+            "room_id": room_id,
+            "content": {},
+        }
     )
 
+    event, context = yield event_creation_handler.create_new_client_event(builder)
+
     yield store.persist_event(event, context)
diff --git a/tox.ini b/tox.ini
index 085f438989..87b5e4782d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@ envlist = packaging, py27, py36, pep8, check_isort
 [base]
 deps =
     coverage
-    Twisted>=15.1
+    Twisted>=17.1
    mock
    python-subunit
    junitxml
@@ -64,49 +64,41 @@ setenv =
     {[base]setenv}
     SYNAPSE_POSTGRES = 1
 
-[testenv:py36]
-usedevelop=true
+# A test suite for the oldest supported versions of Python libraries, to catch
+# any uses of APIs not available in them.
+[testenv:py27-old]
+skip_install=True
+deps =
+    # Old automat version for Twisted
+    Automat == 0.3.0
+
+    mock
+    lxml
 commands =
     /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
-    coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \
-        "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/config \
-        tests/api/test_filtering.py \
-        tests/api/test_ratelimiting.py \
-        tests/appservice \
-        tests/crypto \
-        tests/events \
-        tests/handlers/test_appservice.py \
-        tests/handlers/test_auth.py \
-        tests/handlers/test_device.py \
-        tests/handlers/test_directory.py \
-        tests/handlers/test_e2e_keys.py \
-        tests/handlers/test_presence.py \
-        tests/handlers/test_profile.py \
-        tests/handlers/test_register.py \
-        tests/replication/slave/storage/test_account_data.py \
-        tests/replication/slave/storage/test_receipts.py \
-        tests/storage/test_appservice.py \
-        tests/storage/test_background_update.py \
-        tests/storage/test_base.py \
-        tests/storage/test__base.py \
-        tests/storage/test_client_ips.py \
-        tests/storage/test_devices.py \
-        tests/storage/test_end_to_end_keys.py \
-        tests/storage/test_event_push_actions.py \
-        tests/storage/test_keys.py \
-        tests/storage/test_presence.py \
-        tests/storage/test_profile.py \
-        tests/storage/test_registration.py \
-        tests/storage/test_room.py \
-        tests/storage/test_user_directory.py \
-        tests/test_distributor.py \
-        tests/test_dns.py \
-        tests/test_preview.py \
-        tests/test_test_utils.py \
-        tests/test_types.py \
-        tests/util} \
-        {env:TOXSUFFIX:}
-    {env:DUMP_COVERAGE_COMMAND:coverage report -m}
+    # Make all greater-thans equals so we test the oldest version of our direct
+    # dependencies, but pin pyopenssl to 17.0, which can work against an
+    # OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis).
+    /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install'
+
+    # Install Synapse itself. This won't update any libraries.
+    pip install -e .
+
+    {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
+
+[testenv:py35]
+usedevelop=true
+
+[testenv:py36]
+usedevelop=true
+
+[testenv:py36-postgres]
+usedevelop=true
+deps =
+    {[base]deps}
+    psycopg2
+setenv =
+    {[base]setenv}
+    SYNAPSE_POSTGRES = 1
+
 [testenv:packaging]
 deps =