diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh
index ab96387a0a..224cae9216 100755
--- a/.ci/scripts/test_export_data_command.sh
+++ b/.ci/scripts/test_export_data_command.sh
@@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Run the export-data command on the sqlite test database
python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
@@ -41,7 +41,7 @@ fi
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"
-scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# Run the export-data command on postgres database
python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh
index 797904e64c..91bd966f32 100755
--- a/.ci/scripts/test_synapse_port_db.sh
+++ b/.ci/scripts/test_synapse_port_db.sh
@@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
+# but coverage seems unable to find the entrypoints installed by `pip install -e .`.
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
# We should be able to run twice against the same database.
echo "+++ Run synapse_port_db a second time"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
#####
@@ -46,7 +48,7 @@ echo "--- Prepare empty SQLite database"
# we do this by deleting the sqlite db, and then doing the same again.
rm .ci/test_db.db
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
.ci/scripts/postgres_exec.py \
@@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b
  "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
-coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
diff --git a/.dockerignore b/.dockerignore
index f6c638b0a2..434231fce9 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,11 +3,9 @@
# things to include
!docker
-!scripts
!synapse
!MANIFEST.in
!README.rst
!setup.py
-!synctl
**/__pycache__
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index eee3633d50..ed4fc6179d 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -31,7 +31,7 @@ jobs:
        # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
        dists='["debian:sid"]'
        if [[ $GITHUB_REF == refs/tags/* ]]; then
-          dists=$(scripts-dev/build_debian_packages --show-dists-json)
+          dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
        fi
        echo "::set-output name=distros::$dists"
    # map the step outputs to job outputs
@@ -74,7 +74,7 @@ jobs:
      # see https://github.com/docker/build-push-action/issues/252
      # for the cache magic here
      run: |
-        ./src/scripts-dev/build_debian_packages \
+        ./src/scripts-dev/build_debian_packages.py \
          --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \
          --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \
          --docker-build-arg=--progress=plain \
@@ -112,7 +112,8 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          files: |
-            python-dist/*
+            Sdist/*
+            Wheel/*
            debs.tar.xz
          # if it's not already published, keep the release as a draft.
          draft: true
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 35e42bdfab..2602fb6648 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -16,7 +16,8 @@ jobs:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install -e .
-      - run: scripts-dev/generate_sample_config --check
+      - run: scripts-dev/generate_sample_config.sh --check
+      - run: scripts-dev/config-lint.sh
  lint:
    runs-on: ubuntu-latest
@@ -51,7 +52,7 @@ jobs:
          fetch-depth: 0
      - uses: actions/setup-python@v2
      - run: "pip install 'towncrier>=18.6.0rc1'"
-      - run: scripts-dev/check-newsfragment
+      - run: scripts-dev/check-newsfragment.sh
        env:
          PULL_REQUEST_NUMBER: ${{ github.event.number }}
@@ -369,7 +370,7 @@ jobs:
      # Run Complement
      - run: |
          set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
        env:
@@ -380,34 +381,22 @@ jobs:
  tests-done:
    if: ${{ always() }}
    needs:
+      - check-sampleconfig
      - lint
      - lint-crlf
      - lint-newsfile
      - trial
      - trial-olddeps
      - sytest
+      - export-data
      - portdb
      - complement
    runs-on: ubuntu-latest
    steps:
-      - name: Set build result
-        env:
-          NEEDS_CONTEXT: ${{ toJSON(needs) }}
-        # the `jq` incantation dumps out a series of "<job> <result>" lines.
-        # we set it to an intermediate variable to avoid a pipe, which makes it
-        # hard to set $rc.
- run: | - rc=0 - results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT) - while read job result ; do - # The newsfile lint may be skipped on non PR builds - if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then - continue - fi - - if [ "$result" != "success" ]; then - echo "::set-failed ::Job $job returned $result" - rc=1 - fi - done <<< $results - exit $rc + - uses: matrix-org/done-action@v2 + with: + needs: ${{ toJSON(needs) }} + + # The newsfile lint may be skipped on non PR builds + skippable: + lint-newsfile diff --git a/CHANGES.md b/CHANGES.md index ef671e73f1..6618378c06 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,100 @@ +Synapse 1.55.0 (2022-03-22) +=========================== + +This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. + +This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details. + + +Internal Changes +---------------- + +- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260)) + + +Synapse 1.55.0rc1 (2022-03-15) +============================== + +Features +-------- + +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) +- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) +- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) +- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212)) + + +Bugfixes +-------- + +- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) +- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) +- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) +- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. 
([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug where a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+
+
+Improved Documentation
+----------------------
+
+- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
+- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
+- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
+- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writers can each only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
+- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
+
+
+Deprecations and Removals
+-------------------------
+
+- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))**
+- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
+- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
+
+
+Internal Changes
+----------------
+
+- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
+- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
+- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
+- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
+- Add type hints to test files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
+- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
+- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
+- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
+- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
+- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
+- Give `scripts-dev` scripts suffixes for neater CI config.
([\#12137](https://github.com/matrix-org/synapse/issues/12137))
+- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
+- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
+- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
+- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
+- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
+- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
+- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
+- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
+- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
+- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
+- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
+- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
+- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
+- Retry HTTP replication failures; this should prevent 502s when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
+- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
+- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
+- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
+- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
+- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
+- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
+- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
+- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
+- Improve code documentation for the typing stream over replication.
([\#12211](https://github.com/matrix-org/synapse/issues/12211))
+
+
Synapse 1.54.0 (2022-03-08)
===========================
diff --git a/MANIFEST.in b/MANIFEST.in
index b789f96b7d..e1047a0e7a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,3 @@
-include synctl
include sytest-blacklist
include LICENSE
include VERSION
@@ -18,7 +17,6 @@ recursive-include synapse/storage *.txt
recursive-include synapse/storage *.md
recursive-include docs *
-recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.py
@@ -54,7 +52,6 @@ prune contrib
prune debian
prune demo/etc
prune docker
-prune snap
prune stubs
exclude jenkins*
diff --git a/README.rst b/README.rst
index 4281c87d1f..595fb5ff62 100644
--- a/README.rst
+++ b/README.rst
@@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports
(to stop, you can use `./demo/stop.sh`)
+See the `demo documentation <https://matrix-org.github.io/synapse/develop/development/demo.html>`_
+for more information.
+
If you just want to start a single instance of the app and run it directly::
    # Create the homeserver.yaml config once
diff --git a/snap/snapcraft.yaml b/contrib/snap/snapcraft.yaml
similarity index 97%
rename from snap/snapcraft.yaml
rename to contrib/snap/snapcraft.yaml
index 9a01152c15..dd4c8478d5 100644
--- a/snap/snapcraft.yaml
+++ b/contrib/snap/snapcraft.yaml
@@ -20,7 +20,7 @@ apps:
    generate-config:
      command: generate_config
    generate-signing-key:
-      command: generate_signing_key.py
+      command: generate_signing_key
    register-new-matrix-user:
      command: register_new_matrix_user
      plugs: [network]
diff --git a/debian/changelog b/debian/changelog
index 868d4e32fd..3f05891c15 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.55.0) stable; urgency=medium
+
+  * New synapse release 1.55.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 22 Mar 2022 13:59:26 +0000
+
+matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.55.0~rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Mar 2022 10:59:31 +0000
+
matrix-synapse-py3 (1.54.0) stable; urgency=medium
  * New synapse release 1.54.0.
diff --git a/demo/.gitignore b/demo/.gitignore
index 4d12712343..5663aba8e7 100644
--- a/demo/.gitignore
+++ b/demo/.gitignore
@@ -1,7 +1,4 @@
-*.db
-*.log
-*.log.*
-*.pid
-
-/media_store.*
-/etc
+# Ignore all the temporary files from the demo servers.
+8080/
+8081/
+8082/
diff --git a/demo/README b/demo/README
deleted file mode 100644
index a5a95bd196..0000000000
--- a/demo/README
+++ /dev/null
@@ -1,26 +0,0 @@
-DO NOT USE THESE DEMO SERVERS IN PRODUCTION
-
-Requires you to have done:
-    python setup.py develop
-
-
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
-
-To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
-and are configured in a highly insecure way. Do not use these configuration files in production.
-
-stop.sh will stop the synapse servers and the webclient.
-
-clean.sh will delete the databases and log files.
-
-To start a completely new set of servers, run:
-
-    ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
-
-
-Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db}
-
-
-
-Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name.
-
diff --git a/demo/clean.sh b/demo/clean.sh
index e9b440d90d..7f1e192021 100755
--- a/demo/clean.sh
+++ b/demo/clean.sh
@@ -4,6 +4,9 @@ set -e
DIR="$( cd "$( dirname "$0" )" && pwd )"
+# Ensure that the servers are stopped.
+$DIR/stop.sh
+
PID_FILE="$DIR/servers.pid"
if [ -f "$PID_FILE" ]; then
diff --git a/demo/start.sh b/demo/start.sh
index 8ffb14e30a..55e69685e3 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -6,8 +6,6 @@ CWD=$(pwd)
cd "$DIR/.." || exit
-mkdir -p demo/etc
-
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
@@ -21,22 +19,26 @@ for port in 8080 8081 8082; do
    mkdir -p demo/$port
    pushd demo/$port || exit
-    #rm $DIR/etc/$port.config
+    # Generate the configuration for the homeserver at localhost:848x.
    python3 -m synapse.app.homeserver \
        --generate-config \
-        -H "localhost:$https_port" \
-        --config-path "$DIR/etc/$port.config" \
+        --server-name "localhost:$port" \
+        --config-path "$port.config" \
        --report-stats no
-    if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then
-        # Generate tls keys
-        openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix"
+    if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then
+        # Generate TLS keys.
+        openssl req -x509 -newkey rsa:4096 \
+            -keyout "localhost:$port.tls.key" \
+            -out "localhost:$port.tls.crt" \
+            -days 365 -nodes -subj "/O=matrix"
-        # Regenerate configuration
+        # Add customisations to the configuration.
        {
-            printf '\n\n# Customisation made by demo/start.sh\n'
+            printf '\n\n# Customisation made by demo/start.sh\n\n'
            echo "public_baseurl: http://localhost:$port/"
            echo 'enable_registration: true'
+            echo ''
            # Warning, this heredoc depends on the interaction of tabs and spaces.
            # Please don't accidentally bork me with your fancy settings.
@@ -63,38 +65,34 @@ for port in 8080 8081 8082; do
            echo "${listeners}"
-            # Disable tls for the servers
-            printf '\n\n# Disable tls on the servers.'
+            # Disable TLS for the servers
+            printf '\n\n# Disable TLS for the servers.'
            echo '# DO NOT USE IN PRODUCTION'
            echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true'
            echo 'federation_verify_certificates: false'
-            # Set tls paths
-            echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\""
-            echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\""
+            # Set paths for the TLS certificates.
+            echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
+            echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
            # Ignore keys from the trusted keys server
            echo '# Ignore keys from the trusted keys server'
            echo 'trusted_key_servers:'
            echo '  - server_name: "matrix.org"'
            echo '    accept_keys_insecurely: true'
-
-            # Reduce the blacklist
-            blacklist=$(cat <<-BLACK
-                # Set the blacklist so that it doesn't include 127.0.0.1, ::1
-                federation_ip_range_blacklist:
-                  - '10.0.0.0/8'
-                  - '172.16.0.0/12'
-                  - '192.168.0.0/16'
-                  - '100.64.0.0/10'
-                  - '169.254.0.0/16'
-                  - 'fe80::/64'
-                  - 'fc00::/7'
-            BLACK
+            echo ''
+
+            # Allow the servers to communicate over localhost.
+ allow_list=$(cat <<-ALLOW_LIST + # Allow the servers to communicate over localhost. + ip_range_whitelist: + - '127.0.0.1/8' + - '::1/128' + ALLOW_LIST ) - echo "${blacklist}" - } >> "$DIR/etc/$port.config" + echo "${allow_list}" + } >> "$port.config" fi # Check script parameters @@ -141,19 +139,18 @@ for port in 8080 8081 8082; do burst_count: 1000 RC ) - echo "${ratelimiting}" >> "$DIR/etc/$port.config" + echo "${ratelimiting}" >> "$port.config" fi fi - if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then - echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config" - fi - if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then - echo "report_stats: false" >> "$DIR/etc/$port.config" + # Always disable reporting of stats if the option is not there. + if ! grep -F "report_stats" -q "$port.config" ; then + echo "report_stats: false" >> "$port.config" fi + # Run the homeserver in the background. python3 -m synapse.app.homeserver \ - --config-path "$DIR/etc/$port.config" \ + --config-path "$port.config" \ -D \ popd || exit diff --git a/docker/Dockerfile b/docker/Dockerfile index a8bb9b0e7f..24b5515eb9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,8 +46,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY scripts /synapse/scripts/ -COPY MANIFEST.in README.rst setup.py synctl /synapse/ +COPY MANIFEST.in README.rst setup.py /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ef9cabf555..21f80efc99 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -82,6 +82,7 @@ - [Release Cycle](development/releases.md) - [Git Usage](development/git.md) - [Testing]() + - [Demo scripts](development/demo.md) - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) diff --git a/docs/code_style.md b/docs/code_style.md index 4d8e7c973d..e7c9cd1a5e 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -172,6 +172,6 @@ frobber: ``` Note that the sample configuration is generated from the synapse code -and is maintained by a script, `scripts-dev/generate_sample_config`. +and is maintained by a script, `scripts-dev/generate_sample_config.sh`. Making sure that the output from this script matches the desired format is left as an exercise for the reader! diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 8448685952..071202e196 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs. +### Private Sign off + +If you would like to provide your legal name privately to the Matrix.org +Foundation (instead of in a public commit or comment), you can do so +by emailing your legal name and a link to the pull request to +[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off). +It helps to include "sign off" or similar in the subject line. You will then +be instructed further. + +Once private sign off is complete, doing so for future contributions will not +be required. # 10. Turn feedback into better code. 
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index a767d3af9f..d996a7caa2 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -158,9 +158,9 @@ same as integers.
There are three separate aspects to this:
 * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
-   `scripts/synapse_port_db`. This tells the port script to cast the integer
-   value from SQLite to a boolean before writing the value to the postgres
-   database.
+   `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
+   the integer value from SQLite to a boolean before writing the value to the
+   postgres database.
 * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
   SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
diff --git a/docs/development/demo.md b/docs/development/demo.md
new file mode 100644
index 0000000000..4277252ceb
--- /dev/null
+++ b/docs/development/demo.md
@@ -0,0 +1,41 @@
+# Synapse demo setup
+
+**DO NOT USE THESE DEMO SERVERS IN PRODUCTION**
+
+Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).
+
+The demo setup allows running three federated Synapse servers, with server
+names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
+
+You can access them via any Matrix client over HTTP at `localhost:8080`,
+`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
+`localhost:8481`, and `localhost:8482`.
+
+To enable the servers to communicate, self-signed SSL certificates are generated
+and the servers are configured in a highly insecure way, including:
+
+* Not checking certificates over federation.
+* Not verifying keys.
+
+The servers are configured to store their data under `demo/8080`, `demo/8081`, and
+`demo/8082`. This includes configuration, logs, SQLite databases, and media.
+
+Note that when joining a public room on a different HS via "#foo:bar.net", you are
+(in the current impl) joining a room with room_id "foo". This means that it won't
+work if your HS already has a room with that name.
+
+## Using the demo scripts
+
+There are three main scripts, each with a straightforward purpose:
+
+* `start.sh` will start the Synapse servers, generating any missing configuration.
+  * This accepts a single parameter `--no-rate-limit` to "disable" rate limits
+    (they actually still exist, but are very high).
+* `stop.sh` will stop the Synapse servers.
+* `clean.sh` will delete the configuration, databases, log files, etc.
+
+To start a completely new set of servers, run:
+
+```sh
+./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh
+```
diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md
index cbc7cf2949..3eb4d5acc4 100644
--- a/docs/development/room-dag-concepts.md
+++ b/docs/development/room-dag-concepts.md
@@ -30,37 +30,72 @@ rather than skipping any that arrived late; whereas if you're looking at a
historical section of timeline (i.e. `/messages`), you want to see the best
representation of the state of the room as others were seeing it at the time.
+## Outliers
-## Forward extremity
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.
-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`.
-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+So, typically, we won't have the `prev_events` of an `outlier` in the database,
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:
+
+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).
-## Backward extremity
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremities` or `event_backward_extremities` tables.
-The current marker of where we have backfilled up to and will generally be the
-`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
-backfilling history.
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however, there is an
+exception:
-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
+### Out-of-band membership events
+A special case of outlier events are some membership events for federated rooms
+that we aren't full members of. For example:
-## Outliers
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.
-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.
-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
+## Forward extremity
+
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)
+
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.
+
+The "current state" of a room (i.e. the state which would be used if we
+generated a new event) is, therefore, the resolution of the room states
+at each of the forward extremities.
+
+## Backward extremity
+
+The current marker of where we have backfilled up to and will generally be the
+`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
+backfilling history.
-For example, when we fetch the event auth chain or state for a given event, we
-mark all of those claimed auth events as outliers because we haven't done the
-state calculation ourself.
+Note that, unlike forward extremities, we typically don't have any backward
+extremity events themselves in the database - or, if we do, they will be "outliers" (see
+above). Either way, we don't expect to have the room state at a backward extremity.
+When we persist a non-outlier event, if it was previously a backward extremity,
+we clear it as a backward extremity and set all of its `prev_events` as the new
+backward extremities if they aren't already persisted as non-outliers. This
+therefore keeps the backward extremities up-to-date.
## State groups
diff --git a/docs/federate.md b/docs/federate.md
index 5107f995be..df4c87da51 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -63,4 +63,5 @@ release of Synapse.
If you want to get up and running quickly with a trio of homeservers in a
private federation, there is a script in the `demo` directory. This is mainly
-useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/).
+useful just for development purposes. See
+[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html).
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index 09ac838107..1d3c39967f 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c
If multiple modules implement this callback, Synapse runs them all in order.
+### `check_can_shutdown_room`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_shutdown_room(
+    user_id: str, room_id: str,
+) -> bool:
+```
+
+Called when an admin user requests the shutdown of a room. The module must return a
+boolean indicating whether the shutdown can go through. If the callback returns `False`,
+the shutdown will not proceed and the caller will see an `M_FORBIDDEN` error.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
+### `check_can_deactivate_user`
+
+_First introduced in Synapse v1.55.0_
+
+```python
+async def check_can_deactivate_user(
+    user_id: str, by_admin: bool,
+) -> bool:
+```
+
+Called when the deactivation of a user is requested. User deactivation can be
+performed by an admin or the user themselves, so developers are encouraged to check the
+requester when implementing this callback. The module must return a
+boolean indicating whether the deactivation can go through. If the callback returns `False`,
+the deactivation will not proceed and the caller will see an `M_FORBIDDEN` error.
+
+The module is passed two parameters: `user_id`, the ID of the user being deactivated, and
+`by_admin`, which is `True` if the request was made by a server admin, and `False` otherwise.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
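As an illustration, here is a minimal sketch of a module wiring up both new callbacks via the standard `ModuleApi` registration pattern described above. The callback names and signatures come from this documentation; the `PROTECTED_ROOMS` set and the policy decisions are purely hypothetical.

```python
from typing import Any, Dict

from synapse.module_api import ModuleApi

# Hypothetical policy data, for illustration only.
PROTECTED_ROOMS = {"!importantroom:example.com"}


class ShutdownAndDeactivationGuard:
    def __init__(self, config: Dict[str, Any], api: ModuleApi):
        self._api = api
        api.register_third_party_rules_callbacks(
            check_can_shutdown_room=self.check_can_shutdown_room,
            check_can_deactivate_user=self.check_can_deactivate_user,
        )

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Returning False makes the shutdown request fail with M_FORBIDDEN.
        return room_id not in PROTECTED_ROOMS

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Only allow deactivation when it was requested by a server admin.
        return by_admin
```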
+ + ### `on_profile_update` _First introduced in Synapse v1.54.0_ diff --git a/docs/other/running_synapse_on_single_board_computers.md b/docs/other/running_synapse_on_single_board_computers.md index ea14afa8b2..dcf96f0056 100644 --- a/docs/other/running_synapse_on_single_board_computers.md +++ b/docs/other/running_synapse_on_single_board_computers.md @@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml. ``` -# Set to false to disable presence tracking on this homeserver. +# Disable presence tracking, which is currently fairly resource intensive +# More info: https://github.com/matrix-org/synapse/issues/9478 use_presence: false -# When this is enabled, the room "complexity" will be checked before a user -# joins a new remote room. If it is above the complexity limit, the server will -# disallow joining, or will instantly leave. +# Set a small complexity limit, preventing users from joining large rooms +# which may be resource-intensive to remain a part of. +# +# Note that this will not prevent users from joining smaller rooms that +# eventually become complex. limit_remote_rooms: - # Uncomment to enable room complexity checking. - #enabled: true + enabled: true complexity: 3.0 # Database configuration database: + # Use postgres for the best performance name: psycopg2 args: user: matrix-synapse - # Generate a long, secure one with a password manager + # Generate a long, secure password using a password manager password: hunter2 database: matrix-synapse host: localhost - cp_min: 5 - cp_max: 10 ``` Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986). You can find join times and your most complex rooms like this: diff --git a/docs/postgres.md b/docs/postgres.md index 0562021da5..de4e2ba4b7 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the copy is complete, restart synapse. For instance: ```sh -./synctl stop +synctl stop cp homeserver.db homeserver.db.snapshot -./synctl start +synctl start ``` Copy the old config file into a new config file: @@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the PostgreSQL database configuration file `homeserver-postgres.yaml`: ```sh -./synctl stop +synctl stop mv homeserver.yaml homeserver-old-sqlite.yaml mv homeserver-postgres.yaml homeserver.yaml -./synctl start +synctl start ``` Synapse should now be running against PostgreSQL. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 9f21fc960d..ae826765d4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2008,8 +2008,14 @@ saml2_config: # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their -# own username (see 'sso_auth_account_details.html' in the 'sso' -# section of this file). +# own username (see the documentation for the +# 'sso_auth_account_details.html' template). This template can +# use the 'localpart_from_email' filter. +# +# confirm_localpart: Whether to prompt the user to validate (or +# change) the generated localpart (see the documentation for the +# 'sso_auth_account_details.html' template), instead of +# registering the account right away. 
# # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. @@ -2796,3 +2802,35 @@ redis: # Optional password if configured on the Redis instance # #password: + + +## Background Updates ## + +# Background updates are database updates that are run in the background in batches. +# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to +# sleep can all be configured. This is helpful to speed up or slow down the updates. +# +background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. + # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. + # + #default_batch_size: 50 diff --git a/docs/templates.md b/docs/templates.md index 2b66e9d862..f87692a453 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver' Example: `message.sender_avatar_url|mxc_to_http(32,32)` +```python +localpart_from_email(address: str) -> str +``` + +Returns the local part of an email address (e.g. `alice` in `alice@example.com`). + +Example: `user.email_address|localpart_from_email` ## Email templates @@ -176,8 +183,11 @@ Below are the templates Synapse will look for when generating pages related to S for the brand of the IdP * `user_attributes`: an object containing details about the user that we received from the IdP. May have the following attributes: - * display_name: the user's display_name - * emails: a list of email addresses + * `display_name`: the user's display name + * `emails`: a list of email addresses + * `localpart`: the local part of the Matrix user ID to register, + if `localpart_template` is set in the mapping provider configuration (empty + string if not) The template should render a form which submits the following fields: * `username`: the localpart of the user's chosen user id * `sso_new_user_consent.html`: HTML page allowing the user to consent to the diff --git a/docs/turn-howto.md b/docs/turn-howto.md index eba7ca6124..3a2cd04e36 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse: * If you use synctl: ```sh - cd /where/you/run/synapse - ./synctl restart + # Depending on how Synapse is installed, synctl may already be on + # your PATH. If not, you may need to activate a virtual environment. + synctl restart ``` * If you use systemd: ```sh diff --git a/docs/upgrade.md b/docs/upgrade.md index f9be3ac6bc..f9ac605e7b 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -47,7 +47,7 @@ this document. 3. 
Restart Synapse: ```bash - ./synctl restart + synctl restart ``` To check whether your update was successful, you can check the running @@ -85,6 +85,49 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.56.0 + +## Groups/communities feature has been deprecated + +The non-standard groups/communities feature in Synapse has been deprecated and will +be disabled by default in Synapse v1.58.0. + +You can test disabling it by adding the following to your homeserver configuration: + +```yaml +experimental_features: + groups_enabled: false +``` + +# Upgrading to v1.55.0 + +## `synctl` script has been moved + +The `synctl` script +[has been made](https://github.com/matrix-org/synapse/pull/12140) an +[entry point](https://packaging.python.org/en/latest/specifications/entry-points/) +and no longer exists at the root of Synapse's source tree. If you wish to use +`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g. +`synctl start` instead of `./synctl start` or `/path/to/synctl start`. + +You will need to ensure `synctl` is on your `PATH`. + - This is automatically the case when using + [Debian packages](https://packages.matrix.org/debian/) or + [docker images](https://hub.docker.com/r/matrixdotorg/synapse) + provided by Matrix.org. + - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added + to your Python installation's `bin`. This should be on your `PATH` + automatically, though you might need to activate a virtual environment + depending on how you installed Synapse. + + +## Compatibility dropped for Mjolnir 1.3.1 and earlier + +Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier. +If you use the Mjolnir module to moderate your homeserver, +please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse. + + # Upgrading to v1.54.0 ## Legacy structured logging configuration removal diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 2fca96f8be..3cbedc5dfa 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` A new server admin user can also be created using the `register_new_matrix_user` -command. This is a script that is located in the `scripts/` directory, or possibly +command. This is a script that is distributed as part of synapse. It is possibly already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. diff --git a/docs/workers.md b/docs/workers.md index b0f8599ef0..8751134e65 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -351,8 +351,11 @@ is only supported with Redis-based replication.) To enable this, the worker must have a HTTP replication listener configured, have a `worker_name` and be listed in the `instance_map` config. The same worker -can handle multiple streams. For example, to move event persistence off to a -dedicated worker, the shared configuration would include: +can handle multiple streams, but unless otherwise documented, each stream can only +have a single writer. 
+ +For example, to move event persistence off to a dedicated worker, the shared +configuration would include: ```yaml instance_map: @@ -370,8 +373,8 @@ streams and the endpoints associated with them: ##### The `events` stream -The `events` stream also experimentally supports having multiple writers, where -work is sharded between them by room ID. Note that you *must* restart all worker +The `events` stream experimentally supports having multiple writers, where work +is sharded between them by room ID. Note that you *must* restart all worker instances when adding or removing event persisters. An example `stream_writers` configuration with multiple writers: @@ -384,38 +387,38 @@ stream_writers: ##### The `typing` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `typing` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `typing` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing ##### The `to_device` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `to_device` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `to_device` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ ##### The `account_data` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `account_data` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `account_data` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data ##### The `receipts` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `receipts` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `receipts` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers ##### The `presence` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `presence` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `presence` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ diff --git a/mypy.ini b/mypy.ini index 38ff787609..f9c39fcaae 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,7 +11,7 @@ local_partial_types = True no_implicit_optional = True files = - scripts-dev/sign_json, + scripts-dev/, setup.py, synapse/, tests/ @@ -23,6 +23,20 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |scripts-dev/build_debian_packages.py + |scripts-dev/check_signature.py + |scripts-dev/definitions.py + |scripts-dev/federation_client.py + |scripts-dev/hash_history.py + |scripts-dev/list_url_patterns.py + |scripts-dev/release.py + |scripts-dev/tail-synapse.py + + |synapse/_scripts/export_signing_key.py + |synapse/_scripts/move_remote_media_to_new_store.py + |synapse/_scripts/synapse_port_db.py + |synapse/_scripts/update_synapse_database.py + |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py @@ -74,17 +88,8 @@ exclude = (?x) |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py - 
|tests/rest/client/test_account.py - |tests/rest/client/test_filter.py - |tests/rest/client/test_report_event.py - |tests/rest/client/test_rooms.py - |tests/rest/client/test_third_party_rules.py |tests/rest/client/test_transactions.py - |tests/rest/client/test_typing.py - |tests/rest/key/v2/test_remote_key_resource.py - |tests/rest/media/v1/test_base.py |tests/rest/media/v1/test_media_storage.py - |tests/rest/media/v1/test_url_preview.py |tests/scripts/test_new_matrix_user.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py @@ -246,10 +251,7 @@ disallow_untyped_defs = True [mypy-tests.storage.test_user_directory] disallow_untyped_defs = True -[mypy-tests.rest.admin.*] -disallow_untyped_defs = True - -[mypy-tests.rest.client.*] +[mypy-tests.rest.*] disallow_untyped_defs = True [mypy-tests.federation.transport.test_client] @@ -350,3 +352,6 @@ ignore_missing_imports = True [mypy-zope] ignore_missing_imports = True + +[mypy-incremental.*] +ignore_missing_imports = True diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages.py similarity index 100% rename from scripts-dev/build_debian_packages rename to scripts-dev/build_debian_packages.py diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment.sh similarity index 100% rename from scripts-dev/check-newsfragment rename to scripts-dev/check-newsfragment.sh diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 0aecb3daf1..0a79a4063f 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -71,4 +71,4 @@ fi # Run the tests! echo "Images built; running complement" -go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... +go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config deleted file mode 100755 index 4cd1d1d5b8..0000000000 --- a/scripts-dev/generate_sample_config +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# -# Update/check the docs/sample_config.yaml - -set -e - -cd "$(dirname "$0")/.." - -SAMPLE_CONFIG="docs/sample_config.yaml" -SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" - -check() { - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1 -} - -if [ "$1" == "--check" ]; then - diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 - exit 1 - } - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 - exit 1 - } -else - ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" - ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG" -fi diff --git a/scripts-dev/generate_sample_config.sh b/scripts-dev/generate_sample_config.sh new file mode 100755 index 0000000000..375897eacb --- /dev/null +++ b/scripts-dev/generate_sample_config.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Update/check the docs/sample_config.yaml + +set -e + +cd "$(dirname "$0")/.." 
+ +SAMPLE_CONFIG="docs/sample_config.yaml" +SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" + +check() { + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 +} + +if [ "$1" == "--check" ]; then + diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { + echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 + exit 1 + } + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { + echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 + exit 1 + } +else + synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" + synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" +fi diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index b6554a73c1..c063fafa97 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -84,17 +84,8 @@ else files=( "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them - "scripts/export_signing_key" - "scripts/generate_config" - "scripts/generate_log_config" - "scripts/hash_password" - "scripts/register_new_matrix_user" - "scripts/synapse_port_db" - "scripts/update_synapse_database" "scripts-dev" - "scripts-dev/build_debian_packages" - "scripts-dev/sign_json" - "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" + "contrib" "setup.py" "synmark" "stubs" ".ci" ) fi fi diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index c3c90f4ec6..f0e22d4ca2 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG" +synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." @@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." 
if [ -z "$COVERAGE" ]; then # No coverage needed - scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" else # Coverage desired - coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" fi # Delete schema_version, applied_schema_deltas and applied_module_schemas tables diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json.py similarity index 100% rename from scripts-dev/sign_json rename to scripts-dev/sign_json.py diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user deleted file mode 100755 index 00104b9d62..0000000000 --- a/scripts/register_new_matrix_user +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse._scripts.register_new_matrix_user import main - -if __name__ == "__main__": - main() diff --git a/scripts/synapse_review_recent_signups b/scripts/synapse_review_recent_signups deleted file mode 100755 index a36d46e14c..0000000000 --- a/scripts/synapse_review_recent_signups +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse._scripts.review_recent_signups import main - -if __name__ == "__main__": - main() diff --git a/scripts/sync_room_to_group.pl b/scripts/sync_room_to_group.pl deleted file mode 100755 index f0c2dfadfa..0000000000 --- a/scripts/sync_room_to_group.pl +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use JSON::XS; -use LWP::UserAgent; -use URI::Escape; - -if (@ARGV < 4) { - die "usage: $0 <hs_url> <access_token> <room_id|room_alias> <group_id>\n"; -} - -my ($hs, $access_token, $room_id, $group_id) = @ARGV; -my $ua = LWP::UserAgent->new(); -$ua->timeout(10); - -if ($room_id =~ /^#/) { - $room_id = uri_escape($room_id); - $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id}; -} - -my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ]; -my $group_users = [ - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}), - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}), ]; - -die "refusing to sync from empty room" unless (@$room_users); -die "refusing to sync to empty group" unless (@$group_users); - -my $diff = {}; -foreach my $user (@$room_users) { $diff->{$user}++ } -foreach my $user (@$group_users) { $diff->{$user}-- } - -foreach my $user (keys %$diff) { - if ($diff->{$user} == 1) { - warn "inviting $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } - elsif ($diff->{$user} == -1) { - warn "removing $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } -} diff --git a/setup.py b/setup.py index 26f4650348..439ed75d72 100755 --- a/setup.py +++ b/setup.py @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-import glob import os from typing import Any, Dict @@ -153,8 +152,20 @@ def exec_file(path_segments): python_requires="~=3.7", entry_points={ "console_scripts": [ + # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + "synctl = synapse._scripts.synctl:main", + # Scripts + "export_signing_key = synapse._scripts.export_signing_key:main", + "generate_config = synapse._scripts.generate_config:main", + "generate_log_config = synapse._scripts.generate_log_config:main", + "generate_signing_key = synapse._scripts.generate_signing_key:main", + "hash_password = synapse._scripts.hash_password:main", + "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main", + "synapse_port_db = synapse._scripts.synapse_port_db:main", + "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main", + "update_synapse_database = synapse._scripts.update_synapse_database:main", ] }, classifiers=[ @@ -167,6 +178,5 @@ def exec_file(path_segments): "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"] + glob.glob("scripts/*"), cmdclass={"test": TestCommand}, ) diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index 429234d7ae..2d8ca018fb 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -20,7 +20,7 @@ from twisted.internet import protocol from twisted.internet.defer import Deferred class RedisProtocol(protocol.Protocol): - def publish(self, channel: str, message: bytes): ... + def publish(self, channel: str, message: bytes) -> "Deferred[None]": ... def ping(self) -> "Deferred[None]": ... def set( self, @@ -52,11 +52,14 @@ def lazyConnection( convertNumbers: bool = ..., ) -> RedisProtocol: ... -class ConnectionHandler: ... +# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies +# most methods to it via ConnectionHandler.__getattr__. +class ConnectionHandler(RedisProtocol): + def disconnect(self) -> "Deferred[None]": ... class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool - handler: RedisProtocol + handler: ConnectionHandler pool: List[RedisProtocol] replyTimeout: Optional[int] def __init__( diff --git a/synapse/__init__.py b/synapse/__init__.py index c6727024f0..f0f224d0bb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -25,6 +25,27 @@ print("Synapse requires Python 3.7 or above.") sys.exit(1) +# Allow using the asyncio reactor via env var. +if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)): + try: + from incremental import Version + + import twisted + + # We need a bugfix that is included in Twisted 21.2.0: + # https://twistedmatrix.com/trac/ticket/9787 + if twisted.version < Version("Twisted", 21, 2, 0): + print("Using asyncio reactor requires Twisted>=21.2.0") + sys.exit(1) + + import asyncio + + from twisted.internet import asyncioreactor + + asyncioreactor.install(asyncio.get_event_loop()) + except ImportError: + pass + # Twisted and canonicaljson will fail to import when this file is executed to # get the __version__ during a fresh install. That's OK and subsequent calls to # actually start Synapse will import these libraries fine. 
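# --- Illustrative sketch (not part of the patch): how the SYNAPSE_ASYNC_IO_REACTOR
# opt-in added above is expected to be used. The gate is
# `bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False))`, so any non-empty string
# enables it, and it must be set before `synapse` is first imported so that the
# asyncio reactor is installed before anything else touches the default reactor.
# The printed class name is an assumption based on Twisted's asyncioreactor module.
import os

os.environ["SYNAPSE_ASYNC_IO_REACTOR"] = "1"  # opt in; any non-empty value works

import synapse  # noqa: F401  # installs the asyncio reactor if Twisted >= 21.2.0
from twisted.internet import reactor

print(type(reactor))  # expected: AsyncioSelectorReactor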
@@ -47,7 +68,7 @@ except ImportError: pass -__version__ = "1.54.0" +__version__ = "1.55.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when diff --git a/scripts/export_signing_key b/synapse/_scripts/export_signing_key.py similarity index 99% rename from scripts/export_signing_key rename to synapse/_scripts/export_signing_key.py index bf0139bd64..3d254348f1 100755 --- a/scripts/export_signing_key +++ b/synapse/_scripts/export_signing_key.py @@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): ) -if __name__ == "__main__": +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -85,7 +85,6 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): else format_plain ) - keys = [] for file in args.key_file: try: res = read_signing_keys(file) @@ -98,3 +97,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): res = [] for key in res: formatter(get_verify_key(key)) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_config b/synapse/_scripts/generate_config.py similarity index 98% rename from scripts/generate_config rename to synapse/_scripts/generate_config.py index 931b40c045..75fce20b12 100755 --- a/scripts/generate_config +++ b/synapse/_scripts/generate_config.py @@ -6,7 +6,8 @@ from synapse.config.homeserver import HomeServerConfig -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( "--config-dir", @@ -76,3 +77,7 @@ shutil.copyfileobj(args.header_file, args.output_file) args.output_file.write(conf) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_log_config b/synapse/_scripts/generate_log_config.py similarity index 98% rename from scripts/generate_log_config rename to synapse/_scripts/generate_log_config.py index e72a0dafb7..82fc763140 100755 --- a/scripts/generate_log_config +++ b/synapse/_scripts/generate_log_config.py @@ -19,7 +19,8 @@ from synapse.config.logger import DEFAULT_LOG_CONFIG -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -42,3 +43,7 @@ out = args.output_file out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) out.flush() + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_signing_key.py b/synapse/_scripts/generate_signing_key.py similarity index 97% rename from scripts/generate_signing_key.py rename to synapse/_scripts/generate_signing_key.py index 07df25a809..bc26d25bfd 100755 --- a/scripts/generate_signing_key.py +++ b/synapse/_scripts/generate_signing_key.py @@ -19,7 +19,8 @@ from synapse.util.stringutils import random_string -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -34,3 +35,7 @@ key_id = "a_" + random_string(4) key = (generate_signing_key(key_id),) write_signing_keys(args.output_file, key) + + +if __name__ == "__main__": + main() diff --git a/scripts/hash_password b/synapse/_scripts/hash_password.py similarity index 96% rename from scripts/hash_password rename to synapse/_scripts/hash_password.py index 1d6fb0d700..708640c7de 100755 --- a/scripts/hash_password +++ b/synapse/_scripts/hash_password.py @@ -8,9 +8,6 @@ import bcrypt import yaml -bcrypt_rounds = 12 -password_pepper = "" - def prompt_for_pass(): password = getpass.getpass("Password: ") @@ -26,7 +23,10 @@ def prompt_for_pass(): return password -if __name__ == "__main__": +def main(): + 
bcrypt_rounds = 12 + password_pepper = "" + parser = argparse.ArgumentParser( description=( "Calculate the hash of a new password, so that passwords can be reset" @@ -77,3 +77,7 @@ def prompt_for_pass(): ).decode("ascii") print(hashed) + + +if __name__ == "__main__": + main() diff --git a/scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py similarity index 97% rename from scripts/move_remote_media_to_new_store.py rename to synapse/_scripts/move_remote_media_to_new_store.py index 875aa4781f..9667d95dfe 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -28,7 +28,7 @@ To use, pipe the above into:: - PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py + PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py """ import argparse diff --git a/scripts/synapse_port_db b/synapse/_scripts/synapse_port_db.py similarity index 99% rename from scripts/synapse_port_db rename to synapse/_scripts/synapse_port_db.py index 2586c9e6c5..720bcfbe7e 100755 --- a/scripts/synapse_port_db +++ b/synapse/_scripts/synapse_port_db.py @@ -1148,7 +1148,7 @@ def set_state(self, state): ############################################## -if __name__ == "__main__": +def main(): parser = argparse.ArgumentParser( description="A script to port an existing synapse SQLite database to" " a new PostgreSQL database." @@ -1253,3 +1253,7 @@ def run(): sys.stderr.write(end_error) sys.exit(5) + + +if __name__ == "__main__": + main() diff --git a/synctl b/synapse/_scripts/synctl.py similarity index 100% rename from synctl rename to synapse/_scripts/synctl.py diff --git a/scripts/update_synapse_database b/synapse/_scripts/update_synapse_database.py similarity index 100% rename from scripts/update_synapse_database rename to synapse/_scripts/update_synapse_database.py diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 36ace7c613..b0c08a074d 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -178,7 +178,9 @@ class RelationTypes: ANNOTATION: Final = "m.annotation" REPLACE: Final = "m.replace" REFERENCE: Final = "m.reference" - THREAD: Final = "io.element.thread" + THREAD: Final = "m.thread" + # TODO Remove this in Synapse >= v1.57.0. + UNSTABLE_THREAD: Final = "io.element.thread" class LimitBlockingTypes: diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index cb532d7238..27e97d6f37 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -88,7 +88,9 @@ "org.matrix.labels": {"type": "array", "items": {"type": "string"}}, "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}}, # MSC3440, filtering by event relations. + "related_by_senders": {"type": "array", "items": {"type": "string"}}, "io.element.relation_senders": {"type": "array", "items": {"type": "string"}}, + "related_by_rel_types": {"type": "array", "items": {"type": "string"}}, "io.element.relation_types": {"type": "array", "items": {"type": "string"}}, }, } @@ -318,19 +320,18 @@ def __init__(self, hs: "HomeServer", filter_json: JsonDict): self.labels = filter_json.get("org.matrix.labels", None) self.not_labels = filter_json.get("org.matrix.not_labels", []) - # Ideally these would be rejected at the endpoint if they were provided - # and not supported, but that would involve modifying the JSON schema - # based on the homeserver configuration. 
+ self.related_by_senders = self.filter_json.get("related_by_senders", None) + self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None) + + # Fallback to the unstable prefix if the stable version is not given. if hs.config.experimental.msc3440_enabled: - self.relation_senders = self.filter_json.get( + self.related_by_senders = self.related_by_senders or self.filter_json.get( "io.element.relation_senders", None ) - self.relation_types = self.filter_json.get( - "io.element.relation_types", None + self.related_by_rel_types = ( + self.related_by_rel_types + or self.filter_json.get("io.element.relation_types", None) ) - else: - self.relation_senders = None - self.relation_types = None def filters_all_types(self) -> bool: return "*" in self.not_types @@ -461,7 +462,7 @@ async def _check_event_relations( event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined] event_ids_to_keep = set( await self._store.events_have_relations( - event_ids, self.relation_senders, self.relation_types + event_ids, self.related_by_senders, self.related_by_rel_types ) ) @@ -474,7 +475,7 @@ async def _check_event_relations( async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: result = [event for event in events if self._check(event)] - if self.relation_senders or self.relation_types: + if self.related_by_senders or self.related_by_rel_types: return await self._check_event_relations(result) return result diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1536a42723..b6f510ed30 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -322,7 +322,8 @@ def _listen_http(self, listener_config: ListenerConfig) -> None: presence.register_servlets(self, resource) - groups.register_servlets(self, resource) + if self.config.experimental.groups_enabled: + groups.register_servlets(self, resource) resources.update({CLIENT_API_PREFIX: resource}) @@ -417,7 +418,7 @@ def start_listening(self) -> None: else: logger.warning("Unsupported listener type: %s", listener.type) - self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) def start(config_options: List[str]) -> None: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a6789a840e..e4dc04c0b4 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -273,7 +273,7 @@ def start_listening(self) -> None: # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). 
- self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) for listener in self.config.server.listeners: if listener.type == "http": diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 4d3f8e4923..07ec95f1d6 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -175,27 +175,14 @@ def _is_exclusive(self, namespace_key: str, test_string: str) -> bool: return namespace.exclusive return False - async def _matches_user(self, event: EventBase, store: "DataStore") -> bool: - if self.is_interested_in_user(event.sender): - return True - - # also check m.room.member state key - if event.type == EventTypes.Member and self.is_interested_in_user( - event.state_key - ): - return True - - does_match = await self.matches_user_in_member_list(event.room_id, store) - return does_match - @cached(num_args=1, cache_context=True) - async def matches_user_in_member_list( + async def _matches_user_in_member_list( self, room_id: str, store: "DataStore", cache_context: _CacheContext, ) -> bool: - """Check if this service is interested a room based upon it's membership + """Check if this service is interested in a room based upon its membership Args: room_id: The room to check. @@ -214,47 +201,110 @@ async def matches_user_in_member_list( return True return False - def _matches_room_id(self, event: EventBase) -> bool: - if hasattr(event, "room_id"): - return self.is_interested_in_room(event.room_id) - return False + def is_interested_in_user( + self, + user_id: str, + ) -> bool: + """ + Returns whether the application service is interested in a given user ID. + + The appservice is considered to be interested in a user if either: the + user ID is in the appservice's user namespace, or if the user is the + appservice's configured sender_localpart. + + Args: + user_id: The ID of the user to check. + + Returns: + True if the application service is interested in the user, False if not. + """ + return ( + # User is the appservice's sender_localpart user + user_id == self.sender + # User is in the appservice's user namespace + or self.is_user_in_namespace(user_id) + ) + + @cached(num_args=1, cache_context=True) + async def is_interested_in_room( + self, + room_id: str, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: + """ + Returns whether the application service is interested in a given room ID. + + The appservice is considered to be interested in the room if either: the ID or one + of the aliases of the room is in the appservice's room ID or alias namespace + respectively, or if one of the members of the room falls into the appservice's user + namespace. - async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool: - alias_list = await store.get_aliases_for_room(event.room_id) + Args: + room_id: The ID of the room to check. + store: The homeserver's datastore class. + + Returns: + True if the application service is interested in the room, False if not.
+ """ + # Check if we have interest in this room ID + if self.is_room_id_in_namespace(room_id): + return True + + # likewise with the room's aliases (if it has any) + alias_list = await store.get_aliases_for_room(room_id) for alias in alias_list: - if self.is_interested_in_alias(alias): + if self.is_room_alias_in_namespace(alias): return True - return False + # And finally, perform an expensive check on whether any of the + # users in the room match the appservice's user namespace + return await self._matches_user_in_member_list( + room_id, store, on_invalidate=cache_context.invalidate + ) - async def is_interested(self, event: EventBase, store: "DataStore") -> bool: + @cached(num_args=1, cache_context=True) + async def is_interested_in_event( + self, + event_id: str, + event: EventBase, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: """Check if this service is interested in this event. Args: + event_id: The ID of the event to check. This is purely used for simplifying the + caching of calls to this method. event: The event to check. store: The datastore to query. Returns: - True if this service would like to know about this event. + True if this service would like to know about this event, otherwise False. """ - # Do cheap checks first - if self._matches_room_id(event): + # Check if we're interested in this event's sender by namespace (or if they're the + # sender_localpart user) + if self.is_interested_in_user(event.sender): return True - # This will check the namespaces first before - # checking the store, so should be run before _matches_aliases - if await self._matches_user(event, store): + # additionally, if this is a membership event, perform the same checks on + # the user it references + if event.type == EventTypes.Member and self.is_interested_in_user( + event.state_key + ): return True - # This will check the store, so should be run last - if await self._matches_aliases(event, store): + # This will check the datastore, so should be run last + if await self.is_interested_in_room( + event.room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - @cached(num_args=1) + @cached(num_args=1, cache_context=True) async def is_interested_in_presence( - self, user_id: UserID, store: "DataStore" + self, user_id: UserID, store: "DataStore", cache_context: _CacheContext ) -> bool: """Check if this service is interested a user's presence @@ -272,20 +322,19 @@ async def is_interested_in_presence( # Then find out if the appservice is interested in any of those rooms for room_id in room_ids: - if await self.matches_user_in_member_list(room_id, store): + if await self.is_interested_in_room( + room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - def is_interested_in_user(self, user_id: str) -> bool: - return ( - bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - or user_id == self.sender - ) + def is_user_in_namespace(self, user_id: str) -> bool: + return bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - def is_interested_in_alias(self, alias: str) -> bool: + def is_room_alias_in_namespace(self, alias: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias)) - def is_interested_in_room(self, room_id: str) -> bool: + def is_room_id_in_namespace(self, room_id: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id)) def is_exclusive_user(self, user_id: str) -> bool: diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py 
index a0ea958af6..98fe354014 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -25,7 +25,7 @@ TransactionUnusedFallbackKeys, ) from synapse.events import EventBase -from synapse.events.utils import serialize_event +from synapse.events.utils import SerializeEventConfig, serialize_event from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, ThirdPartyInstanceID from synapse.util.caches.response_cache import ResponseCache @@ -321,16 +321,18 @@ def _serialize( serialize_event( e, time_now, - as_client_event=True, - # If this is an invite or a knock membership event, and we're interested - # in this user, then include any stripped state alongside the event. - include_stripped_room_state=( - e.type == EventTypes.Member - and ( - e.membership == Membership.INVITE - or e.membership == Membership.KNOCK - ) - and service.is_interested_in_user(e.state_key) + config=SerializeEventConfig( + as_client_event=True, + # If this is an invite or a knock membership event, and we're interested + # in this user, then include any stripped state alongside the event. + include_stripped_room_state=( + e.type == EventTypes.Member + and ( + e.membership == Membership.INVITE + or e.membership == Membership.KNOCK + ) + and service.is_interested_in_user(e.state_key) + ), ), ) for e in events diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1265738dc1..8e19e2fc26 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -383,7 +383,7 @@ def generate_config( Build a default configuration file This is used when the user explicitly asks us to generate a config file - (eg with --generate_config). + (eg with --generate-config). Args: config_dir_path: The path where the config files are kept. Used to diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 1eb5f5a68c..363d8b4554 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -19,6 +19,7 @@ from synapse.config import ( api, appservice, auth, + background_updates, cache, captcha, cas, @@ -113,6 +114,7 @@ class RootConfig: caches: cache.CacheConfig federation: federation.FederationConfig retention: retention.RetentionConfig + background_updates: background_updates.BackgroundUpdateConfig config_classes: List[Type["Config"]] = ... def __init__(self) -> None: ... diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py new file mode 100644 index 0000000000..f6cdeacc4b --- /dev/null +++ b/synapse/config/background_updates.py @@ -0,0 +1,68 @@ +# Copyright 2022 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config + + +class BackgroundUpdateConfig(Config): + section = "background_updates" + + def generate_config_section(self, **kwargs) -> str: + return """\ + ## Background Updates ## + + # Background updates are database updates that are run in the background in batches. 
+ # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to + # sleep can all be configured. This is helpful to speed up or slow down the updates. + # + background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. + # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. + # + #default_batch_size: 50 + """ + + def read_config(self, config, **kwargs) -> None: + bg_update_config = config.get("background_updates") or {} + + self.update_duration_ms = bg_update_config.get( + "background_update_duration_ms", 100 + ) + + self.sleep_enabled = bg_update_config.get("sleep_enabled", True) + + self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000) + + self.min_batch_size = bg_update_config.get("min_batch_size", 1) + + self.default_batch_size = bg_update_config.get("default_batch_size", 100) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 41338b39df..064db4487c 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -74,3 +74,6 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) + + # The deprecated groups feature. + self.groups_enabled: bool = experimental.get("groups_enabled", True) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 001605c265..a4ec706908 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -16,6 +16,7 @@ from .api import ApiConfig from .appservice import AppServiceConfig from .auth import AuthConfig +from .background_updates import BackgroundUpdateConfig from .cache import CacheConfig from .captcha import CaptchaConfig from .cas import CasConfig @@ -99,4 +100,5 @@ class HomeServerConfig(RootConfig): WorkerConfig, RedisConfig, ExperimentalConfig, + BackgroundUpdateConfig, ] diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index f7e4f9ef22..5d571651cb 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -182,8 +182,14 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their - # own username (see 'sso_auth_account_details.html' in the 'sso' - # section of this file). + # own username (see the documentation for the + # 'sso_auth_account_details.html' template). This template can + # use the 'localpart_from_email' filter. + # + # confirm_localpart: Whether to prompt the user to validate (or + # change) the generated localpart (see the documentation for the + # 'sso_auth_account_details.html' template), instead of + # registering the account right away. 
# # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 04afd48274..60904a55f5 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -245,8 +245,8 @@ async def check_event_for_spam( """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if - sent by a local user. If it is sent by a user on another server, then - users receive a blank event. + sent by a local user. If it is sent by a user on another server, the + event is soft-failed. Args: event: the event to be checked diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index ede72ee876..bfca454f51 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -38,6 +38,8 @@ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] +CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] @@ -157,6 +159,12 @@ def __init__(self, hs: "HomeServer"): CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = [] self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = [] + self._check_can_shutdown_room_callbacks: List[ + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK + ] = [] + self._check_can_deactivate_user_callbacks: List[ + CHECK_CAN_DEACTIVATE_USER_CALLBACK + ] = [] self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = [] self._on_user_deactivation_status_changed_callbacks: List[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -173,6 +181,8 @@ def register_third_party_rules_callbacks( CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, + check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -198,6 +208,11 @@ def register_third_party_rules_callbacks( if on_new_event is not None: self._on_new_event_callbacks.append(on_new_event) + if check_can_shutdown_room is not None: + self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room) + + if check_can_deactivate_user is not None: + self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user) if on_profile_update is not None: self._on_profile_update_callbacks.append(on_profile_update) @@ -369,6 +384,46 @@ async def on_new_event(self, event_id: str) -> None: "Failed to run module API callback %s: %s", callback, e ) + async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool: + """Intercept requests to shutdown a room. If `False` is returned, the + room must not be shut down. + + Args: + user_id: The ID of the user requesting the shutdown. + room_id: The ID of the room.
+ """ + for callback in self._check_can_shutdown_room_callbacks: + try: + if await callback(user_id, room_id) is False: + return False + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + return True + + async def check_can_deactivate_user( + self, + user_id: str, + by_admin: bool, + ) -> bool: + """Intercept requests to deactivate a user. If `False` is returned, the + user should not be deactivated. + + Args: + requester + user_id: The ID of the room. + """ + for callback in self._check_can_deactivate_user_callbacks: + try: + if await callback(user_id, by_admin) is False: + return False + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + return True + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: """Given a room ID, return the state events of that room. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9386fa29dd..a0520068e0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -26,6 +26,7 @@ Union, ) +import attr from frozendict import frozendict from synapse.api.constants import EventContentFields, EventTypes, RelationTypes @@ -37,6 +38,7 @@ from . import EventBase if TYPE_CHECKING: + from synapse.server import HomeServer from synapse.storage.databases.main.relations import BundledAggregations @@ -303,29 +305,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict: return d +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SerializeEventConfig: + as_client_event: bool = True + # Function to convert from federation format to client format + event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1 + # ID of the user's auth token - used for namespacing of transaction IDs + token_id: Optional[int] = None + # List of event fields to include. If empty, all fields will be returned. + only_event_fields: Optional[List[str]] = None + # Some events can have stripped room state stored in the `unsigned` field. + # This is required for invite and knock functionality. If this option is + # False, that state will be removed from the event before it is returned. + # Otherwise, it will be kept. + include_stripped_room_state: bool = False + + +_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig() + + def serialize_event( e: Union[JsonDict, EventBase], time_now_ms: int, *, - as_client_event: bool = True, - event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1, - token_id: Optional[str] = None, - only_event_fields: Optional[List[str]] = None, - include_stripped_room_state: bool = False, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, ) -> JsonDict: """Serialize event for clients Args: e time_now_ms - as_client_event - event_format - token_id - only_event_fields - include_stripped_room_state: Some events can have stripped room state - stored in the `unsigned` field. This is required for invite and knock - functionality. If this option is False, that state will be removed from the - event before it is returned. Otherwise, it will be kept. + config: Event serialization config Returns: The serialized event dictionary. 
@@ -348,11 +358,11 @@ def serialize_event( if "redacted_because" in e.unsigned: d["unsigned"]["redacted_because"] = serialize_event( - e.unsigned["redacted_because"], time_now_ms, event_format=event_format + e.unsigned["redacted_because"], time_now_ms, config=config ) - if token_id is not None: - if token_id == getattr(e.internal_metadata, "token_id", None): + if config.token_id is not None: + if config.token_id == getattr(e.internal_metadata, "token_id", None): txn_id = getattr(e.internal_metadata, "txn_id", None) if txn_id is not None: d["unsigned"]["transaction_id"] = txn_id @@ -361,13 +371,14 @@ def serialize_event( # that are meant to provide metadata about a room to an invitee/knocker. They are # intended to only be included in specific circumstances, such as down sync, and # should not be included in any other case. - if not include_stripped_room_state: + if not config.include_stripped_room_state: d["unsigned"].pop("invite_room_state", None) d["unsigned"].pop("knock_room_state", None) - if as_client_event: - d = event_format(d) + if config.as_client_event: + d = config.event_format(d) + only_event_fields = config.only_event_fields if only_event_fields: if not isinstance(only_event_fields, list) or not all( isinstance(f, str) for f in only_event_fields @@ -385,23 +396,26 @@ class EventClientSerializer: clients. """ + def __init__(self, hs: "HomeServer"): + self._msc3440_enabled = hs.config.experimental.msc3440_enabled + def serialize_event( self, event: Union[JsonDict, EventBase], time_now: int, *, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, - **kwargs: Any, ) -> JsonDict: """Serializes a single event. Args: event: The event being serialized. time_now: The current time in milliseconds + config: Event serialization config bundle_aggregations: Whether to include the bundled aggregations for this event. Only applies to non-state events. (State events never include bundled aggregations.) - **kwargs: Arguments to pass to `serialize_event` Returns: The serialized event @@ -410,7 +424,7 @@ def serialize_event( if not isinstance(event, EventBase): return event - serialized_event = serialize_event(event, time_now, **kwargs) + serialized_event = serialize_event(event, time_now, config=config) # Check if there are any bundled aggregations to include with the event. if bundle_aggregations: @@ -419,6 +433,7 @@ def serialize_event( self._inject_bundled_aggregations( event, time_now, + config, bundle_aggregations[event.event_id], serialized_event, ) @@ -456,6 +471,7 @@ def _inject_bundled_aggregations( self, event: EventBase, time_now: int, + config: SerializeEventConfig, aggregations: "BundledAggregations", serialized_event: JsonDict, ) -> None: @@ -466,6 +482,7 @@ def _inject_bundled_aggregations( time_now: The current time in milliseconds aggregations: The bundled aggregation to serialize. serialized_event: The serialized event which may be modified. + config: Event serialization config """ serialized_aggregations = {} @@ -493,8 +510,8 @@ def _inject_bundled_aggregations( thread = aggregations.thread # Don't bundle aggregations as this could recurse forever. - serialized_latest_event = self.serialize_event( - thread.latest_event, time_now, bundle_aggregations=None + serialized_latest_event = serialize_event( + thread.latest_event, time_now, config=config ) # Manually apply an edit, if one exists. 
if thread.latest_edit: @@ -502,33 +519,53 @@ def _inject_bundled_aggregations( thread.latest_event, serialized_latest_event, thread.latest_edit ) - serialized_aggregations[RelationTypes.THREAD] = { + thread_summary = { "latest_event": serialized_latest_event, "count": thread.count, "current_user_participated": thread.current_user_participated, } + serialized_aggregations[RelationTypes.THREAD] = thread_summary + if self._msc3440_enabled: + serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary # Include the bundled aggregations in the event. if serialized_aggregations: - serialized_event["unsigned"].setdefault("m.relations", {}).update( - serialized_aggregations - ) + # There is likely already an "unsigned" field, but a filter might + # have stripped it off (via the event_fields option). The server is + # allowed to return additional fields, so add it back. + serialized_event.setdefault("unsigned", {}).setdefault( + "m.relations", {} + ).update(serialized_aggregations) def serialize_events( - self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any + self, + events: Iterable[Union[JsonDict, EventBase]], + time_now: int, + *, + config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, + bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, ) -> List[JsonDict]: """Serializes multiple events. Args: event time_now: The current time in milliseconds - **kwargs: Arguments to pass to `serialize_event` + config: Event serialization config + bundle_aggregations: Whether to include the bundled aggregations for this + event. Only applies to non-state events. (State events never include + bundled aggregations.) Returns: The list of serialized events """ return [ - self.serialize_event(event, time_now=time_now, **kwargs) for event in events + self.serialize_event( + event, + time_now, + config=config, + bundle_aggregations=bundle_aggregations, + ) + for event in events ] diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 64e595e748..467275b98c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1428,7 +1428,7 @@ async def send_request( # Validate children_state of the room. children_state = room.pop("children_state", []) - if not isinstance(children_state, Sequence): + if not isinstance(children_state, list): raise InvalidResponseError("'room.children_state' must be a list") if any(not isinstance(e, dict) for e in children_state): raise InvalidResponseError("Invalid event in 'children_state' list") @@ -1440,14 +1440,14 @@ async def send_request( # Validate the children rooms. children = res.get("children", []) - if not isinstance(children, Sequence): + if not isinstance(children, list): raise InvalidResponseError("'children' must be a list") if any(not isinstance(r, dict) for r in children): raise InvalidResponseError("Invalid room in 'children' list") # Validate the inaccessible children. 
inaccessible_children = res.get("inaccessible_children", []) - if not isinstance(inaccessible_children, Sequence): + if not isinstance(inaccessible_children, list): raise InvalidResponseError("'inaccessible_children' must be a list") if any(not isinstance(r, str) for r in inaccessible_children): raise InvalidResponseError( @@ -1630,7 +1630,7 @@ def _validate_hierarchy_event(d: JsonDict) -> None: raise ValueError("Invalid event: 'content' must be a dict") via = content.get("via") - if not isinstance(via, Sequence): + if not isinstance(via, list): raise ValueError("Invalid event: 'via' must be a list") if any(not isinstance(v, str) for v in via): raise ValueError("Invalid event: 'via' must be a list of strings") diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0d7c4f5067..d720b5fd3f 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -244,7 +244,7 @@ def send_presence_to_destinations( self.notifier.on_new_replication_data() - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 6106a486d1..30e2421efc 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -118,7 +118,12 @@ def build_and_send_edu( raise NotImplementedError() @abc.abstractmethod - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = True) -> None: + """Tells the sender that a new device message is ready to be sent to the + destination. The `immediate` flag specifies whether we should attempt to + send the messages immediately, or whether sending can be delayed for a + short while (to aid performance). + """ raise NotImplementedError() @abc.abstractmethod @@ -146,9 +151,8 @@ async def get_replication_rows( @attr.s -class _PresenceQueue: - """A queue of destinations that need to be woken up due to new presence - updates. +class _DestinationWakeupQueue: + """A queue of destinations that need to be woken up due to new updates. Staggers waking up of per destination queues to ensure that we don't attempt to start TLS connections with many hosts all at once, leading to pinned CPU.
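# --- Illustrative sketch (not part of the patch): the staggered-wakeup pattern that
# _DestinationWakeupQueue (renamed above from _PresenceQueue) is built around. This
# toy version is an assumption-level model, not Synapse's implementation: drain one
# queued destination at a time with a small sleep, so we don't try to open TLS
# connections to many hosts at once.
import asyncio
from collections import OrderedDict
from typing import Callable

class ToyWakeupQueue:
    def __init__(self, wake: Callable[[str], None], delay_s: float = 0.05) -> None:
        self._wake = wake        # e.g. kick off a transaction to that destination
        self._delay_s = delay_s
        self._queue: "OrderedDict[str, None]" = OrderedDict()  # ordered set
        self._processing = False

    def add_to_queue(self, destination: str) -> None:
        self._queue[destination] = None  # dedupe: re-adding keeps original position
        if not self._processing:
            self._processing = True
            asyncio.ensure_future(self._handle())

    async def _handle(self) -> None:
        try:
            while self._queue:
                destination, _ = self._queue.popitem(last=False)
                self._wake(destination)
                await asyncio.sleep(self._delay_s)  # stagger wakeups
        finally:
            self._processing = False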
@@ -175,7 +179,7 @@ def add_to_queue(self, destination: str) -> None: if not self.processing: self._handle() - @wrap_as_background_process("_PresenceQueue.handle") + @wrap_as_background_process("_DestinationWakeupQueue.handle") async def _handle(self) -> None: """Background process to drain the queue.""" @@ -297,7 +301,7 @@ def __init__(self, hs: "HomeServer"): self._external_cache = hs.get_external_cache() - self._presence_queue = _PresenceQueue(self, self.clock) + self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock) def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination @@ -614,7 +618,7 @@ def send_presence_to_destinations( states, start_loop=False ) - self._presence_queue.add_to_queue(destination) + self._destination_wakeup_queue.add_to_queue(destination) def build_and_send_edu( self, @@ -667,7 +671,7 @@ def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None: else: queue.send_edu(edu) - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: if destination == self.server_name: logger.warning("Not sending device update to ourselves") return @@ -677,7 +681,11 @@ def send_device_messages(self, destination: str) -> None: ): return - self._get_per_destination_queue(destination).attempt_new_transaction() + if immediate: + self._get_per_destination_queue(destination).attempt_new_transaction() + else: + self._get_per_destination_queue(destination).mark_new_data() + self._destination_wakeup_queue.add_to_queue(destination) def wake_destination(self, destination: str) -> None: """Called when we want to retry sending transactions to a remote. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index c8768f22bc..d80f0ac5e8 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -219,6 +219,16 @@ def send_edu(self, edu: Edu) -> None: self._pending_edus.append(edu) self.attempt_new_transaction() + def mark_new_data(self) -> None: + """Marks that the destination has new data to send, without starting a + new transaction. + + If a transaction loop is already in progress then a new transaction will + be attempted when the current one finishes. + """ + + self._new_data_to_send = True + def attempt_new_transaction(self) -> None: """Try to start a new transaction to this destination diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 67a6347907..71b2f90eb9 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -289,7 +289,7 @@ async def on_GET( return 200, {"sub": user_id} -DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { +SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), "group_server": GROUP_SERVER_SERVLET_CLASSES, @@ -298,6 +298,10 @@ async def on_GET( "openid": (OpenIdUserInfo,), } +DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid") + +GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation") + def register_servlets( hs: "HomeServer", @@ -320,16 +324,19 @@ def register_servlets( Defaults to ``DEFAULT_SERVLET_GROUPS``.
""" if not servlet_groups: - servlet_groups = DEFAULT_SERVLET_GROUPS.keys() + servlet_groups = DEFAULT_SERVLET_GROUPS + # Only allow the groups servlets if the deprecated groups feature is enabled. + if hs.config.experimental.groups_enabled: + servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS for servlet_group in servlet_groups: # Skip unknown servlet groups. - if servlet_group not in DEFAULT_SERVLET_GROUPS: + if servlet_group not in SERVLET_GROUPS: raise RuntimeError( f"Attempting to register unknown federation servlet: '{servlet_group}'" ) - for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]: + for servletclass in SERVLET_GROUPS[servlet_group]: # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled if ( servletclass == FederationTimestampLookupServlet diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 87e99c7ddf..2529dee613 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -63,7 +63,7 @@ def __init__(self, hs: "HomeServer"): self.replication_client = None if hs.config.worker.worker_app: - self.replication_client = hs.get_tcp_replication() + self.replication_client = hs.get_replication_command_handler() # A method just so we can pass 'self' as the authenticator to the Servlets async def authenticate_request( diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index e6461cc3c9..bd913e524e 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -571,7 +571,7 @@ async def query_room_alias_exists( room_alias_str = room_alias.to_string() services = self.store.get_app_services() alias_query_services = [ - s for s in services if (s.is_interested_in_alias(room_alias_str)) + s for s in services if (s.is_room_alias_in_namespace(room_alias_str)) ] for alias_service in alias_query_services: is_known_alias = await self.appservice_api.query_alias( @@ -660,7 +660,7 @@ async def _get_services_for_event( # inside of a list comprehension anymore. interested_list = [] for s in services: - if await s.is_interested(event, self.store): + if await s.is_interested_in_event(event.event_id, event, self.store): interested_list.append(s) return interested_list diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index c040eb3519..05ba608621 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -17,7 +17,7 @@ from synapse.api.errors import SynapseError from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import Requester, UserID, create_requester +from synapse.types import Codes, Requester, UserID, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -42,6 +42,7 @@ def __init__(self, hs: "HomeServer"): # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False + self._third_party_rules = hs.get_third_party_event_rules() # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). @@ -74,6 +75,15 @@ async def deactivate_account( Returns: True if identity server supports removing threepids, otherwise False. 
""" + + # Check if this user can be deactivated + if not await self._third_party_rules.check_can_deactivate_user( + user_id, by_admin + ): + raise SynapseError( + 403, "Deactivation of this user is forbidden", Codes.FORBIDDEN + ) + # FIXME: Theoretically there is a race here wherein user resets # password using threepid. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 934b5bd734..d5ccaa0c37 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -371,7 +371,6 @@ async def delete_device(self, user_id: str, device_id: str) -> None: log_kv( {"reason": "User doesn't have device id.", "device_id": device_id} ) - pass else: raise @@ -414,7 +413,6 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: # no match set_tag("error", True) set_tag("reason", "User doesn't have that device id.") - pass else: raise @@ -506,7 +504,7 @@ async def notify_device_update( "Sending device list update notif for %r to: %r", user_id, hosts ) for host in hosts: - self.federation_sender.send_device_messages(host) + self.federation_sender.send_device_messages(host, immediate=False) log_kv({"message": "sent device update to host", "host": host}) async def notify_user_signature_update( diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index b7064c6624..33d827a45b 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -119,7 +119,7 @@ async def create_association( service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias_str): + if not service.is_room_alias_in_namespace(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -221,7 +221,7 @@ async def delete_association( async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias ) -> None: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_room_alias_in_namespace(room_alias.to_string()): raise SynapseError( 400, "This application service has not reserved this kind of alias", @@ -376,7 +376,7 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> b # non-exclusive locks on the alias (or there are no interested services) services = self.store.get_app_services() interested_services = [ - s for s in services if s.is_interested_in_alias(alias.to_string()) + s for s in services if s.is_room_alias_in_namespace(alias.to_string()) ] for service in interested_services: diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 97e75e60c3..d2ccb5c5d3 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -19,6 +19,7 @@ from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase +from synapse.events.utils import SerializeEventConfig from synapse.handlers.presence import format_user_presence_state from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, UserID @@ -120,7 +121,7 @@ async def get_stream( chunks = self._event_serializer.serialize_events( events, time_now, - as_client_event=as_client_event, + config=SerializeEventConfig(as_client_event=as_client_event), ) chunk = { diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index eb03a5accb..db39aeabde 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -23,8 +23,6 @@ from signedjson.sign import 
verify_signed_json from unpaddedbase64 import decode_base64 -from twisted.internet import defer - from synapse import event_auth from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import ( @@ -45,11 +43,7 @@ from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict -from synapse.logging.context import ( - make_deferred_yieldable, - nested_logging_context, - preserve_fn, -) +from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, @@ -355,56 +349,8 @@ async def try_backfill(domains: List[str]) -> bool: if success: return True - # Huh, well *those* domains didn't work out. Lets try some domains - # from the time. - - tried_domains = set(likely_domains) - tried_domains.add(self.server_name) - - event_ids = list(extremities.keys()) - - logger.debug("calling resolve_state_groups in _maybe_backfill") - resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) - states_list = await make_deferred_yieldable( - defer.gatherResults( - [resolve(room_id, [e]) for e in event_ids], consumeErrors=True - ) - ) - - # A map from event_id to state map of event_ids. - state_ids: Dict[str, StateMap[str]] = dict( - zip(event_ids, [s.state for s in states_list]) - ) - - state_map = await self.store.get_events( - [e_id for ids in state_ids.values() for e_id in ids.values()], - get_prev_content=False, - ) - - # A map from event_id to state map of events. - state_events: Dict[str, StateMap[EventBase]] = { - key: { - k: state_map[e_id] - for k, e_id in state_dict.items() - if e_id in state_map - } - for key, state_dict in state_ids.items() - } - - for e_id in event_ids: - likely_extremeties_domains = get_domains_from_state(state_events[e_id]) - - success = await try_backfill( - [ - dom - for dom, _ in likely_extremeties_domains - if dom not in tried_domains - ] - ) - if success: - return True - - tried_domains.update(dom for dom, _ in likely_extremeties_domains) + # TODO: we could also try servers which were previously in the room, but + # are no longer. 
return False diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 344f20f37c..a7db8feb57 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -18,6 +18,7 @@ from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events import EventBase +from synapse.events.utils import SerializeEventConfig from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state from synapse.handlers.receipts import ReceiptEventSource @@ -152,10 +153,13 @@ async def _snapshot_all_rooms( public_room_ids = await self.store.get_public_room_ids() - limit = pagin_config.limit - if limit is None: + if pagin_config.limit is not None: + limit = pagin_config.limit + else: limit = 10 + serializer_options = SerializeEventConfig(as_client_event=as_client_event) + async def handle_room(event: RoomsForUser) -> None: d: JsonDict = { "room_id": event.room_id, @@ -173,7 +177,7 @@ async def handle_room(event: RoomsForUser) -> None: d["invite"] = self._event_serializer.serialize_event( invite_event, time_now, - as_client_event=as_client_event, + config=serializer_options, ) rooms_ret.append(d) @@ -225,7 +229,7 @@ async def handle_room(event: RoomsForUser) -> None: self._event_serializer.serialize_events( messages, time_now=time_now, - as_client_event=as_client_event, + config=serializer_options, ) ), "start": await start_token.to_string(self.store), @@ -235,7 +239,7 @@ async def handle_room(event: RoomsForUser) -> None: d["state"] = self._event_serializer.serialize_events( current_state.values(), time_now=time_now, - as_client_event=as_client_event, + config=serializer_options, ) account_data_events = [] diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 61cb133ef2..f9544fe7fb 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1069,6 +1069,9 @@ async def _validate_event_relation(self, event: EventBase) -> None: if relation_type == RelationTypes.ANNOTATION: aggregation_key = relation["key"] + if len(aggregation_key) > 500: + raise SynapseError(400, "Aggregation key is too long") + already_exists = await self.store.has_user_annotated_event( relates_to, event.type, aggregation_key, event.sender ) @@ -1076,7 +1079,10 @@ async def _validate_event_relation(self, event: EventBase) -> None: raise SynapseError(400, "Can't send same reaction twice") # Don't attempt to start a thread if the parent event is a relation. 
-        elif relation_type == RelationTypes.THREAD:
+        elif (
+            relation_type == RelationTypes.THREAD
+            or relation_type == RelationTypes.UNSTABLE_THREAD
+        ):
             if await self.store.event_includes_relation(relates_to):
                 raise SynapseError(
                     400, "Cannot start threads from an event with a relation"
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index 593a2aac66..724b9cfcb4 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -45,6 +45,7 @@
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
 from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
+from synapse.util.templates import _localpart_from_email_filter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -1228,6 +1229,7 @@ class OidcSessionData:
 class UserAttributeDict(TypedDict):
     localpart: Optional[str]
+    confirm_localpart: bool
     display_name: Optional[str]
     emails: List[str]
@@ -1307,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any:
 
 env = Environment(finalize=jinja_finalize)
+env.filters.update(
+    {
+        "localpart_from_email": _localpart_from_email_filter,
+    }
+)
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -1316,6 +1323,7 @@ class JinjaOidcMappingConfig:
     display_name_template: Optional[Template]
     email_template: Optional[Template]
     extra_attributes: Dict[str, Template]
+    confirm_localpart: bool = False
 
 
 class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
@@ -1357,12 +1365,17 @@ def parse_template_config(option_name: str) -> Optional[Template]:
                     "invalid jinja template", path=["extra_attributes", key]
                 ) from e
 
+        confirm_localpart = config.get("confirm_localpart") or False
+        if not isinstance(confirm_localpart, bool):
+            raise ConfigError("must be a bool", path=["confirm_localpart"])
+
         return JinjaOidcMappingConfig(
             subject_claim=subject_claim,
             localpart_template=localpart_template,
             display_name_template=display_name_template,
             email_template=email_template,
             extra_attributes=extra_attributes,
+            confirm_localpart=confirm_localpart,
         )
 
     def get_remote_user_id(self, userinfo: UserInfo) -> str:
@@ -1398,7 +1411,10 @@ def render_template_field(template: Optional[Template]) -> Optional[str]:
                 emails.append(email)
 
         return UserAttributeDict(
-            localpart=localpart, display_name=display_name, emails=emails
+            localpart=localpart,
+            display_name=display_name,
+            emails=emails,
+            confirm_localpart=self._config.confirm_localpart,
         )
 
     async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict:
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 5c01a426ff..60059fec3e 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -22,6 +22,7 @@
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
@@ -349,7 +350,7 @@ async def _purge_history(
         """
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 await self.storage.purge_events.purge_history(
                     room_id, token, delete_local_events
                 )
@@ -405,7 +406,7 @@ async def purge_room(self, room_id: str, force: bool = False) -> None:
             room_id: room to be purged
             force: set true to skip checking for joined users.
         """
-        with await self.pagination_lock.write(room_id):
+        async with self.pagination_lock.write(room_id):
             # first check that we have no users in this room
             if not force:
                 joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -447,7 +448,7 @@ async def get_messages(
 
         room_token = from_token.room_key
 
-        with await self.pagination_lock.read(room_id):
+        async with self.pagination_lock.read(room_id):
             (
                 membership,
                 member_event_id,
@@ -541,13 +542,15 @@ async def get_messages(
 
         time_now = self.clock.time_msec()
 
+        serialize_options = SerializeEventConfig(as_client_event=as_client_event)
+
         chunk = {
             "chunk": (
                 self._event_serializer.serialize_events(
                     events,
                     time_now,
+                    config=serialize_options,
                     bundle_aggregations=aggregations,
-                    as_client_event=as_client_event,
                 )
             ),
             "start": await from_token.to_string(self.store),
@@ -556,7 +559,7 @@ async def get_messages(
 
         if state:
             chunk["state"] = self._event_serializer.serialize_events(
-                state, time_now, as_client_event=as_client_event
+                state, time_now, config=serialize_options
             )
 
         return chunk
@@ -612,7 +615,7 @@ async def _shutdown_and_purge_room(
 
         self._purges_in_progress_by_room.add(room_id)
         try:
-            with await self.pagination_lock.write(room_id):
+            async with self.pagination_lock.write(room_id):
                 self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                 self._delete_by_id[
                     delete_id
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index c155098bee..34d9411bbf 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -267,7 +267,6 @@ async def update_external_syncs_row(
             is_syncing: Whether or not the user is now syncing
             sync_time_msec: Time in ms when the user was last syncing
         """
-        pass
 
     async def update_external_syncs_clear(self, process_id: str) -> None:
         """Marks all users that had been marked as syncing by a given process
 
         This is a no-op when presence is handled by a different worker.
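Editor's note: the `pagination_lock` call sites change from `with await lock.write(...)` to `async with lock.write(...)`, i.e. the lock now returns an async context manager rather than a deferred one. A toy illustration of that shape — this is not Synapse's `ReadWriteLock` (which also allows concurrent readers), just the minimal pattern:

```python
import asyncio
from contextlib import asynccontextmanager

class ToyLock:
    def __init__(self) -> None:
        self._lock = asyncio.Lock()  # toy: one exclusive lock, no read/write split

    @asynccontextmanager
    async def write(self, key: str):
        async with self._lock:
            yield

pagination_lock = ToyLock()

async def purge(room_id: str) -> None:
    # New style: `async with` acquires and releases around the await points.
    async with pagination_lock.write(room_id):
        await asyncio.sleep(0)  # ... purge work happens here ...
```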
""" - pass async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: list @@ -424,13 +422,13 @@ def __init__(self, hs: "HomeServer"): async def _on_shutdown(self) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_command( + self.hs.get_replication_command_handler().send_command( ClearUserSyncsCommand(self.instance_id) ) def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_user_sync( + self.hs.get_replication_command_handler().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index b4132c353a..6250bb3bdf 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -269,7 +269,7 @@ async def get_new_events_as( # Then filter down to rooms that the AS can read events = [] for room_id, event in rooms_to_events.items(): - if not await service.matches_user_in_member_list(room_id, self.store): + if not await service.is_interested_in_room(room_id, self.store): continue events.append(event) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7b965b4b96..b9735631fc 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1475,6 +1475,7 @@ def __init__(self, hs: "HomeServer"): self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() + self._third_party_rules = hs.get_third_party_event_rules() self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main @@ -1548,6 +1549,13 @@ async def shutdown_room( if not RoomID.is_valid(room_id): raise SynapseError(400, "%s is not a legal room ID" % (room_id,)) + if not await self._third_party_rules.check_can_shutdown_room( + requester_user_id, room_id + ): + raise SynapseError( + 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN + ) + # Action the block first (even if the room doesn't exist yet) if block: # This will work even if the room is already blocked, but that is diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index a582837cf0..7cbc484b06 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1736,8 +1736,8 @@ async def _generate_local_out_of_band_leave( txn_id=txn_id, prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids, + outlier=True, ) - event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True result_event = await self.event_creation_handler.handle_new_client_event( diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 55c2cbdba8..486145f48a 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -295,7 +295,7 @@ async def _get_room_hierarchy( # inaccessible to the requesting user. if room_entry: # Add the room (including the stripped m.space.child events). - rooms_result.append(room_entry.as_json()) + rooms_result.append(room_entry.as_json(for_client=True)) # If this room is not at the max-depth, check if there are any # children to process. @@ -843,21 +843,32 @@ class _RoomEntry: # This may not include all children. children_state_events: Sequence[JsonDict] = () - def as_json(self) -> JsonDict: + def as_json(self, for_client: bool = False) -> JsonDict: """ Returns a JSON dictionary suitable for the room hierarchy endpoint. 
         It returns the room summary including the stripped m.space.child events
         as a sub-key.
+
+        Args:
+            for_client: If true, any server-server only fields are stripped from
+                the result.
+
         """
         result = dict(self.room)
+
+        # Before returning to the client, remove the allowed_room_ids key, if it
+        # exists.
+        if for_client:
+            result.pop("allowed_room_ids", False)
+
         result["children_state"] = self.children_state_events
 
         return result
 
 def _has_valid_via(e: EventBase) -> bool:
     via = e.content.get("via")
-    if not via or not isinstance(via, Sequence):
+    if not via or not isinstance(via, list):
         return False
     for v in via:
         if not isinstance(v, str):
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index ff5b5169ca..4f02a060d9 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -132,6 +132,7 @@ class UserAttributes:
     # if `None`, the mapper has not picked a userid, and the user should be prompted to
    # enter one.
     localpart: Optional[str]
+    confirm_localpart: bool = False
     display_name: Optional[str] = None
     emails: Collection[str] = attr.Factory(list)
 
@@ -561,9 +562,10 @@ def _get_url_for_next_new_user_step(
         # Must provide either attributes or session, not both
         assert (attributes is not None) != (session is not None)
 
-        if (attributes and attributes.localpart is None) or (
-            session and session.chosen_localpart is None
-        ):
+        if (
+            attributes
+            and (attributes.localpart is None or attributes.confirm_localpart is True)
+        ) or (session and session.chosen_localpart is None):
             return b"/_synapse/client/pick_username/account_details"
         elif self._consent_at_registration and not (
             session and session.terms_accepted_version
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 843c68eb0f..6854428b7c 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -160,8 +160,9 @@ def process_replication_rows(
         """Should be called whenever we receive updates for typing stream."""
 
         if self._latest_room_serial > token:
-            # The master has gone backwards. To prevent inconsistent data, just
-            # clear everything.
+            # The typing worker has gone backwards (e.g. it may have restarted).
+            # To prevent inconsistent data, just clear everything.
+            logger.info("Typing handler stream went backwards; resetting")
             self._reset()
 
         # Set the latest serial token to whatever the server gave us.
@@ -486,9 +487,7 @@ async def get_new_events_as(
             if handler._room_serials[room_id] <= from_key:
                 continue
 
-            if not await service.matches_user_in_member_list(
-                room_id, self._main_store
-            ):
+            if not await service.is_interested_in_room(room_id, self._main_store):
                 continue
 
             events.append(self._make_event_for(room_id))
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 40bf1e06d6..6b98d865f5 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -120,7 +120,6 @@ def finish(self) -> T:
         """Called when response has finished streaming and the parser should
         return the final result (or error).
         """
-        pass
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -601,7 +600,6 @@ async def _send_request(
                     response.code,
                     response_phrase,
                 )
-                pass
             else:
                 logger.info(
                     "{%s} [%s] Got response headers: %d %s",
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 09b4125489..31ca841889 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -233,7 +233,6 @@ def register_paths(
             servlet_classname (str): The name of the handler to be used in
                 prometheus and opentracing logs.
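Editor's note: the `_has_valid_via` change from `Sequence` to `list` closes a subtle hole: a `str` is itself a `Sequence` of one-character strings, so a malformed event with `"via": "example.org"` passed both the container check and the per-element `isinstance(v, str)` check. A quick demonstration:

```python
from collections.abc import Sequence

via = "example.org"  # malformed: should be a list of server names

print(isinstance(via, Sequence))             # True  -- the old check passed
print(all(isinstance(v, str) for v in via))  # True  -- each char is a str too
print(isinstance(via, list))                 # False -- the new check rejects it
```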
""" - pass class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): diff --git a/synapse/logging/context.py b/synapse/logging/context.py index c31c2960ad..88cd8a9e1c 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -29,7 +29,6 @@ from types import TracebackType from typing import ( TYPE_CHECKING, - Any, Awaitable, Callable, Optional, @@ -41,7 +40,7 @@ ) import attr -from typing_extensions import Literal +from typing_extensions import Literal, ParamSpec from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool @@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext: ) +P = ParamSpec("P") R = TypeVar("R") @overload def preserve_fn( # type: ignore[misc] - f: Callable[..., Awaitable[R]], -) -> Callable[..., "defer.Deferred[R]"]: + f: Callable[P, Awaitable[R]], +) -> Callable[P, "defer.Deferred[R]"]: # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" ... @overload -def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]: +def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]: ... def preserve_fn( f: Union[ - Callable[..., R], - Callable[..., Awaitable[R]], + Callable[P, R], + Callable[P, Awaitable[R]], ] -) -> Callable[..., "defer.Deferred[R]"]: +) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" - def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]": + def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]": return run_in_background(f, *args, **kwargs) return g @@ -752,7 +752,7 @@ def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]": @overload def run_in_background( # type: ignore[misc] - f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any + f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" @@ -761,18 +761,22 @@ def run_in_background( # type: ignore[misc] @overload def run_in_background( - f: Callable[..., R], *args: Any, **kwargs: Any + f: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": ... -def run_in_background( +def run_in_background( # type: ignore[misc] + # The `type: ignore[misc]` above suppresses + # "Overloaded function implementation does not accept all possible arguments of signature 1" + # "Overloaded function implementation does not accept all possible arguments of signature 2" + # which seems like a bug in mypy. 
     f: Union[
-        Callable[..., R],
-        Callable[..., Awaitable[R]],
+        Callable[P, R],
+        Callable[P, Awaitable[R]],
     ],
-    *args: Any,
-    **kwargs: Any,
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """Calls a function, ensuring that the current context is restored after
     return from the function, and that the sentinel context is set once the
@@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT:
 
 def defer_to_thread(
-    reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any
+    reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs
 ) -> "defer.Deferred[R]":
     """
     Calls the function `f` using a thread from the reactor's default threadpool and
@@ -908,9 +912,9 @@ def defer_to_thread(
 def defer_to_threadpool(
     reactor: "ISynapseReactor",
     threadpool: ThreadPool,
-    f: Callable[..., R],
-    *args: Any,
-    **kwargs: Any,
+    f: Callable[P, R],
+    *args: P.args,
+    **kwargs: P.kwargs,
 ) -> "defer.Deferred[R]":
     """
     A wrapper for twisted.internet.threads.deferToThreadpool, which handles
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index c42eeedd87..d735c1d461 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -54,6 +54,8 @@
     USER_MAY_SEND_3PID_INVITE_CALLBACK,
 )
 from synapse.events.third_party_rules import (
+    CHECK_CAN_DEACTIVATE_USER_CALLBACK,
+    CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
     CHECK_EVENT_ALLOWED_CALLBACK,
     CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
     CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
@@ -283,6 +285,8 @@ def register_third_party_rules_callbacks(
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None,
+        check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
         on_user_deactivation_status_changed: Optional[
             ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
@@ -298,6 +302,8 @@ def register_third_party_rules_callbacks(
             check_threepid_can_be_invited=check_threepid_can_be_invited,
             check_visibility_can_be_modified=check_visibility_can_be_modified,
             on_new_event=on_new_event,
+            check_can_shutdown_room=check_can_shutdown_room,
+            check_can_deactivate_user=check_can_deactivate_user,
             on_profile_update=on_profile_update,
             on_user_deactivation_status_changed=on_user_deactivation_status_changed,
         )
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index e2c2a62cd1..388210d45e 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -169,7 +169,7 @@ def make_base_prepend_rules(
                 "kind": "event_match",
                 "key": "content.msgtype",
                 "pattern": "m.notice",
-                "_id": "_suppress_notices",
+                "_cache_key": "_suppress_notices",
             }
         ],
         "actions": ["dont_notify"],
@@ -183,13 +183,13 @@ def make_base_prepend_rules(
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             },
             {
                 "kind": "event_match",
                 "key": "content.membership",
                 "pattern": "invite",
-                "_id": "_invite_member",
+                "_cache_key": "_invite_member",
             },
             {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
         ],
@@ -212,7 +212,7 @@ def make_base_prepend_rules(
                 "kind": "event_match",
                 "key": "type",
                 "pattern": "m.room.member",
-                "_id": "_member",
+                "_cache_key": "_member",
             }
         ],
         "actions": ["dont_notify"],
@@ -237,12 +237,12 @@ def make_base_prepend_rules(
                 "kind": "event_match",
                 "key": "content.body",
                 "pattern": "@room",
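Editor's note: the `logging/context.py` changes swap `Callable[..., R]` plus `*args: Any` for `ParamSpec`, so mypy now checks calls to `run_in_background` and friends against the wrapped function's real signature. A minimal sketch of the same pattern outside Synapse:

```python
from typing import Callable, TypeVar
from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")

def log_calls(f: Callable[P, R]) -> Callable[P, R]:
    # `P.args`/`P.kwargs` carry the wrapped function's parameter types through.
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)
    return wrapper

@log_calls
def add(x: int, y: int) -> int:
    return x + y

add(1, 2)      # OK
# add("a", 2)  # with ParamSpec mypy flags this; with Callable[..., R] it didn't
```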
"_id": "_roomnotif_content", + "_cache_key": "_roomnotif_content", }, { "kind": "sender_notification_permission", "key": "room", - "_id": "_roomnotif_pl", + "_cache_key": "_roomnotif_pl", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], @@ -254,13 +254,13 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.tombstone", - "_id": "_tombstone", + "_cache_key": "_tombstone", }, { "kind": "event_match", "key": "state_key", "pattern": "", - "_id": "_tombstone_statekey", + "_cache_key": "_tombstone_statekey", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], @@ -272,7 +272,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.reaction", - "_id": "_reaction", + "_cache_key": "_reaction", } ], "actions": ["dont_notify"], @@ -288,7 +288,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.call.invite", - "_id": "_call", + "_cache_key": "_call", } ], "actions": [ @@ -302,12 +302,12 @@ def make_base_prepend_rules( { "rule_id": "global/underride/.m.rule.room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", }, ], "actions": [ @@ -321,12 +321,12 @@ def make_base_prepend_rules( { "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", }, ], "actions": [ @@ -342,7 +342,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", } ], "actions": [ @@ -360,7 +360,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", } ], "actions": [ @@ -376,19 +376,19 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "im.vector.modular.widgets", - "_id": "_type_modular_widgets", + "_cache_key": "_type_modular_widgets", }, { "kind": "event_match", "key": "content.type", "pattern": "jitsi", - "_id": "_content_type_jitsi", + "_cache_key": "_content_type_jitsi", }, { "kind": "event_match", "key": "state_key", "pattern": "*", - "_id": "_is_state_event", + "_cache_key": "_is_state_event", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index fecf86034e..8140afcb6b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -274,17 +274,17 @@ def _condition_checker( cache: Dict[str, bool], ) -> bool: for cond in conditions: - _id = cond.get("_id", None) - if _id: - res = cache.get(_id, None) + _cache_key = cond.get("_cache_key", None) + if _cache_key: + res = cache.get(_cache_key, None) if res is False: return False elif res is True: continue res = evaluator.matches(cond, uid, display_name) - if _id: - cache[_id] = bool(res) + if _cache_key: + cache[_cache_key] = bool(res) if not res: return False diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -40,7 +40,7 @@ def format_push_rules_for_user(
 
         # Remove internal stuff.
         for c in r["conditions"]:
-            c.pop("_id", None)
+            c.pop("_cache_key", None)
 
             pattern_type = c.pop("pattern_type", None)
             if pattern_type == "user_id":
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index b40a7bbb76..1dd39f06cf 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -76,7 +76,8 @@
     "netaddr>=0.7.18",
     "Jinja2>=2.9",
     "bleach>=1.4.3",
-    "typing-extensions>=3.7.4",
+    # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0.
+    "typing-extensions>=3.10.0",
    # We enforce that we have a `cryptography` version that bundles an `openssl`
     # with the latest security patches.
     "cryptography>=3.4.7",
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 2e697c74a6..f1abb98653 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -21,6 +21,7 @@
 
 from prometheus_client import Counter, Gauge
 
+from twisted.internet.error import ConnectError, DNSLookupError
 from twisted.web.server import Request
 
 from synapse.api.errors import HttpResponseException, SynapseError
@@ -87,6 +88,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
         `_handle_request` must return a Deferred.
         RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504 is received.
+        RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request when
+            a connection error is received.
+        RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of retry attempts to make when
+            a connection error is received; each retry backs off exponentially longer.
     """
 
     NAME: str = abc.abstractproperty()  # type: ignore
@@ -94,6 +99,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
     METHOD = "POST"
     CACHE = True
     RETRY_ON_TIMEOUT = True
+    RETRY_ON_CONNECT_ERROR = True
+    RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5  # =63s (2^6-1)
 
     def __init__(self, hs: "HomeServer"):
         if self.CACHE:
@@ -236,18 +243,20 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
                 "/".join(url_args),
             )
 
+            headers: Dict[bytes, List[bytes]] = {}
+            # Add an authorization header, if configured.
+            if replication_secret:
+                headers[b"Authorization"] = [b"Bearer " + replication_secret]
+            opentracing.inject_header_dict(headers, check_destination=False)
+
             try:
+                # Keep track of attempts made so we can bail if we don't manage to
+                # connect to the target after N tries.
+                attempts = 0
                 # We keep retrying the same request for timeouts. This is so that we
                 # have a good idea that the request has either succeeded or failed
                 # on the master, and so whether we should clean up or not.
                 while True:
-                    headers: Dict[bytes, List[bytes]] = {}
-                    # Add an authorization header, if configured.
-                    if replication_secret:
-                        headers[b"Authorization"] = [
-                            b"Bearer " + replication_secret
-                        ]
-                    opentracing.inject_header_dict(headers, check_destination=False)
                     try:
                         result = await request_func(uri, data, headers=headers)
                         break
@@ -255,11 +264,27 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
                         if not cls.RETRY_ON_TIMEOUT:
                             raise
 
-                    logger.warning("%s request timed out; retrying", cls.NAME)
+                        logger.warning("%s request timed out; retrying", cls.NAME)
+
+                        # If we timed out we probably don't need to worry about backing
+                        # off too much, but let's just wait a little anyway.
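Editor's note: the `_id` → `_cache_key` rename clarifies what the field is for: `_condition_checker` memoises per-event condition results under that key, so shared conditions (e.g. `room_member_count`) are evaluated once rather than per user. A self-contained sketch of that caching loop, with a hypothetical `matches` callback standing in for `PushRuleEvaluator.matches`:

```python
from typing import Callable, Dict, List

def check_conditions(
    conditions: List[dict],
    matches: Callable[[dict], bool],
    cache: Dict[str, bool],
) -> bool:
    for cond in conditions:
        key = cond.get("_cache_key")
        if key is not None and key in cache:
            if not cache[key]:
                return False  # known-failing condition: this rule can't match
            continue          # known-passing condition: skip re-evaluation

        res = bool(matches(cond))
        if key is not None:
            cache[key] = res  # shared across every user's copy of the rule
        if not res:
            return False
    return True
```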
+                        await clock.sleep(1)
+                    except (ConnectError, DNSLookupError) as e:
+                        if not cls.RETRY_ON_CONNECT_ERROR:
+                            raise
+                        if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS:
+                            raise
+
+                        delay = 2 ** attempts
+                        logger.warning(
+                            "%s request connection failed; retrying in %ds: %r",
+                            cls.NAME,
+                            delay,
+                            e,
+                        )
 
-                    # If we timed out we probably don't need to worry about backing
-                    # off too much, but lets just wait a little anyway.
-                    await clock.sleep(1)
+                        await clock.sleep(delay)
+                        attempts += 1
             except HttpResponseException as e:
                 # We convert to SynapseError as we know that it was a SynapseError
                 # on the main process that we should send to the client. (And
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index b5b84c09ae..14706a0817 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -54,6 +54,6 @@ async def insert_client_ip(
 
         self.client_ip_last_seen.set(key, now)
 
-        self.hs.get_tcp_replication().send_user_ip(
+        self.hs.get_replication_command_handler().send_user_ip(
             user_id, access_token, ip, user_agent, device_id, now
         )
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 1b8479b0b4..deeaaec4e6 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -380,7 +380,7 @@ async def process_replication_rows(
             # changes.
             hosts = {row.entity for row in rows if not row.entity.startswith("@")}
             for host in hosts:
-                self.federation_sender.send_device_messages(host)
+                self.federation_sender.send_device_messages(host, immediate=False)
 
         elif stream_name == ToDeviceStream.NAME:
             # The to_device stream includes stuff to be pushed to both local
@@ -462,6 +462,8 @@ async def _save_and_send_ack(self) -> None:
             # We ACK this token over replication so that the master can drop
             # its in memory queues
-            self._hs.get_tcp_replication().send_federation_ack(current_position)
+            self._hs.get_replication_command_handler().send_federation_ack(
+                current_position
+            )
         except Exception:
             logger.exception("Error updating federation stream position")
diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py
index aaf91e5e02..bf7d017968 100644
--- a/synapse/replication/tcp/external_cache.py
+++ b/synapse/replication/tcp/external_cache.py
@@ -21,7 +21,7 @@
 from synapse.util import json_decoder, json_encoder
 
 if TYPE_CHECKING:
-    from txredisapi import RedisProtocol
+    from txredisapi import ConnectionHandler
 
     from synapse.server import HomeServer
@@ -63,7 +63,7 @@ class ExternalCache:
     def __init__(self, hs: "HomeServer"):
         if hs.config.redis.redis_enabled:
             self._redis_connection: Optional[
-                "RedisProtocol"
+                "ConnectionHandler"
             ] = hs.get_outbound_redis_connection()
         else:
             self._redis_connection = None
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 0d2013a3cf..b217c35f99 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -295,9 +295,7 @@ async def _process_command(
             raise Exception("Unrecognised command %s in stream queue", cmd.NAME)
 
     def start_replication(self, hs: "HomeServer") -> None:
-        """Helper method to start a replication connection to the remote server
-        using TCP.
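Editor's note: the new `ConnectError`/`DNSLookupError` handling retries with exponential backoff — delays of 1, 2, 4, ... seconds for up to `RETRY_ON_CONNECT_ERROR_ATTEMPTS` extra tries, hence the `# =63s (2^6-1)` note (1+2+4+8+16+32 = 63). A condensed asyncio sketch of the same policy, with the builtin `ConnectionError` standing in for Twisted's connection errors:

```python
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")
RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5

async def send_with_retries(request: Callable[[], Awaitable[T]]) -> T:
    attempts = 0
    while True:
        try:
            return await request()
        except ConnectionError:
            if attempts > RETRY_ON_CONNECT_ERROR_ATTEMPTS:
                raise  # give up after ~63s of cumulative backoff
            await asyncio.sleep(2 ** attempts)  # 1s, 2s, 4s, ...
            attempts += 1
```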
- """ + """Helper method to start replication.""" if hs.config.redis.redis_enabled: from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, @@ -711,7 +709,7 @@ def send_remote_server_up(self, server: str) -> None: self.send_command(RemoteServerUpCommand(server)) def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None: - """Called when a new update is available to stream to clients. + """Called when a new update is available to stream to Redis subscribers. We need to check if the client is interested in the stream or not """ diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 3170f7c59b..989c5be032 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -93,7 +93,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_handler: "ReplicationCommandHandler" synapse_stream_name: str - synapse_outbound_redis_connection: txredisapi.RedisProtocol + synapse_outbound_redis_connection: txredisapi.ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) @@ -313,7 +313,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): protocol = RedisSubscriber def __init__( - self, hs: "HomeServer", outbound_redis_connection: txredisapi.RedisProtocol + self, hs: "HomeServer", outbound_redis_connection: txredisapi.ConnectionHandler ): super().__init__( @@ -325,7 +325,7 @@ def __init__( password=hs.config.redis.redis_password, ) - self.synapse_handler = hs.get_tcp_replication() + self.synapse_handler = hs.get_replication_command_handler() self.synapse_stream_name = hs.hostname self.synapse_outbound_redis_connection = outbound_redis_connection @@ -353,7 +353,7 @@ def lazyConnection( reconnect: bool = True, password: Optional[str] = None, replyTimeout: int = 30, -) -> txredisapi.RedisProtocol: +) -> txredisapi.ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the connections is lost. """ diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 494e42a2be..c6870df8f9 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -44,7 +44,7 @@ class ReplicationStreamProtocolFactory(ServerFactory): """Factory for new replication connections.""" def __init__(self, hs: "HomeServer"): - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() self.clock = hs.get_clock() self.server_name = hs.config.server.server_name @@ -67,8 +67,8 @@ def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol: class ReplicationStreamer: """Handles replication connections. - This needs to be poked when new replication data may be available. When new - data is available it will propagate to all connected clients. + This needs to be poked when new replication data may be available. + When new data is available it will propagate to all Redis subscribers. """ def __init__(self, hs: "HomeServer"): @@ -85,7 +85,7 @@ def __init__(self, hs: "HomeServer"): self.is_looping = False self.pending_updates = False - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() # Set of streams to replicate. 
         self.streams = self.command_handler.get_streams_to_replicate()
@@ -109,7 +109,7 @@ def __init__(self, hs: "HomeServer"):
 
     def on_notifier_poke(self) -> None:
         """Checks if there is actually any new data and sends it to the
-        connections if there are.
+        Redis subscribers if there are.
 
         This should get called each time new data is available, even if it
         is currently being executed, so that nothing gets missed
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 23d631a769..495f2f0285 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -316,7 +316,19 @@ def __init__(self, hs: "HomeServer"):
 class TypingStream(Stream):
     @attr.s(slots=True, frozen=True, auto_attribs=True)
     class TypingStreamRow:
+        """
+        An entry in the typing stream.
+        Describes all the users that are 'typing' right now in one room.
+
+        When a user stops typing, it will be streamed as a new update with that
+        user absent; you can think of the `user_ids` list as overwriting the
+        entire list that was there previously.
+        """
+
+        # The room that this update is for.
         room_id: str
+
+        # All the users that are 'typing' right now in the specified room.
         user_ids: List[str]
 
     NAME = "typing"
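Editor's note: the new `TypingStreamRow` docstring describes overwrite semantics — each row replaces the full typing list for its room rather than being merged into it. A small illustration of what that means for a consumer:

```python
from typing import Dict, List, Set

typing_by_room: Dict[str, Set[str]] = {}

def on_typing_update(room_id: str, user_ids: List[str]) -> None:
    # Overwrite, don't merge: absence from `user_ids` means "stopped typing".
    typing_by_room[room_id] = set(user_ids)

on_typing_update("!room:example.org", ["@alice:example.org", "@bob:example.org"])
on_typing_update("!room:example.org", ["@alice:example.org"])  # bob stopped
assert typing_by_room["!room:example.org"] == {"@alice:example.org"}
```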
diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html
index 00e1dcdbb8..1ba850369a 100644
--- a/synapse/res/templates/sso_auth_account_details.html
+++ b/synapse/res/templates/sso_auth_account_details.html
@@ -130,22 +130,22 @@
-      <h1>Your account is nearly ready</h1>
-      <p>Check your details before creating an account on {{ server_name }}</p>
+      <h1>Create your account</h1>
+      <p>This is required. Continue to create your account on {{ server_name }}. You can't change this later.</p>
-          <…>
+          <…>
           <div …>@</div>
-          <input …>
+          <input …>
           <div …>:{{ server_name }}</div>
         {% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
-          <h2>{% if idp.idp_icon %}<img src="…"/>{% endif %}Information from {{ idp.idp_name }}</h2>
+          <h2>{% if idp.idp_icon %}<img src="…"/>{% endif %}Optional data from {{ idp.idp_name }}</h2>
         {% if user_attributes.avatar_url %}
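Editor's note: this template hunk lost its HTML markup to extraction; the tag structure above is reconstructed from the surviving text, with `<…>` marking attribute-only lines that could not be recovered. Tying the `confirm_localpart` pieces together (the OIDC mapping config, `UserAttributeDict`, the SSO routing change, and this account-details page): when the flag is set, the mapped localpart is only a suggestion and the user is always routed to the page above to confirm it. A sketch of that routing decision, using only fields added in this diff:

```python
from typing import List, Optional
from typing_extensions import TypedDict

class UserAttributeDict(TypedDict):
    localpart: Optional[str]
    confirm_localpart: bool  # new in this diff
    display_name: Optional[str]
    emails: List[str]

def needs_account_details_page(attrs: UserAttributeDict) -> bool:
    # Mirrors the condition added to _get_url_for_next_new_user_step above.
    return attrs["localpart"] is None or attrs["confirm_localpart"]

attrs = UserAttributeDict(
    localpart="alice",       # e.g. rendered via the new `localpart_from_email`
    confirm_localpart=True,  # from the new provider config option
    display_name="Alice",
    emails=["alice@example.org"],
)
assert needs_account_details_page(attrs)
```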