Compare commits

develop..v1.131.0rc1

No commits in common. "develop" and "v1.131.0rc1" have entirely different histories.

195 changed files with 3380 additions and 14447 deletions


@@ -24,13 +24,13 @@ jobs:
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- name: Inspect builder
run: docker buildx inspect
- name: Install Cosign
uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3.9.0
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -72,7 +72,7 @@ jobs:
- name: Build and push all platforms
id: build-and-push
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
with:
push: true
labels: |


@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
uses: dawidd6/action-download-artifact@ac66b43f0e6a346234dd65d4d0c8fbb31cb316e5 # v11
uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}


@@ -78,18 +78,6 @@ jobs:
mdbook build
cp book/welcome_and_overview.html book/index.html
- name: Prepare and publish schema files
run: |
sudo apt-get update && sudo apt-get install -y yq
mkdir -p book/schema
# Remove developer notice before publishing.
rm schema/v*/Do\ not\ edit\ files\ in\ this\ folder
# Copy schema files that are independent from current Synapse version.
cp -r -t book/schema schema/v*/
# Convert config schema from YAML source file to JSON.
yq < schema/synapse-config.schema.yaml \
> book/schema/synapse-config.schema.json
# Deploy to the target directory.
- name: Deploy to gh pages
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0


@@ -61,7 +61,7 @@ jobs:
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
with:
install: true


@@ -1,57 +0,0 @@
name: Schema
on:
pull_request:
paths:
- schema/**
- docs/usage/configuration/config_documentation.md
push:
branches: ["develop", "release-*"]
workflow_dispatch:
jobs:
validate-schema:
name: Ensure Synapse config schema is valid
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- name: Install check-jsonschema
run: pip install check-jsonschema==0.33.0
- name: Validate meta schema
run: check-jsonschema --check-metaschema schema/v*/meta.schema.json
- name: Validate schema
run: |-
# Please bump on introduction of a new meta schema.
LATEST_META_SCHEMA_VERSION=v1
check-jsonschema \
--schemafile="schema/$LATEST_META_SCHEMA_VERSION/meta.schema.json" \
schema/synapse-config.schema.yaml
- name: Validate default config
# Populates the empty instance with default values and checks against the schema.
run: |-
echo "{}" | check-jsonschema \
--fill-defaults --schemafile=schema/synapse-config.schema.yaml -
check-doc-generation:
name: Ensure generated documentation is up-to-date
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: "3.x"
- name: Install PyYAML
run: pip install PyYAML==6.0.2
- name: Regenerate config documentation
run: |
scripts-dev/gen_config_documentation.py \
schema/synapse-config.schema.yaml \
> docs/usage/configuration/config_documentation.md
- name: Error in case of any differences
# Errors if there are now any modified files (untracked files are ignored).
run: 'git diff --exit-code'


@@ -85,7 +85,7 @@ jobs:
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -149,7 +149,7 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Setup Poetry
@@ -210,7 +210,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
@@ -227,7 +227,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@0d72692bcfbf448b1e2afa01a67f71b455a9dcec # 1.86.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
with:
components: clippy
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
@@ -247,7 +247,7 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
toolchain: nightly-2025-04-23
toolchain: nightly-2022-12-01
components: clippy
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
@@ -265,7 +265,7 @@ jobs:
uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
with:
# We use nightly so that it correctly groups together imports
toolchain: nightly-2025-04-23
toolchain: nightly-2022-12-01
components: rustfmt
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
@@ -362,7 +362,7 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -404,7 +404,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
# There aren't wheels for some of the older deps, so we need to install
@@ -519,7 +519,7 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Run SyTest
@@ -663,7 +663,7 @@ jobs:
path: synapse
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Prepare Complement's Prerequisites
@@ -695,7 +695,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Rust
uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- run: cargo test


@@ -1,90 +1,3 @@
# Synapse 1.133.0rc1 (2025-06-24)
### Features
- Add support for the [MSC4260 user report API](https://github.com/matrix-org/matrix-spec-proposals/pull/4260). ([\#18120](https://github.com/element-hq/synapse/issues/18120))
### Bugfixes
- Fix an issue where, during state resolution for v11 rooms, Synapse would incorrectly calculate the power level of the creator when there was no power levels event in the room. ([\#18534](https://github.com/element-hq/synapse/issues/18534), [\#18547](https://github.com/element-hq/synapse/issues/18547))
- Fix long-standing bug where sliding sync did not honour the `room_id_to_include` config option. ([\#18535](https://github.com/element-hq/synapse/issues/18535))
- Fix an issue where "Lock timeout is getting excessive" warnings would be logged even when the lock timeout was <10 minutes. ([\#18543](https://github.com/element-hq/synapse/issues/18543))
- Fix an issue where Synapse could calculate the wrong power level for the creator of the room if there was no power levels event. ([\#18545](https://github.com/element-hq/synapse/issues/18545))
### Improved Documentation
- Generate config documentation from JSON Schema file. ([\#18528](https://github.com/element-hq/synapse/issues/18528))
- Fix typo in user type documentation. ([\#18568](https://github.com/element-hq/synapse/issues/18568))
### Internal Changes
- Increase performance of introspecting access tokens when using delegated auth. ([\#18357](https://github.com/element-hq/synapse/issues/18357), [\#18561](https://github.com/element-hq/synapse/issues/18561))
- Log user deactivations. ([\#18541](https://github.com/element-hq/synapse/issues/18541))
- Enable [`flake8-logging`](https://docs.astral.sh/ruff/rules/#flake8-logging-log) and [`flake8-logging-format`](https://docs.astral.sh/ruff/rules/#flake8-logging-format-g) rules in Ruff and fix related issues throughout the codebase. ([\#18542](https://github.com/element-hq/synapse/issues/18542))
- Clean up old, unused rows from the `device_federation_inbox` table. ([\#18546](https://github.com/element-hq/synapse/issues/18546))
- Run config schema CI on develop and release branches. ([\#18551](https://github.com/element-hq/synapse/issues/18551))
- Add support for Twisted `25.5.0`+ releases. ([\#18577](https://github.com/element-hq/synapse/issues/18577))
- Update PyO3 to version 0.25. ([\#18578](https://github.com/element-hq/synapse/issues/18578))
### Updates to locked dependencies
* Bump actions/setup-python from 5.5.0 to 5.6.0. ([\#18555](https://github.com/element-hq/synapse/issues/18555))
* Bump base64 from 0.21.7 to 0.22.1. ([\#18559](https://github.com/element-hq/synapse/issues/18559))
* Bump dawidd6/action-download-artifact from 9 to 11. ([\#18556](https://github.com/element-hq/synapse/issues/18556))
* Bump headers from 0.4.0 to 0.4.1. ([\#18529](https://github.com/element-hq/synapse/issues/18529))
* Bump requests from 2.32.2 to 2.32.4. ([\#18533](https://github.com/element-hq/synapse/issues/18533))
* Bump types-requests from 2.32.0.20250328 to 2.32.4.20250611. ([\#18558](https://github.com/element-hq/synapse/issues/18558))
# Synapse 1.132.0 (2025-06-17)
### Improved Documentation
- Improvements to generate config documentation from JSON Schema file. ([\#18522](https://github.com/element-hq/synapse/issues/18522))
# Synapse 1.132.0rc1 (2025-06-10)
### Features
- Add support for [MSC4155](https://github.com/matrix-org/matrix-spec-proposals/pull/4155) Invite Filtering. ([\#18288](https://github.com/element-hq/synapse/issues/18288))
- Add experimental `user_may_send_state_event` module API callback. ([\#18455](https://github.com/element-hq/synapse/issues/18455))
- Add experimental `get_media_config_for_user` and `is_user_allowed_to_upload_media_of_size` module API callbacks that allow overriding of media repository maximum upload size. ([\#18457](https://github.com/element-hq/synapse/issues/18457))
- Add experimental `get_ratelimit_override_for_user` module API callback that allows overriding of per-user ratelimits. ([\#18458](https://github.com/element-hq/synapse/issues/18458))
- Pass `room_config` argument to `user_may_create_room` spam checker module callback. ([\#18486](https://github.com/element-hq/synapse/issues/18486))
- Support configuration of default and extra user types. ([\#18456](https://github.com/element-hq/synapse/issues/18456))
- Successful requests to `/_matrix/app/v1/ping` will now force Synapse to reattempt delivering transactions to appservices. ([\#18521](https://github.com/element-hq/synapse/issues/18521))
- Support the import of the `RatelimitOverride` type from `synapse.module_api` in modules and rename `messages_per_second` to `per_second`. ([\#18513](https://github.com/element-hq/synapse/issues/18513))
### Bugfixes
- Remove destinations from sending if not whitelisted. ([\#18484](https://github.com/element-hq/synapse/issues/18484))
- Fixed the room summary API incorrectly returning that a room is private when the join rule is omitted by the remote server. Contributed by @nexy7574. ([\#18493](https://github.com/element-hq/synapse/issues/18493))
- Prevent users from adding themselves to their own user ignore list. ([\#18508](https://github.com/element-hq/synapse/issues/18508))
### Improved Documentation
- Generate config documentation from JSON Schema file. ([\#17892](https://github.com/element-hq/synapse/issues/17892))
- Mention `CAP_NET_BIND_SERVICE` as an alternative to running Synapse as root in order to bind to a privileged port. ([\#18408](https://github.com/element-hq/synapse/issues/18408))
- Surface hidden Admin API documentation regarding fetching of scheduled tasks. ([\#18516](https://github.com/element-hq/synapse/issues/18516))
- Mark the new module APIs in this release as experimental. ([\#18536](https://github.com/element-hq/synapse/issues/18536))
### Internal Changes
- Mark dehydrated devices in the [List All User Devices Admin API](https://element-hq.github.io/synapse/latest/admin_api/user_admin_api.html#list-all-devices). ([\#18252](https://github.com/element-hq/synapse/issues/18252))
- Reduce disk wastage by cleaning up `received_transactions` older than 1 day, rather than 30 days. ([\#18310](https://github.com/element-hq/synapse/issues/18310))
- Distinguish all vs local events being persisted in the "Event Send Time Quantiles" graph (Grafana). ([\#18510](https://github.com/element-hq/synapse/issues/18510))
# Synapse 1.131.0 (2025-06-03)
No significant changes since 1.131.0rc1.
# Synapse 1.131.0rc1 (2025-05-28)
### Features

Cargo.lock (generated, 1262 lines changed): file diff suppressed because it is too large.


@@ -1 +0,0 @@
Support for [MSC4235](https://github.com/matrix-org/matrix-spec-proposals/pull/4235): `via` query param for the hierarchy endpoint. Contributed by Krishan (@kfiven).


@@ -1 +0,0 @@
Add `forget_forced_upon_leave` capability as per [MSC4267](https://github.com/matrix-org/matrix-spec-proposals/pull/4267).


@@ -1 +0,0 @@
Add `federated_user_may_invite` spam checker callback which receives the entire invite event. Contributed by @tulir @ Beeper.


@@ -1 +0,0 @@
Fix `KeyError` on background updates when using split main/state databases.


@@ -1 +0,0 @@
Improve docstring on `simple_upsert_many`.


@@ -1 +0,0 @@
Improve performance of device deletion by adding missing index.


@@ -1 +0,0 @@
Better handling of ratelimited requests.


@@ -1 +0,0 @@
Better handling of ratelimited requests.


@@ -1 +0,0 @@
Speed up bulk device deletion.


@@ -1 +0,0 @@
Ensure policy servers are not asked to scan policy server change events, allowing rooms to disable the use of a policy server while the policy server is down.


@@ -220,24 +220,29 @@
"yBucketBound": "auto"
},
{
"datasource": {
"uid": "${DS_PROMETHEUS}",
"type": "prometheus"
},
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"description": "",
"fieldConfig": {
"defaults": {
"links": []
},
"overrides": []
},
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 1
},
"hiddenSeries": false,
"id": 152,
"legend": {
"avg": false,
@@ -250,81 +255,71 @@
"values": false
},
"lines": true,
"linewidth": 0,
"links": [],
"nullPointMode": "connected",
"options": {
"alertThreshold": true
},
"paceLength": 10,
"pluginVersion": "10.4.3",
"percentage": false,
"pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "Avg",
"fill": 0,
"linewidth": 3,
"$$hashKey": "object:48"
"linewidth": 3
},
{
"alias": "99%",
"color": "#C4162A",
"fillBelowTo": "90%",
"$$hashKey": "object:49"
"fillBelowTo": "90%"
},
{
"alias": "90%",
"color": "#FF7383",
"fillBelowTo": "75%",
"$$hashKey": "object:50"
"fillBelowTo": "75%"
},
{
"alias": "75%",
"color": "#FFEE52",
"fillBelowTo": "50%",
"$$hashKey": "object:51"
"fillBelowTo": "50%"
},
{
"alias": "50%",
"color": "#73BF69",
"fillBelowTo": "25%",
"$$hashKey": "object:52"
"fillBelowTo": "25%"
},
{
"alias": "25%",
"color": "#1F60C4",
"fillBelowTo": "5%",
"$$hashKey": "object:53"
"fillBelowTo": "5%"
},
{
"alias": "5%",
"lines": false,
"$$hashKey": "object:54"
"lines": false
},
{
"alias": "Average",
"color": "rgb(255, 255, 255)",
"lines": true,
"linewidth": 3,
"$$hashKey": "object:55"
"linewidth": 3
},
{
"alias": "Local events being persisted",
"color": "#96d98D",
"points": true,
"yaxis": 2,
"zindex": -3,
"$$hashKey": "object:56"
},
{
"$$hashKey": "object:329",
"alias": "Events",
"color": "#B877D9",
"alias": "All events being persisted",
"hideTooltip": true,
"points": true,
"yaxis": 2,
"zindex": -3
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"datasource": {
@@ -389,20 +384,7 @@
},
"expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
"legendFormat": "Average",
"refId": "H",
"editorMode": "code",
"range": true
},
{
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
"hide": false,
"instant": false,
"legendFormat": "Local events being persisted",
"refId": "E",
"editorMode": "code"
"refId": "H"
},
{
"datasource": {
@@ -411,9 +393,8 @@
"expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))",
"hide": false,
"instant": false,
"legendFormat": "All events being persisted",
"refId": "I",
"editorMode": "code"
"legendFormat": "Events",
"refId": "E"
}
],
"thresholds": [
@@ -447,9 +428,7 @@
"xaxis": {
"mode": "time",
"show": true,
"values": [],
"name": null,
"buckets": null
"values": []
},
"yaxes": [
{
@@ -471,20 +450,7 @@
],
"yaxis": {
"align": false
},
"bars": false,
"dashes": false,
"description": "",
"fill": 0,
"fillGradient": 0,
"hiddenSeries": false,
"linewidth": 0,
"percentage": false,
"points": false,
"stack": false,
"steppedLine": false,
"timeFrom": null,
"timeShift": null
}
},
{
"aliasColors": {},

debian/changelog (vendored, 24 lines changed)

@@ -1,27 +1,3 @@
matrix-synapse-py3 (1.133.0~rc1) stable; urgency=medium
* New Synapse release 1.133.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 24 Jun 2025 11:57:47 +0100
matrix-synapse-py3 (1.132.0) stable; urgency=medium
* New Synapse release 1.132.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 17 Jun 2025 13:16:20 +0100
matrix-synapse-py3 (1.132.0~rc1) stable; urgency=medium
* New Synapse release 1.132.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 10 Jun 2025 11:15:18 +0100
matrix-synapse-py3 (1.131.0) stable; urgency=medium
* New Synapse release 1.131.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 03 Jun 2025 14:36:55 +0100
matrix-synapse-py3 (1.131.0~rc1) stable; urgency=medium
* New synapse release 1.131.0rc1.


@@ -127,8 +127,6 @@ experimental_features:
msc3983_appservice_otk_claims: true
# Proxy key queries to exclusive ASes
msc3984_appservice_key_query: true
# Invite filtering
msc4155_enabled: true
server_notices:
system_mxid_localpart: _server


@@ -63,18 +63,6 @@ mdbook serve
The URL at which the docs can be viewed will be logged.
## Synapse configuration documentation
The [Configuration
Manual](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html)
page is generated from a YAML file,
[schema/synapse-config.schema.yaml](../schema/synapse-config.schema.yaml). To
add new options or modify existing ones, first edit that file, then run
[scripts-dev/gen_config_documentation.py](../scripts-dev/gen_config_documentation.py)
to generate an updated Configuration Manual markdown file.
Build the book as described above to preview it in a web browser.
## Configuration and theming
The look and behaviour of the website is configured by the [book.toml](../book.toml) file


@@ -49,8 +49,6 @@
- [Background update controller callbacks](modules/background_update_controller_callbacks.md)
- [Account data callbacks](modules/account_data_callbacks.md)
- [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md)
- [Media repository callbacks](modules/media_repository_callbacks.md)
- [Ratelimit callbacks](modules/ratelimit_callbacks.md)
- [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
- [Workers](workers.md)
- [Using `synctl` with Workers](synctl_workers.md)
@@ -68,7 +66,6 @@
- [Registration Tokens](usage/administration/admin_api/registration_tokens.md)
- [Manipulate Room Membership](admin_api/room_membership.md)
- [Rooms](admin_api/rooms.md)
- [Scheduled tasks](admin_api/scheduled_tasks.md)
- [Server Notices](admin_api/server_notices.md)
- [Statistics](admin_api/statistics.md)
- [Users](admin_api/user_admin_api.md)


@@ -163,8 +163,7 @@ Body parameters:
- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
- `user_type` - **string** or null, optional. If not provided, the user type will
not be changed. If `null` is given, the user type will be cleared.
Other allowed options are: `bot` and `support`, plus any extra values defined in the homeserver
[configuration](../usage/configuration/config_documentation.md#user_types).
Other allowed options are: `bot` and `support`.
## List Accounts
### List Accounts (V2)
@@ -955,8 +954,7 @@ A response body like the following is returned:
"last_seen_ip": "1.2.3.4",
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775024,
"user_id": "<user_id>",
"dehydrated": false
"user_id": "<user_id>"
},
{
"device_id": "AUIECTSRND",
@@ -964,8 +962,7 @@ A response body like the following is returned:
"last_seen_ip": "1.2.3.5",
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
"last_seen_ts": 1474491775025,
"user_id": "<user_id>",
"dehydrated": false
"user_id": "<user_id>"
}
],
"total": 2
@@ -995,7 +992,6 @@ The following fields are returned in the JSON response body:
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- `user_id` - Owner of device.
- `dehydrated` - Whether the device is a dehydrated device.
- `total` - Total number of user's devices.


@@ -1,66 +0,0 @@
# Media repository callbacks
Media repository callbacks allow module developers to customise the behaviour of the
media repository on a per-user basis. Media repository callbacks can be registered
using the module API's `register_media_repository_callbacks` method.
The available media repository callbacks are:
### `get_media_config_for_user`
_First introduced in Synapse v1.132.0_
```python
async def get_media_config_for_user(user_id: str) -> Optional[JsonDict]
```
**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**
Called when processing a request from a client for the
[media config endpoint](https://spec.matrix.org/latest/client-server-api/#get_matrixclientv1mediaconfig).
The arguments passed to this callback are:
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
If the callback returns a dictionary then it will be used as the body of the response to the
client.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `None`, Synapse falls through to the next one. The value of the first
callback that does not return `None` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
If no module returns a non-`None` value then the default media config will be returned.
### `is_user_allowed_to_upload_media_of_size`
_First introduced in Synapse v1.132.0_
```python
async def is_user_allowed_to_upload_media_of_size(user_id: str, size: int) -> bool
```
**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**
Called before media is accepted for upload from a user, in case the module needs to
enforce a different limit for the particular user.
The arguments passed to this callback are:
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
* `size`: The size in bytes of the media that the user is requesting to upload.
If the module returns `False`, the current request will be denied with the error code
`M_TOO_LARGE` and the HTTP status code 413.
If multiple modules implement this callback, they will be considered in order. If a callback
returns `True`, Synapse falls through to the next one. The value of the first callback that
returns `False` will be used. If this happens, Synapse will not call any of the subsequent
implementations of this callback.
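To make the flow above concrete, here is a minimal sketch of a module wiring up both callbacks. The class name, config handling, and the 100 MiB limit are hypothetical, and the keyword-argument form of `register_media_repository_callbacks` is assumed; only the method name and the callback signatures come from this page.
```python
from typing import Optional

from synapse.module_api import JsonDict, ModuleApi

ONE_HUNDRED_MIB = 100 * 1024 * 1024  # illustrative limit


class ExampleMediaCallbacks:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_media_repository_callbacks(
            get_media_config_for_user=self.get_media_config_for_user,
            is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size,
        )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        # Returning None falls through to the next module, or to the
        # default media config if no module answers.
        if user_id.endswith(":example.com"):
            return {"m.upload.size": ONE_HUNDRED_MIB}
        return None

    async def is_user_allowed_to_upload_media_of_size(
        self, user_id: str, size: int
    ) -> bool:
        # Returning False rejects the upload with M_TOO_LARGE and HTTP 413.
        return size <= ONE_HUNDRED_MIB
```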


@@ -1,43 +0,0 @@
# Ratelimit callbacks
Ratelimit callbacks allow module developers to override ratelimit settings dynamically whilst
Synapse is running. Ratelimit callbacks can be registered using the module API's
`register_ratelimit_callbacks` method.
The available ratelimit callbacks are:
### `get_ratelimit_override_for_user`
_First introduced in Synapse v1.132.0_
```python
async def get_ratelimit_override_for_user(user: str, limiter_name: str) -> Optional[synapse.module_api.RatelimitOverride]
```
**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**
Called when constructing a ratelimiter of a particular type for a user. The module can
return a `per_second` and `burst_count` to be used, or `None` if
the default settings are adequate. The user is represented by their Matrix user ID
(e.g. `@alice:example.com`). The limiter name is usually taken from the `RatelimitSettings` key
value.
The limiters that are currently supported are:
- `rc_invites.per_room`
- `rc_invites.per_user`
- `rc_invites.per_issuer`
The `RatelimitOverride` return type has the following fields:
- `per_second: float`. The number of actions that can be performed in a second. `0.0` means that ratelimiting is disabled.
- `burst_count: int`. The number of actions that can be performed before being limited.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `None`, Synapse falls through to the next one. The value of the first
callback that does not return `None` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback. If no module returns a non-`None` value
then the default settings will be used.
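As an illustration, here is a sketch of a module that disables the per-room invite limit for a single trusted bot. The class name and the bot's user ID are hypothetical, and the keyword-argument form of `register_ratelimit_callbacks` is assumed; the callback signature and the `RatelimitOverride` fields are as documented above.
```python
from typing import Optional

from synapse.module_api import ModuleApi, RatelimitOverride


class ExampleRatelimitCallbacks:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_ratelimit_callbacks(
            get_ratelimit_override_for_user=self.get_ratelimit_override_for_user,
        )

    async def get_ratelimit_override_for_user(
        self, user: str, limiter_name: str
    ) -> Optional[RatelimitOverride]:
        # per_second=0.0 disables ratelimiting entirely (see above).
        if limiter_name == "rc_invites.per_room" and user == "@bot:example.com":
            return RatelimitOverride(per_second=0.0, burst_count=0)
        # None falls through to the next module or the default settings.
        return None
```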


@@ -80,8 +80,6 @@ Called when processing an invitation, both when one is created locally or when
receiving an invite over federation. Both inviter and invitee are represented by
their Matrix user ID (e.g. `@alice:example.com`).
Note that federated invites will call `federated_user_may_invite` before this callback.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
@@ -99,34 +97,6 @@ be used. If this happens, Synapse will not call any of the subsequent implementa
this callback.
### `federated_user_may_invite`
_First introduced in Synapse v1.133.0_
```python
async def federated_user_may_invite(event: "synapse.events.EventBase") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```
Called when processing an invitation received over federation. Unlike `user_may_invite`,
this callback receives the entire event, including any stripped state in the `unsigned`
section, not just the room and user IDs.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
If all of the callbacks return `synapse.module_api.NOT_SPAM`, Synapse will also fall
through to the `user_may_invite` callback before approving the invite.
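For illustration, a sketch of such a callback that rejects invites from a blocklisted server follows; the blocklist is hypothetical, and the function would typically be registered through the module API's `register_spam_checker_callbacks` method.
```python
from typing import Union

from synapse.events import EventBase
from synapse.module_api import NOT_SPAM, errors

BLOCKED_SERVERS = {"spam.example.org"}  # hypothetical blocklist


async def federated_user_may_invite(
    event: EventBase,
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]:
    # The full event is available, so the sender's server (and the
    # stripped state in `unsigned`) can be inspected directly.
    sender_server = event.sender.split(":", 1)[1]
    if sender_server in BLOCKED_SERVERS:
        return errors.Codes.FORBIDDEN
    return NOT_SPAM
```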
### `user_may_send_3pid_invite`
_First introduced in Synapse v1.45.0_
@@ -189,19 +159,12 @@ _First introduced in Synapse v1.37.0_
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
_Changed in Synapse v1.132.0: Added the `room_config` argument. Callbacks that only expect a single `user_id` argument are still supported._
```python
async def user_may_create_room(user_id: str, room_config: synapse.module_api.JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```
Called when processing a room creation request.
The arguments passed to this callback are:
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`).
* `room_config`: The contents of the body of a [/createRoom request](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom) as a dictionary.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
@@ -276,41 +239,6 @@ be used. If this happens, Synapse will not call any of the subsequent implementa
this callback.
### `user_may_send_state_event`
_First introduced in Synapse v1.132.0_
```python
async def user_may_send_state_event(user_id: str, room_id: str, event_type: str, state_key: str, content: JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
```
**<span style="color:red">
Caution: This callback is currently experimental. The method signature or behaviour
may change without notice.
</span>**
Called when processing a request to [send state events](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey) to a room.
The arguments passed to this callback are:
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) sending the state event.
* `room_id`: The ID of the room that the requested state event is being sent to.
* `event_type`: The requested type of event.
* `state_key`: The requested state key.
* `content`: The requested event contents.
The callback must return one of:
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
decide to reject it.
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
If multiple modules implement this callback, they will be considered in order. If a
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
be used. If this happens, Synapse will not call any of the subsequent implementations of
this callback.
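For illustration, here is a sketch of a callback that only lets users matching a hypothetical admin naming convention change a room's topic; registration via `register_spam_checker_callbacks` is assumed.
```python
from typing import Union

from synapse.module_api import NOT_SPAM, JsonDict, errors


async def user_may_send_state_event(
    user_id: str, room_id: str, event_type: str, state_key: str, content: JsonDict
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]:
    # Allow everything except topic changes by non-"admin" users.
    if event_type == "m.room.topic" and not user_id.startswith("@admin"):
        return errors.Codes.FORBIDDEN
    return NOT_SPAM
```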
### `check_username_for_spam`


@@ -5,10 +5,10 @@ It is recommended to put a reverse proxy such as
[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
[Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
[HAProxy](https://www.haproxy.org/) or
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse.
This has the advantage of being able to expose the default HTTPS port (443) to Matrix
clients without requiring Synapse to bind to a privileged port (port numbers less than
1024), avoiding the need for `CAP_NET_BIND_SERVICE` or running as root.
[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
of doing so is that it means that you can expose the default https port
(443) to Matrix clients without needing to run Synapse with root
privileges.
You should configure your reverse proxy to forward requests to `/_matrix` or
`/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and


@@ -63,7 +63,7 @@ class ExampleSpamChecker:
async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
return True # allow all invites
async def user_may_create_room(self, userid, room_config):
async def user_may_create_room(self, userid):
return True # allow all room creations
async def user_may_create_room_alias(self, userid, room_alias):


@@ -255,7 +255,7 @@ line to `/etc/default/matrix-synapse`:
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
This made a significant difference on Python 2.7 - it's unclear how
much of an improvement it provides on Python 3.x.

File diff suppressed because it is too large.

poetry.lock (generated, 129 lines changed)

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
files = [
{file = "authlib-1.5.2-py2.py3-none-any.whl", hash = "sha256:8804dd4402ac5e4a0435ac49e0b6e19e395357cfa632a3f624dcb4f6df13b4b1"},
{file = "authlib-1.5.2.tar.gz", hash = "sha256:fe85ec7e50c5f86f1e2603518bb3b4f632985eb4a355e52256530790e326c512"},
@@ -50,18 +50,19 @@ cryptography = "*"
[[package]]
name = "automat"
version = "25.4.16"
version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
optional = false
python-versions = ">=3.9"
python-versions = "*"
groups = ["main"]
files = [
{file = "automat-25.4.16-py3-none-any.whl", hash = "sha256:04e9bce696a8d5671ee698005af6e5a9fa15354140a87f4870744604dcdd3ba1"},
{file = "automat-25.4.16.tar.gz", hash = "sha256:0017591a5477066e90d26b0e696ddc143baafd87b588cfac8100bc6be9634de0"},
{file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
{file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
]
[package.dependencies]
typing_extensions = {version = "*", markers = "python_version < \"3.10\""}
attrs = ">=19.2.0"
six = "*"
[package.extras]
visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
@@ -450,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
{file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
@@ -493,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
{file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},
@@ -543,7 +544,7 @@ description = "Python wrapper for hiredis"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
{file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},
@@ -889,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
]
@@ -1027,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
@@ -1043,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"url-preview\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"url-preview\""
files = [
{file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"},
{file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"},
@@ -1323,7 +1324,7 @@ description = "An LDAP3 auth provider for Synapse"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
files = [
{file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
{file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},
@@ -1544,7 +1545,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
@@ -1713,7 +1714,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"postgres\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"postgres\""
files = [
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
@@ -1721,6 +1722,7 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@@ -1733,7 +1735,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
]
@@ -1749,7 +1751,7 @@ description = "A Simple library to enable psycopg2 compatability"
optional = true
python-versions = "*"
groups = ["main"]
markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
files = [
{file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
]
@@ -1771,18 +1773,18 @@ files = [
[[package]]
name = "pyasn1-modules"
version = "0.4.2"
version = "0.4.1"
description = "A collection of ASN.1-based protocols modules"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"},
{file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"},
{file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
{file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
]
[package.dependencies]
pyasn1 = ">=0.6.1,<0.7.0"
pyasn1 = ">=0.4.6,<0.7.0"
[[package]]
name = "pycparser"
@@ -1972,7 +1974,7 @@ description = "Python extension wrapping the ICU C++ API"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"user-search\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"user-search\""
files = [
{file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
]
@@ -2021,7 +2023,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"cache-memory\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"cache-memory\""
files = [
{file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
{file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},
@@ -2081,7 +2083,7 @@ description = "Python implementation of SAML Version 2 Standard"
optional = true
python-versions = ">=3.9,<4.0"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
{file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},
@@ -2106,7 +2108,7 @@ description = "Extensions to the standard Python datetime module"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
@@ -2134,7 +2136,7 @@ description = "World timezone definitions, modern and historical"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
{file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
@@ -2254,19 +2256,19 @@ rpds-py = ">=0.7.0"
[[package]]
name = "requests"
version = "2.32.4"
version = "2.32.2"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"},
{file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"},
{file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"},
{file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset_normalizer = ">=2,<4"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
@@ -2498,7 +2500,7 @@ description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
groups = ["main"]
markers = "extra == \"sentry\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"sentry\""
files = [
{file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
{file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},
@@ -2686,7 +2688,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
{file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2702,7 +2704,7 @@ description = "Python bindings for the Apache Thrift RPC system"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
@@ -2764,7 +2766,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"opentracing\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"opentracing\""
files = [
{file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
{file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
@@ -2855,19 +2857,19 @@ keyring = ["keyring (>=15.1)"]
[[package]]
name = "twisted"
version = "25.5.0"
version = "24.7.0"
description = "An asynchronous networking framework written in Python"
optional = false
python-versions = ">=3.8.0"
groups = ["main"]
files = [
{file = "twisted-25.5.0-py3-none-any.whl", hash = "sha256:8559f654d01a54a8c3efe66d533d43f383531ebf8d81d9f9ab4769d91ca15df7"},
{file = "twisted-25.5.0.tar.gz", hash = "sha256:1deb272358cb6be1e3e8fc6f9c8b36f78eb0fa7c2233d2dbe11ec6fee04ea316"},
{file = "twisted-24.7.0-py3-none-any.whl", hash = "sha256:734832ef98108136e222b5230075b1079dad8a3fc5637319615619a7725b0c81"},
{file = "twisted-24.7.0.tar.gz", hash = "sha256:5a60147f044187a127ec7da96d170d49bcce50c6fd36f594e60f4587eff4d394"},
]
[package.dependencies]
attrs = ">=22.2.0"
automat = ">=24.8.0"
attrs = ">=21.3.0"
automat = ">=0.8.0"
constantly = ">=15.1"
hyperlink = ">=17.1.1"
idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}
@@ -2878,20 +2880,19 @@ typing-extensions = ">=4.2.0"
zope-interface = ">=5"
[package.extras]
all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "pydoctor (>=24.11.1,<24.12.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
dev-release = ["pydoctor (>=24.11.1,<24.12.0)", "pydoctor (>=24.11.1,<24.12.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
http2 = ["h2 (>=3.2,<5.0)", "priority (>=1.1.0,<2.0)"]
macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (==1.10.1)", "mypy-zope (==1.0.6)", "priority (>=1.1.0,<2.0)", "pydoctor (>=24.11.1,<24.12.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools", "wsproto"]
osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\""]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
websocket = ["wsproto"]
windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)", "wsproto", "wsproto"]
windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
[[package]]
name = "txredisapi"
@ -2900,7 +2901,7 @@ description = "non-blocking redis client for python"
optional = true
python-versions = "*"
groups = ["main"]
markers = "extra == \"redis\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"redis\""
files = [
{file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
{file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
@ -2993,14 +2994,14 @@ files = [
[[package]]
name = "types-opentracing"
version = "2.4.10.20250622"
version = "2.4.10.6"
description = "Typing stubs for opentracing"
optional = false
python-versions = ">=3.9"
python-versions = "*"
groups = ["dev"]
files = [
{file = "types_opentracing-2.4.10.20250622-py3-none-any.whl", hash = "sha256:26bc21f9e385d54898b47e9bd1fa13f200c2dada50394f6eafd063ed53813062"},
{file = "types_opentracing-2.4.10.20250622.tar.gz", hash = "sha256:00db48b7f57136c45ac3250218bd0f18b9792566dfcbd5ad1de9f7e180347e74"},
{file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"},
{file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"},
]
[[package]]
@ -3057,14 +3058,14 @@ files = [
[[package]]
name = "types-requests"
version = "2.32.4.20250611"
version = "2.32.0.20250328"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"},
{file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"},
{file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
]
[package.dependencies]
@ -3123,14 +3124,14 @@ files = [
[[package]]
name = "urllib3"
version = "2.5.0"
version = "2.2.2"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main", "dev"]
files = [
{file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
{file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
{file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
{file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
]
[package.extras]
@ -3243,7 +3244,7 @@ description = "An XML Schema validator and decoder"
optional = true
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"saml2\" or extra == \"all\""
markers = "extra == \"all\" or extra == \"saml2\""
files = [
{file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
{file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},

View File

@ -74,10 +74,6 @@ select = [
"PIE",
# flake8-executable
"EXE",
# flake8-logging
"LOG",
# flake8-logging-format
"G",
]
[tool.ruff.lint.isort]
@ -101,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.133.0rc1"
version = "1.131.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"

View File

@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
rust-version = "1.81.0"
rust-version = "1.66.0"
[lib]
name = "synapse"
@ -30,27 +30,19 @@ http = "1.1.0"
lazy_static = "1.4.0"
log = "0.4.17"
mime = "0.3.17"
pyo3 = { version = "0.25.1", features = [
pyo3 = { version = "0.24.2", features = [
"macros",
"anyhow",
"abi3",
"abi3-py39",
] }
pyo3-log = "0.12.4"
pythonize = "0.25.0"
pyo3-log = "0.12.0"
pythonize = "0.24.0"
regex = "1.6.0"
sha2 = "0.10.8"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
ulid = "1.1.2"
reqwest = { version = "0.12.15", default-features = false, features = [
"http2",
"stream",
"rustls-tls-native-roots",
] }
http-body-util = "0.1.3"
futures = "0.3.31"
tokio = { version = "1.44.2", features = ["rt", "rt-multi-thread"] }
[features]
extension-module = ["pyo3/extension-module"]

View File

@ -58,15 +58,3 @@ impl NotFoundError {
NotFoundError::new_err(())
}
}
import_exception!(synapse.api.errors, HttpResponseException);
impl HttpResponseException {
pub fn new(status: StatusCode, bytes: Vec<u8>) -> pyo3::PyErr {
HttpResponseException::new_err((
status.as_u16(),
status.canonical_reason().unwrap_or_default(),
bytes,
))
}
}

View File

@ -1,218 +0,0 @@
/*
* This file is licensed under the Affero General Public License (AGPL) version 3.
*
* Copyright (C) 2025 New Vector, Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* See the GNU Affero General Public License for more details:
* <https://www.gnu.org/licenses/agpl-3.0.html>.
*/
use std::{collections::HashMap, future::Future, panic::AssertUnwindSafe, sync::LazyLock};
use anyhow::Context;
use futures::{FutureExt, TryStreamExt};
use pyo3::{exceptions::PyException, prelude::*, types::PyString};
use reqwest::RequestBuilder;
use tokio::runtime::Runtime;
use crate::errors::HttpResponseException;
/// The tokio runtime that we're using to run async Rust libs.
static RUNTIME: LazyLock<Runtime> = LazyLock::new(|| {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(4)
.enable_all()
.build()
.unwrap()
});
/// A reference to the `Deferred` python class.
static DEFERRED_CLASS: LazyLock<PyObject> = LazyLock::new(|| {
Python::with_gil(|py| {
py.import("twisted.internet.defer")
.expect("module 'twisted.internet.defer' should be importable")
.getattr("Deferred")
.expect("module 'twisted.internet.defer' should have a 'Deferred' class")
.unbind()
})
});
/// A reference to the twisted `reactor`.
static TWISTED_REACTOR: LazyLock<Py<PyModule>> = LazyLock::new(|| {
Python::with_gil(|py| {
py.import("twisted.internet.reactor")
.expect("module 'twisted.internet.reactor' should be importable")
.unbind()
})
});
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
let child_module: Bound<'_, PyModule> = PyModule::new(py, "http_client")?;
child_module.add_class::<HttpClient>()?;
// Make sure we fail early if we can't build the lazy statics.
LazyLock::force(&RUNTIME);
LazyLock::force(&DEFERRED_CLASS);
m.add_submodule(&child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import http_client` work.
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.http_client", child_module)?;
Ok(())
}
#[pyclass]
#[derive(Clone)]
struct HttpClient {
client: reqwest::Client,
}
#[pymethods]
impl HttpClient {
#[new]
pub fn py_new(user_agent: &str) -> PyResult<HttpClient> {
// The twisted reactor can only be imported after Synapse has been
// imported, to allow Synapse to change the twisted reactor. If we try
// and import the reactor too early twisted installs a default reactor,
// which can't be replaced.
LazyLock::force(&TWISTED_REACTOR);
Ok(HttpClient {
client: reqwest::Client::builder()
.user_agent(user_agent)
.build()
.context("building reqwest client")?,
})
}
pub fn get<'a>(
&self,
py: Python<'a>,
url: String,
response_limit: usize,
) -> PyResult<Bound<'a, PyAny>> {
self.send_request(py, self.client.get(url), response_limit)
}
pub fn post<'a>(
&self,
py: Python<'a>,
url: String,
response_limit: usize,
headers: HashMap<String, String>,
request_body: String,
) -> PyResult<Bound<'a, PyAny>> {
let mut builder = self.client.post(url);
for (name, value) in headers {
builder = builder.header(name, value);
}
builder = builder.body(request_body);
self.send_request(py, builder, response_limit)
}
}
impl HttpClient {
fn send_request<'a>(
&self,
py: Python<'a>,
builder: RequestBuilder,
response_limit: usize,
) -> PyResult<Bound<'a, PyAny>> {
create_deferred(py, async move {
let response = builder.send().await.context("sending request")?;
let status = response.status();
let mut stream = response.bytes_stream();
let mut buffer = Vec::new();
while let Some(chunk) = stream.try_next().await.context("reading body")? {
if buffer.len() + chunk.len() > response_limit {
Err(anyhow::anyhow!("Response size too large"))?;
}
buffer.extend_from_slice(&chunk);
}
if !status.is_success() {
return Err(HttpResponseException::new(status, buffer));
}
let r = Python::with_gil(|py| buffer.into_pyobject(py).map(|o| o.unbind()))?;
Ok(r)
})
}
}
/// Creates a twisted deferred from the given future, spawning the task on the
/// tokio runtime.
///
/// Does not handle deferred cancellation or contextvars.
fn create_deferred<F, O>(py: Python, fut: F) -> PyResult<Bound<'_, PyAny>>
where
F: Future<Output = PyResult<O>> + Send + 'static,
for<'a> O: IntoPyObject<'a>,
{
let deferred = DEFERRED_CLASS.bind(py).call0()?;
let deferred_callback = deferred.getattr("callback")?.unbind();
let deferred_errback = deferred.getattr("errback")?.unbind();
RUNTIME.spawn(async move {
// TODO: Is it safe to assert unwind safety here? I think so, as we
// don't use anything that could be tainted by the panic afterwards.
// Note that `.spawn(..)` asserts unwind safety on the future too.
let res = AssertUnwindSafe(fut).catch_unwind().await;
Python::with_gil(move |py| {
// Flatten the panic into a standard Python error
let res = match res {
Ok(r) => r,
Err(panic_err) => {
let panic_message = get_panic_message(&panic_err);
Err(PyException::new_err(
PyString::new(py, panic_message).unbind(),
))
}
};
// Send the result to the deferred, via `.callback(..)` or `.errback(..)`
match res {
Ok(obj) => {
TWISTED_REACTOR
.call_method(py, "callFromThread", (deferred_callback, obj), None)
.expect("callFromThread should not fail"); // There's nothing we can really do with errors here
}
Err(err) => {
TWISTED_REACTOR
.call_method(py, "callFromThread", (deferred_errback, err), None)
.expect("callFromThread should not fail"); // There's nothing we can really do with errors here
}
}
});
});
Ok(deferred)
}
/// Try and get the panic message out of the panic
fn get_panic_message<'a>(panic_err: &'a (dyn std::any::Any + Send + 'static)) -> &'a str {
// Apparently this is how you extract the panic message from a panic
if let Some(str_slice) = panic_err.downcast_ref::<&str>() {
str_slice
} else if let Some(string) = panic_err.downcast_ref::<String>() {
string
} else {
"unknown error"
}
}
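The deleted Rust client above is driven from Python through the Deferred returned by `create_deferred`. A minimal sketch of the call pattern, assembled from the MSC3861 call sites that appear later in this diff (the user-agent string is illustrative):

```python
from synapse.logging.context import PreserveLoggingContext
from synapse.synapse_rust.http_client import HttpClient

client = HttpClient(user_agent="Synapse/1.x")  # illustrative user agent

async def introspect_token(uri: str, headers: dict, body: str) -> bytes:
    # The tokio task resolves the Deferred via reactor.callFromThread, so
    # this await suspends until the Rust-side request completes or errors.
    with PreserveLoggingContext():
        return await client.post(
            url=uri,
            response_limit=1 * 1024 * 1024,  # 1 MiB body cap, as in the diff
            headers=headers,
            request_body=body,
        )
```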

View File

@ -27,7 +27,7 @@ pub enum IdentifierError {
impl fmt::Display for IdentifierError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{self:?}")
write!(f, "{:?}", self)
}
}

View File

@ -8,7 +8,6 @@ pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
pub mod http_client;
pub mod identifier;
pub mod matrix_const;
pub mod push;
@ -51,7 +50,6 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
acl::register_module(py, m)?;
push::register_module(py, m)?;
events::register_module(py, m)?;
http_client::register_module(py, m)?;
rendezvous::register_module(py, m)?;
Ok(())

File diff suppressed because it is too large

View File

@ -1,2 +0,0 @@
If you want to update the meta schema, copy this folder and increase its version
number instead.

View File

@ -1,29 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json",
"$vocabulary": {
"https://json-schema.org/draft/2020-12/vocab/core": true,
"https://json-schema.org/draft/2020-12/vocab/applicator": true,
"https://json-schema.org/draft/2020-12/vocab/unevaluated": true,
"https://json-schema.org/draft/2020-12/vocab/validation": true,
"https://json-schema.org/draft/2020-12/vocab/meta-data": true,
"https://json-schema.org/draft/2020-12/vocab/format-annotation": true,
"https://json-schema.org/draft/2020-12/vocab/content": true,
"https://element-hq.github.io/synapse/latest/schema/v1/vocab/documentation": false
},
"$ref": "https://json-schema.org/draft/2020-12/schema",
"properties": {
"io.element.type_name": {
"type": "string",
"description": "Human-readable type of a schema that is displayed instead of the standard JSON Schema types like `object` or `integer`. In case the JSON Schema type contains `null`, this information should be presented alongside the human-readable type name.",
"examples": ["duration", "byte size"]
},
"io.element.post_description": {
"type": "string",
"description": "Additional description of a schema, better suited to be placed less prominently in the generated documentation, e.g., at the end of a section after listings of items and properties.",
"examples": [
"### Advanced uses\n\nThe spent coffee grounds can be added to compost for improving soil and growing plants."
]
}
}
}

View File

@ -1,11 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="refresh" content="0; URL=../meta.schema.json">
<meta charset="UTF-8">
<title>Redirecting to ../meta.schema.json…</title>
</head>
<body>
<p>Redirecting to <a href="../meta.schema.json">../meta.schema.json</a></p>
</body>
</html>

View File

@ -243,7 +243,7 @@ def do_lint() -> Set[str]:
importlib.import_module(module_info.name)
except ModelCheckerException as e:
logger.warning(
"Bad annotation found when importing %s", module_info.name
f"Bad annotation found when importing {module_info.name}"
)
failures.add(format_model_checker_exception(e))

View File

@ -229,7 +229,6 @@ test_packages=(
./tests/msc3902
./tests/msc3967
./tests/msc4140
./tests/msc4155
)
# Enable dirty runs, so tests will reuse the same container where possible.

View File

@ -1,503 +0,0 @@
#!/usr/bin/env python3
"""Generate Synapse documentation from JSON Schema file."""
import json
import re
import sys
from typing import Any, Optional
import yaml
HEADER = """<!-- Document auto-generated by scripts-dev/gen_config_documentation.py -->
# Configuring Synapse
This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified
through the many configuration settings documented here; each config option is explained,
including what the default is, how to change the default and what sort of behaviour the setting governs.
Also included is an example configuration for each setting. If you don't want to spend a lot of time
thinking about options, the config as generated sets sensible defaults for all values. Do note however that the
database defaults to SQLite, which is not recommended for production usage. You can read more on this subject
[here](../../setup/installation.md#using-postgresql).
## Config Conventions
Configuration options that take a time period can be set using a number
followed by a letter. Letters have the following meanings:
* `s` = second
* `m` = minute
* `h` = hour
* `d` = day
* `w` = week
* `y` = year
For example, setting `redaction_retention_period: 5m` would remove redacted
messages from the database after 5 minutes, rather than 5 months.
In addition, configuration options referring to size use the following suffixes:
* `K` = KiB, or 1024 bytes
* `M` = MiB, or 1,048,576 bytes
* `G` = GiB, or 1,073,741,824 bytes
* `T` = TiB, or 1,099,511,627,776 bytes
For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes
for a user avatar.
## Config Validation
The configuration file can be validated with the following command:
```bash
python -m synapse.config read <config key to print> -c <path to config>
```
To validate the entire file, omit `read <config key to print>`:
```bash
python -m synapse.config -c <path to config>
```
To see how to set other options, check the help reference:
```bash
python -m synapse.config --help
```
### YAML
The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules
apply if you want your config file to be read properly. A few helpful things to know:
* `#` before any option in the config will comment out that setting and either a default (if available) will
be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and
applied, but in example #2 the setting will not be read and a default will be applied.
Example #1:
```yaml
pid_file: DATADIR/homeserver.pid
```
Example #2:
```yaml
#pid_file: DATADIR/homeserver.pid
```
* Indentation matters! The indentation before a setting
will determine whether a given setting is read as part of another
setting, or considered on its own. Thus, in example #1, the `enabled` setting
is read as a sub-option of the `presence` setting, and will be properly applied.
However, the lack of indentation before the `enabled` setting in example #2 means
that when reading the config, Synapse will consider both `presence` and `enabled` as
different settings. In this case, `presence` has no value, and thus a default applied, and `enabled`
is an option that Synapse doesn't recognize and thus ignores.
Example #1:
```yaml
presence:
  enabled: false
```
Example #2:
```yaml
presence:
enabled: false
```
In this manual, all top-level settings (ones with no indentation) are identified
at the beginning of their section (i.e. "### `example_setting`") and
the sub-options, if any, are identified and listed in the body of the section.
In addition, each setting has an example of its usage, with the proper indentation
shown.
"""
SECTION_HEADERS = {
"modules": {
"title": "Modules",
"description": (
"Server admins can expand Synapse's functionality with external "
"modules.\n\n"
"See [here](../../modules/index.md) for more documentation on how "
"to configure or create custom modules for Synapse."
),
},
"server_name": {
"title": "Server",
"description": "Define your homeserver name and other base options.",
},
"admin_contact": {
"title": "Homeserver blocking",
"description": "Useful options for Synapse admins.",
},
"tls_certificate_path": {
"title": "TLS",
"description": "Options related to TLS.",
},
"federation_domain_whitelist": {
"title": "Federation",
"description": "Options related to federation.",
},
"event_cache_size": {
"title": "Caching",
"description": "Options related to caching.",
},
"database": {
"title": "Database",
"description": "Config options related to database settings.",
},
"log_config": {
"title": "Logging",
"description": ("Config options related to logging."),
},
"rc_message": {
"title": "Ratelimiting",
"description": (
"Options related to ratelimiting in Synapse.\n\n"
"Each ratelimiting configuration is made of two parameters:\n"
"- `per_second`: number of requests a client can send per second.\n"
"- `burst_count`: number of requests a client can send before "
"being throttled."
),
},
"enable_authenticated_media": {
"title": "Media Store",
"description": "Config options related to Synapse's media store.",
},
"recaptcha_public_key": {
"title": "Captcha",
"description": (
"See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha."
),
},
"turn_uris": {
"title": "TURN",
"description": ("Options related to adding a TURN server to Synapse."),
},
"enable_registration": {
"title": "Registration",
"description": (
"Registration can be rate-limited using the parameters in the "
"[Ratelimiting](#ratelimiting) section of this manual."
),
},
"session_lifetime": {
"title": "User session management",
"description": ("Config options related to user session management."),
},
"enable_metrics": {
"title": "Metrics",
"description": ("Config options related to metrics."),
},
"room_prejoin_state": {
"title": "API Configuration",
"description": ("Config settings related to the client/server API."),
},
"signing_key_path": {
"title": "Signing Keys",
"description": ("Config options relating to signing keys."),
},
"saml2_config": {
"title": "Single sign-on integration",
"description": (
"The following settings can be used to make Synapse use a single sign-on provider for authentication, instead of its internal password database.\n\n"
"You will probably also want to set the following options to `false` to disable the regular login/registration flows:\n"
"* [`enable_registration`](#enable_registration)\n"
"* [`password_config.enabled`](#password_config)"
),
},
"push": {
"title": "Push",
"description": ("Configuration settings related to push notifications."),
},
"encryption_enabled_by_default_for_room_type": {
"title": "Rooms",
"description": ("Config options relating to rooms."),
},
"opentracing": {
"title": "Opentracing",
"description": ("Configuration options related to Opentracing support."),
},
"worker_replication_secret": {
"title": "Coordinating workers",
"description": (
"Configuration options related to workers which belong in the main config file (usually called `homeserver.yaml`). A Synapse deployment can scale horizontally by running multiple Synapse processes called _workers_. Incoming requests are distributed between workers to handle higher loads. Some workers are privileged and can accept requests from other workers.\n\n"
"As a result, the worker configuration is divided into two parts.\n\n"
"1. The first part (in this section of the manual) defines which shardable tasks are delegated to privileged workers. This allows unprivileged workers to make requests to a privileged worker to act on their behalf.\n"
"2. [The second part](#individual-worker-configuration) controls the behaviour of individual workers in isolation.\n\n"
"For guidance on setting up workers, see the [worker documentation](../../workers.md)."
),
},
"worker_app": {
"title": "Individual worker configuration",
"description": (
"These options configure an individual worker, in its worker configuration file. They should be not be provided when configuring the main process.\n\n"
"Note also the configuration above for [coordinating a cluster of workers](#coordinating-workers).\n\n"
"For guidance on setting up workers, see the [worker documentation](../../workers.md)."
),
},
"background_updates": {
"title": "Background Updates",
"description": ("Configuration settings related to background updates."),
},
"auto_accept_invites": {
"title": "Auto Accept Invites",
"description": (
"Configuration settings related to automatically accepting invites."
),
},
}
INDENT = " "
has_error = False
def error(text: str) -> None:
global has_error
print(f"ERROR: {text}", file=sys.stderr)
has_error = True
def indent(text: str, first_line: bool = True) -> str:
"""Indents each non-empty line of the given text."""
text = re.sub(r"(\n)([^\n])", r"\1" + INDENT + r"\2", text)
if first_line:
text = re.sub(r"^([^\n])", INDENT + r"\1", text)
return text
def em(s: Optional[str]) -> str:
"""Add emphasis to text."""
return f"*{s}*" if s else ""
def a(s: Optional[str], suffix: str = " ") -> str:
"""Appends a space if the given string is not empty."""
return s + suffix if s else ""
def p(s: Optional[str], prefix: str = " ") -> str:
"""Prepend a space if the given string is not empty."""
return prefix + s if s else ""
def resolve_local_refs(schema: dict) -> dict:
"""Returns the given schema with local $ref properties replaced by their keywords.
Crude approximation that will override keywords.
"""
defs = schema["$defs"]
def replace_ref(d: Any) -> Any:
if isinstance(d, dict):
the_def = {}
if "$ref" in d:
# Found a "$ref" key.
def_name = d["$ref"].removeprefix("#/$defs/")
del d["$ref"]
the_def = defs[def_name]
new_dict = {k: replace_ref(v) for k, v in d.items()}
if common_keys := (new_dict.keys() & the_def.keys()) - {"properties"}:
print(
f"WARN: '{def_name}' overrides keys '{common_keys}'",
file=sys.stderr,
)
new_dict_props = new_dict.get("properties", {})
the_def_props = the_def.get("properties", {})
if common_props := new_dict_props.keys() & the_def_props.keys():
print(
f"WARN: '{def_name}' overrides properties '{common_props}'",
file=sys.stderr,
)
if merged_props := {**new_dict_props, **the_def_props}:
return {**new_dict, **the_def, "properties": merged_props}
else:
return {**new_dict, **the_def}
elif isinstance(d, list):
return [replace_ref(v) for v in d]
else:
return d
return replace_ref(schema)
def sep(values: dict) -> str:
"""Separator between parts of the description."""
# If description is multiple paragraphs already, add new ones. Otherwise
# append to same paragraph.
return "\n\n" if "\n\n" in values.get("description", "") else " "
def type_str(values: dict) -> str:
"""Type of the current value."""
if t := values.get("io.element.type_name"):
# Allow custom overrides for the type name, for documentation clarity
return f"({t})"
if not (t := values.get("type")):
return ""
if not isinstance(t, list):
t = [t]
joined = "|".join(t)
return f"({joined})"
def items(values: dict) -> str:
"""A block listing properties of array items."""
if not (items := values.get("items")):
return ""
if not (item_props := items.get("properties")):
return ""
return "\nOptions for each entry include:\n\n" + "\n".join(
sub_section(k, v) for k, v in item_props.items()
)
def properties(values: dict) -> str:
"""A block listing object properties."""
if not (properties := values.get("properties")):
return ""
return "\nThis setting has the following sub-options:\n\n" + "\n".join(
sub_section(k, v) for k, v in properties.items()
)
def sub_section(prop: str, values: dict) -> str:
"""Formats a bullet point about the given sub-property."""
sep = lambda: globals()["sep"](values)
type_str = lambda: globals()["type_str"](values)
items = lambda: globals()["items"](values)
properties = lambda: globals()["properties"](values)
def default() -> str:
try:
default = values["default"]
return f"Defaults to `{json.dumps(default)}`."
except KeyError:
return ""
def description() -> str:
if not (description := values.get("description")):
error(f"missing description for {prop}")
return "MISSING DESCRIPTION\n"
return f"{description}{p(default(), sep())}\n"
return (
f"* `{prop}`{p(type_str())}: "
+ f"{indent(description(), first_line=False)}"
+ indent(items())
+ indent(properties())
)
def section(prop: str, values: dict) -> str:
"""Formats a section about the given property."""
sep = lambda: globals()["sep"](values)
type_str = lambda: globals()["type_str"](values)
items = lambda: globals()["items"](values)
properties = lambda: globals()["properties"](values)
def is_simple_default() -> bool:
"""Whether the given default is simple enough for a one-liner."""
if not (d := values.get("default")):
return True
return not isinstance(d, dict) and not isinstance(d, list)
def default_str() -> str:
try:
default = values["default"]
except KeyError:
t = values.get("type", [])
if "object" == t or "object" in t:
# Skip objects as they probably have child defaults.
return ""
return "There is no default for this option."
if not is_simple_default():
# Show complex defaults as a code block instead.
return ""
return f"Defaults to `{json.dumps(default)}`."
def header() -> str:
try:
title = SECTION_HEADERS[prop]["title"]
description = SECTION_HEADERS[prop]["description"]
return f"## {title}\n\n{description}\n\n---\n"
except KeyError:
return ""
def title() -> str:
return f"### `{prop}`\n"
def description() -> str:
if not (description := values.get("description")):
error(f"missing description for {prop}")
return "MISSING DESCRIPTION\n"
return f"\n{a(em(type_str()))}{description}{p(default_str(), sep())}\n"
def example_str(example: Any) -> str:
return "```yaml\n" + f"{yaml.dump({prop: example}, sort_keys=False)}" + "```\n"
def default_example() -> str:
if is_simple_default():
return ""
default_cfg = example_str(values["default"])
return f"\nDefault configuration:\n{default_cfg}"
def examples() -> str:
if not (examples := values.get("examples")):
return ""
examples_str = "\n".join(example_str(e) for e in examples)
if len(examples) >= 2:
return f"\nExample configurations:\n{examples_str}"
else:
return f"\nExample configuration:\n{examples_str}"
def post_description() -> str:
# Sometimes it's helpful to have a description after the list of fields,
# e.g. with a subsection that consists only of text.
# This helps with that.
if not (description := values.get("io.element.post_description")):
return ""
return f"\n{description}\n\n"
return (
"---\n"
+ header()
+ title()
+ description()
+ items()
+ properties()
+ default_example()
+ examples()
+ post_description()
)
def main() -> None:
def usage(err_msg: str) -> int:
script_name = (sys.argv[:1] or ["__main__.py"])[0]
print(err_msg, file=sys.stderr)
print(f"Usage: {script_name} <JSON Schema file>", file=sys.stderr)
print(f"\n{__doc__}", file=sys.stderr)
exit(1)
def read_json_file_arg() -> Any:
if len(sys.argv) > 2:
exit(usage("Too many arguments."))
if not (filepath := (sys.argv[1:] or [""])[0]):
exit(usage("No schema file provided."))
with open(filepath) as f:
return yaml.safe_load(f)
schema = read_json_file_arg()
schema = resolve_local_refs(schema)
sections = (section(k, v) for k, v in schema["properties"].items())
print(HEADER + "".join(sections), end="")
if has_error:
print("There were errors.", file=sys.stderr)
exit(2)
if __name__ == "__main__":
main()
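For illustration, a hypothetical input for `resolve_local_refs` and the value it yields (the schema fragment is invented for this example, not taken from Synapse's real config schema):

```python
schema = {
    "$defs": {
        "duration": {"type": "string", "io.element.type_name": "duration"},
    },
    "properties": {
        "retention_period": {
            "$ref": "#/$defs/duration",
            "description": "How long to keep redacted messages.",
        },
    },
}
# resolve_local_refs(schema) inlines the $ref and merges its keywords:
# schema["properties"]["retention_period"] == {
#     "description": "How long to keep redacted messages.",
#     "type": "string",
#     "io.element.type_name": "duration",
# }
```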

View File

@ -139,6 +139,3 @@ cargo-fmt
# Ensure type hints are correct.
mypy
# Generate configuration documentation from the JSON Schema
./scripts-dev/gen_config_documentation.py schema/synapse-config.schema.yaml > docs/usage/configuration/config_documentation.md

View File

@ -254,12 +254,6 @@ def _prepare() -> None:
# Update the version specified in pyproject.toml.
subprocess.check_output(["poetry", "version", new_version])
# Update config schema $id.
schema_file = "schema/synapse-config.schema.yaml"
major_minor_version = ".".join(new_version.split(".")[:2])
url = f"https://element-hq.github.io/synapse/schema/synapse/v{major_minor_version}/synapse-config.schema.json"
subprocess.check_output(["sed", "-i", f"0,/^\\$id: .*/s||$id: {url}|", schema_file])
# Generate changelogs.
generate_and_write_changelog(synapse_repo, current_version, new_version)

View File

@ -37,9 +37,7 @@ from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace
from synapse.state import CREATE_KEY, POWER_KEY
from synapse.types import Requester, create_requester
from synapse.types.state import StateFilter
from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
@ -218,20 +216,18 @@ class BaseAuth:
# by checking if they would (theoretically) be able to change the
# m.room.canonical_alias events
auth_events = await self._storage_controllers.state.get_current_state(
room_id,
StateFilter.from_types(
[
POWER_KEY,
CREATE_KEY,
]
),
power_level_event = (
await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.PowerLevels, ""
)
)
auth_events = {}
if power_level_event:
auth_events[(EventTypes.PowerLevels, "")] = power_level_event
send_level = event_auth.get_send_level(
EventTypes.CanonicalAlias,
"",
auth_events.get(POWER_KEY),
EventTypes.CanonicalAlias, "", power_level_event
)
user_level = event_auth.get_user_power_level(
requester.user.to_string(), auth_events

View File

@ -30,6 +30,9 @@ from authlib.oauth2.rfc7662 import IntrospectionToken
from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
from prometheus_client import Histogram
from twisted.web.client import readBody
from twisted.web.http_headers import Headers
from synapse.api.auth.base import BaseAuth
from synapse.api.errors import (
AuthError,
@ -40,14 +43,8 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import (
active_span,
force_tracing,
inject_request_headers,
start_active_span,
)
from synapse.synapse_rust.http_client import HttpClient
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.types import Requester, UserID, create_requester
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
@ -182,10 +179,6 @@ class MSC3861DelegatedAuth(BaseAuth):
self._admin_token: Callable[[], Optional[str]] = self._config.admin_token
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
self._rust_http_client = HttpClient(
user_agent=self._http_client.user_agent.decode("utf8")
)
# # Token Introspection Cache
# This remembers what users/devices are represented by which access tokens,
# in order to reduce overall system load:
@ -308,6 +301,7 @@ class MSC3861DelegatedAuth(BaseAuth):
introspection_endpoint = await self._introspection_endpoint()
raw_headers: Dict[str, str] = {
"Content-Type": "application/x-www-form-urlencoded",
"User-Agent": str(self._http_client.user_agent, "utf-8"),
"Accept": "application/json",
# Tell MAS that we support reading the device ID as an explicit
# value, not encoded in the scope. This is supported by MAS 0.15+
@ -321,34 +315,38 @@ class MSC3861DelegatedAuth(BaseAuth):
uri, raw_headers, body = self._client_auth.prepare(
method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
)
headers = Headers({k: [v] for (k, v) in raw_headers.items()})
# Do the actual request
# We're not using the SimpleHttpClient util methods as we don't want to
# check the HTTP status code, and we do the body encoding ourselves.
logger.debug("Fetching token from MAS")
start_time = self._clock.time()
try:
with start_active_span("mas-introspect-token"):
inject_request_headers(raw_headers)
with PreserveLoggingContext():
resp_body = await self._rust_http_client.post(
url=uri,
response_limit=1 * 1024 * 1024,
headers=raw_headers,
request_body=body,
)
except HttpResponseException as e:
end_time = self._clock.time()
introspection_response_timer.labels(e.code).observe(end_time - start_time)
raise
response = await self._http_client.request(
method="POST",
uri=uri,
data=body.encode("utf-8"),
headers=headers,
)
resp_body = await make_deferred_yieldable(readBody(response))
except Exception:
end_time = self._clock.time()
introspection_response_timer.labels("ERR").observe(end_time - start_time)
raise
logger.debug("Fetched token from MAS")
end_time = self._clock.time()
introspection_response_timer.labels(200).observe(end_time - start_time)
introspection_response_timer.labels(response.code).observe(
end_time - start_time
)
if response.code < 200 or response.code >= 300:
raise HttpResponseException(
response.code,
response.phrase.decode("ascii", errors="replace"),
resp_body,
)
resp = json_decoder.decode(resp_body.decode("utf-8"))
@ -477,7 +475,7 @@ class MSC3861DelegatedAuth(BaseAuth):
# XXX: This is a temporary solution so that the admin API can be called by
# the OIDC provider. This will be removed once we have OIDC client
# credentials grant support in matrix-authentication-service.
logger.info("Admin toked used")
logging.info("Admin toked used")
# XXX: that user doesn't exist and won't be provisioned.
# This is mostly fine for admin calls, but we should also think about doing
# requesters without a user_id.

View File

@ -185,18 +185,12 @@ ServerNoticeLimitReached: Final = "m.server_notice.usage_limit_reached"
class UserTypes:
"""Allows for user type specific behaviour. With the benefit of hindsight
'admin' and 'guest' users should also be UserTypes. Extra user types can be
added in the configuration. Normal users are type None or one of the extra
user types (if configured).
'admin' and 'guest' users should also be UserTypes. Normal users are type None
"""
SUPPORT: Final = "support"
BOT: Final = "bot"
ALL_BUILTIN_USER_TYPES: Final = (SUPPORT, BOT)
"""
The user types that are built-in to Synapse. Extra user types can be
added in the configuration.
"""
ALL_USER_TYPES: Final = (SUPPORT, BOT)
class RelationTypes:
@ -286,10 +280,6 @@ class AccountDataTypes:
IGNORED_USER_LIST: Final = "m.ignored_user_list"
TAG: Final = "m.tag"
PUSH_RULES: Final = "m.push_rules"
# MSC4155: Invite filtering
MSC4155_INVITE_PERMISSION_CONFIG: Final = (
"org.matrix.msc4155.invite_permission_config"
)
class HistoryVisibility:

View File

@ -137,9 +137,6 @@ class Codes(str, Enum):
PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
KEY_TOO_LARGE = "M_KEY_TOO_LARGE"
# Part of MSC4155
INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"
class CodeMessageException(RuntimeError):
"""An exception with integer code, a message string attributes and optional headers.
@ -527,11 +524,7 @@ class InvalidCaptchaError(SynapseError):
class LimitExceededError(SynapseError):
"""A client has sent too many requests and is being throttled.
Args:
pause: Optional time in seconds to pause before responding to the client.
"""
"""A client has sent too many requests and is being throttled."""
def __init__(
self,
@ -539,7 +532,6 @@ class LimitExceededError(SynapseError):
code: int = 429,
retry_after_ms: Optional[int] = None,
errcode: str = Codes.LIMIT_EXCEEDED,
pause: Optional[float] = None,
):
# Use HTTP header Retry-After to enable library-assisted retry handling.
headers = (
@ -550,7 +542,6 @@ class LimitExceededError(SynapseError):
super().__init__(code, "Too Many Requests", errcode, headers=headers)
self.retry_after_ms = retry_after_ms
self.limiter_name = limiter_name
self.pause = pause
def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)

View File

@ -20,7 +20,7 @@
#
#
from typing import TYPE_CHECKING, Dict, Hashable, Optional, Tuple
from typing import Dict, Hashable, Optional, Tuple
from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RatelimitSettings
@ -28,12 +28,6 @@ from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util import Clock
if TYPE_CHECKING:
# To avoid circular imports:
from synapse.module_api.callbacks.ratelimit_callbacks import (
RatelimitModuleApiCallbacks,
)
class Ratelimiter:
"""
@ -78,14 +72,12 @@ class Ratelimiter:
store: DataStore,
clock: Clock,
cfg: RatelimitSettings,
ratelimit_callbacks: Optional["RatelimitModuleApiCallbacks"] = None,
):
self.clock = clock
self.rate_hz = cfg.per_second
self.burst_count = cfg.burst_count
self.store = store
self._limiter_name = cfg.key
self._ratelimit_callbacks = ratelimit_callbacks
# A dictionary representing the token buckets tracked by this rate
# limiter. Each entry maps a key of arbitrary type to a tuple representing:
@ -173,20 +165,6 @@ class Ratelimiter:
if override and not override.messages_per_second:
return True, -1.0
if requester and self._ratelimit_callbacks:
# Check if the user has a custom rate limit for this specific limiter
# as returned by the module API.
module_override = (
await self._ratelimit_callbacks.get_ratelimit_override_for_user(
requester.user.to_string(),
self._limiter_name,
)
)
if module_override:
rate_hz = module_override.per_second
burst_count = module_override.burst_count
# Override default values if set
time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
rate_hz = rate_hz if rate_hz is not None else self.rate_hz
@ -338,10 +316,12 @@ class Ratelimiter:
)
if not allowed:
if pause:
await self.clock.sleep(pause)
raise LimitExceededError(
limiter_name=self._limiter_name,
retry_after_ms=int(1000 * (time_allowed - time_now_s)),
pause=pause,
)
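The removed `pause` handling sits on top of the token-bucket accounting that `Ratelimiter` keeps per key. A simplified sketch of that accounting under the same `rate_hz`/`burst_count` semantics (illustrative only; the real class tracks more state and consults overrides):

```python
import time

def can_do_action(buckets: dict, key, rate_hz: float = 1.0, burst_count: int = 5) -> bool:
    # Each key maps to (tokens currently in the bucket, time of last update).
    tokens, last_ts = buckets.get(key, (0.0, time.time()))
    now = time.time()
    # Tokens drain out of the bucket at rate_hz per second.
    tokens = max(0.0, tokens - (now - last_ts) * rate_hz)
    if tokens + 1.0 > burst_count:
        return False  # throttled: the caller raises LimitExceededError
    buckets[key] = (tokens + 1.0, now)
    return True
```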

View File

@ -445,8 +445,8 @@ def listen_http(
# getHost() returns a UNIXAddress which contains an instance variable of 'name'
# encoded as a byte string. Decode as utf-8 so pretty.
logger.info(
"Synapse now listening on Unix Socket at: %s",
ports[0].getHost().name.decode("utf-8"),
"Synapse now listening on Unix Socket at: "
f"{ports[0].getHost().name.decode('utf-8')}"
)
return ports

View File

@ -28,13 +28,15 @@ from prometheus_client import Gauge
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import JsonDict
from synapse.util.constants import ONE_HOUR_SECONDS, ONE_MINUTE_SECONDS
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger("synapse.app.homeserver")
ONE_MINUTE_SECONDS = 60
ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS
MILLISECONDS_PER_SECOND = 1000
INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
@ -171,7 +173,7 @@ async def phone_stats_home(
stats["log_level"] = logging.getLevelName(log_level)
logger.info(
"Reporting stats to %s: %s", hs.config.metrics.report_stats_endpoint, stats
"Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
)
try:
await hs.get_proxied_http_client().put_json(

View File

@ -2,7 +2,7 @@
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright (C) 2023, 2025 New Vector, Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@ -70,8 +70,6 @@ from typing import (
Tuple,
)
from twisted.internet.interfaces import IDelayedCall
from synapse.appservice import (
ApplicationService,
ApplicationServiceState,
@ -452,20 +450,6 @@ class _TransactionController:
recoverer.recover()
logger.info("Now %i active recoverers", len(self.recoverers))
def force_retry(self, service: ApplicationService) -> None:
"""Forces a Recoverer to attempt delivery of transations immediately.
Args:
service: The application service whose pending transactions should be retried.
"""
recoverer = self.recoverers.get(service.id)
if not recoverer:
# No need to force a retry on a happy AS.
logger.info("%s is not in recovery, not forcing retry", service.id)
return
recoverer.force_retry()
async def _is_service_up(self, service: ApplicationService) -> bool:
state = await self.store.get_appservice_state(service)
return state == ApplicationServiceState.UP or state is None
@ -498,12 +482,11 @@ class _Recoverer:
self.service = service
self.callback = callback
self.backoff_counter = 1
self.scheduled_recovery: Optional[IDelayedCall] = None
def recover(self) -> None:
delay = 2**self.backoff_counter
logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
self.scheduled_recovery = self.clock.call_later(
self.clock.call_later(
delay, run_as_background_process, "as-recoverer", self.retry
)
@ -513,21 +496,6 @@ class _Recoverer:
self.backoff_counter += 1
self.recover()
def force_retry(self) -> None:
"""Cancels the existing timer and forces an immediate retry in the background.
Args:
service:
"""
# Prevent the existing backoff from occurring
if self.scheduled_recovery:
self.clock.cancel_call_later(self.scheduled_recovery)
# Run a retry, which will reschedule a recovery if it fails.
run_as_background_process(
"retry",
self.retry,
)
async def retry(self) -> None:
logger.info("Starting retries on %s", self.service.id)
try:

View File

@ -59,7 +59,6 @@ from synapse.config import ( # noqa: F401
tls,
tracer,
user_directory,
user_types,
voip,
workers,
)
@ -123,7 +122,6 @@ class RootConfig:
retention: retention.RetentionConfig
background_updates: background_updates.BackgroundUpdateConfig
auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig
user_types: user_types.UserTypesConfig
config_classes: List[Type["Config"]] = ...
config_files: List[str]

View File

@ -561,17 +561,8 @@ class ExperimentalConfig(Config):
# MSC4076: Add `disable_badge_count` to pusher configuration
self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)
# MSC4235: Add `via` param to hierarchy endpoint
self.msc4235_enabled: bool = experimental.get("msc4235_enabled", False)
# MSC4263: Preventing MXID enumeration via key queries
self.msc4263_limit_key_queries_to_users_who_share_rooms = experimental.get(
"msc4263_limit_key_queries_to_users_who_share_rooms",
False,
)
# MSC4267: Automatically forgetting rooms on leave
self.msc4267_enabled: bool = experimental.get("msc4267_enabled", False)
# MSC4155: Invite filtering
self.msc4155_enabled: bool = experimental.get("msc4155_enabled", False)

View File

@ -94,21 +94,5 @@ class FederationConfig(Config):
2**62,
)
def is_domain_allowed_according_to_federation_whitelist(self, domain: str) -> bool:
"""
Returns whether a domain is allowed according to the federation whitelist. If a
federation whitelist is not set, all domains are allowed.
Args:
domain: The domain to test.
Returns:
True if the domain is allowed or if a whitelist is not set, False otherwise.
"""
if self.federation_domain_whitelist is None:
return True
return domain in self.federation_domain_whitelist
_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}

View File

@ -59,7 +59,6 @@ from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .tracer import TracerConfig
from .user_directory import UserDirectoryConfig
from .user_types import UserTypesConfig
from .voip import VoipConfig
from .workers import WorkerConfig
@ -108,5 +107,4 @@ class HomeServerConfig(RootConfig):
ExperimentalConfig,
BackgroundUpdateConfig,
AutoAcceptInvitesConfig,
UserTypesConfig,
]

View File

@ -51,8 +51,6 @@ if TYPE_CHECKING:
from synapse.config.homeserver import HomeServerConfig
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
DEFAULT_LOG_CONFIG = Template(
"""\
# Log configuration for Synapse.
@ -293,7 +291,7 @@ def _load_logging_config(log_config_path: str) -> None:
log_config = yaml.safe_load(f.read())
if not log_config:
logger.warning("Loaded a blank logging config?")
logging.warning("Loaded a blank logging config?")
# If the old structured logging configuration is being used, raise an error.
if "structured" in log_config and log_config.get("structured"):
@ -314,7 +312,7 @@ def _reload_logging_config(log_config_path: Optional[str]) -> None:
return
_load_logging_config(log_config_path)
logger.info("Reloaded log config from %s due to SIGHUP", log_config_path)
logging.info("Reloaded log config from %s due to SIGHUP", log_config_path)
def setup_logging(
@ -351,17 +349,17 @@ def setup_logging(
appbase.register_sighup(_reload_logging_config, log_config_path)
# Log immediately so we can grep backwards.
logger.warning("***** STARTING SERVER *****")
logger.warning(
logging.warning("***** STARTING SERVER *****")
logging.warning(
"Server %s version %s",
sys.argv[0],
SYNAPSE_VERSION,
)
logger.warning("Copyright (c) 2023 New Vector, Inc")
logger.warning(
logging.warning("Copyright (c) 2023 New Vector, Inc")
logging.warning(
"Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
)
logger.info("Server hostname: %s", config.server.server_name)
logger.info("Public Base URL: %s", config.server.public_baseurl)
logger.info("Instance name: %s", hs.get_instance_name())
logger.info("Twisted reactor: %s", type(reactor).__name__)
logging.info("Server hostname: %s", config.server.server_name)
logging.info("Public Base URL: %s", config.server.public_baseurl)
logging.info("Instance name: %s", hs.get_instance_name())
logging.info("Twisted reactor: %s", type(reactor).__name__)

View File

@ -240,9 +240,3 @@ class RatelimitConfig(Config):
"rc_delayed_event_mgmt",
defaults={"per_second": 1, "burst_count": 5},
)
self.rc_reports = RatelimitSettings.parse(
config,
"rc_reports",
defaults={"per_second": 1, "burst_count": 5},
)

View File

@ -27,7 +27,7 @@ from synapse.types import JsonDict
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
logger = logging.Logger(__name__)
class RoomDefaultEncryptionTypes:
@ -85,4 +85,4 @@ class RoomConfig(Config):
# When enabled, users will forget rooms when they leave them, either via a
# leave, kick or ban.
self.forget_on_leave: bool = config.get("forget_rooms_on_leave", False)
self.forget_on_leave = config.get("forget_rooms_on_leave", False)

View File

@ -41,7 +41,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
from ._base import Config, ConfigError
from ._util import validate_config
logger = logging.getLogger(__name__)
logger = logging.Logger(__name__)
DIRECT_TCP_ERROR = """
Using direct TCP replication for workers is no longer supported.

View File

@ -1,44 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2025 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
from typing import Any, List, Optional
from synapse.api.constants import UserTypes
from synapse.types import JsonDict
from ._base import Config, ConfigError
class UserTypesConfig(Config):
section = "user_types"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
user_types: JsonDict = config.get("user_types", {})
self.default_user_type: Optional[str] = user_types.get(
"default_user_type", None
)
self.extra_user_types: List[str] = user_types.get("extra_user_types", [])
all_user_types: List[str] = []
all_user_types.extend(UserTypes.ALL_BUILTIN_USER_TYPES)
all_user_types.extend(self.extra_user_types)
self.all_user_types = all_user_types
if self.default_user_type is not None:
if self.default_user_type not in all_user_types:
raise ConfigError(
f"Default user type {self.default_user_type} is not in the list of all user types: {all_user_types}"
)

View File

@ -64,7 +64,6 @@ from synapse.api.room_versions import (
RoomVersion,
RoomVersions,
)
from synapse.state import CREATE_KEY
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
MutableStateMap,
@ -309,13 +308,6 @@ def check_state_dependent_auth_rules(
auth_dict = {(e.type, e.state_key): e for e in auth_events}
# Later code relies on there being a create event e.g _can_federate, _is_membership_change_allowed
# so produce a more intelligible error if we don't have one.
if auth_dict.get(CREATE_KEY) is None:
raise AuthError(
403, f"Event {event.event_id} is missing a create event in auth_events."
)
# additional check for m.federate
creating_domain = get_domain_from_id(event.room_id)
originating_domain = get_domain_from_id(event.sender)
@ -1018,16 +1010,11 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
user_id: user's id to look up in power_levels
auth_events:
state in force at this point in the room (or rather, a subset of
it including at least the create event, and possibly a power levels event).
it including at least the create event and power levels event.
Returns:
the user's power level in this room.
"""
create_event = auth_events.get(CREATE_KEY)
assert create_event is not None, (
"A create event in the auth events chain is required to calculate user power level correctly,"
" but was not found. This indicates a bug"
)
power_level_event = get_power_level_event(auth_events)
if power_level_event:
level = power_level_event.content.get("users", {}).get(user_id)
@ -1041,12 +1028,18 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> int
else:
# if there is no power levels event, the creator gets 100 and everyone
# else gets 0.
if create_event.room_version.implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
if creator == user_id:
return 100
# some things which call this don't pass the create event: hack around
# that.
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
if create_event is not None:
if create_event.room_version.implicit_room_creator:
creator = create_event.sender
else:
creator = create_event.content[EventContentFields.ROOM_CREATOR]
if creator == user_id:
return 100
return 0
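
Both branches above implement the same fallback: with no m.room.power_levels event in force, the room creator gets power level 100 and everyone else 0, the creator being the create event's sender on room versions with implicit_room_creator and the content's creator field otherwise. A standalone sketch of that rule, using a hypothetical minimal event type rather than Synapse's EventBase:

from dataclasses import dataclass

@dataclass
class CreateEvent:
    # Hypothetical stand-in for Synapse's create event, for illustration only.
    sender: str
    content: dict
    implicit_room_creator: bool  # room-version behaviour flag

def default_power_level(user_id: str, create_event: CreateEvent) -> int:
    # No power_levels event: the creator defaults to 100, everyone else to 0.
    if create_event.implicit_room_creator:
        creator = create_event.sender
    else:
        creator = create_event.content["creator"]
    return 100 if creator == user_id else 0

assert default_power_level("@alice:hs", CreateEvent("@alice:hs", {}, True)) == 100
assert default_power_level("@bob:hs", CreateEvent("@alice:hs", {}, True)) == 0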

View File

@ -195,18 +195,15 @@ class InviteAutoAccepter:
except SynapseError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.debug(
"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: %s",
e,
f"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: {e}"
)
else:
logger.warning(
"Update_room_membership raised the following unexpected (SynapseError) exception: %s",
e,
logger.warn(
f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}"
)
except Exception as e:
logger.warning(
"Update_room_membership raised the following unexpected exception: %s",
e,
logger.warn(
f"Update_room_membership raised the following unexpected exception: {e}"
)
sleep = 2**retries
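
The pattern in this hunk recurs throughout the diff: one branch passes format arguments to the logger lazily, the other builds f-strings and calls the deprecated Logger.warn alias. A quick stdlib-only sketch of why the lazy %-style form is generally preferred:

import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
err = ValueError("boom")

# Lazy %-style: interpolation only happens if the record is actually
# emitted, so calls below the configured level cost almost nothing.
logger.warning("Update_room_membership was forbidden: %s", err)

# f-string: the message is built unconditionally, before the level check.
logger.warning(f"Update_room_membership was forbidden: {err}")

# Note: Logger.warn is a deprecated alias of Logger.warning in the stdlib.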

View File

@ -1818,7 +1818,7 @@ class FederationClient(FederationBase):
)
return timestamp_to_event_response
except SynapseError as e:
logger.warning(
logger.warn(
"timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
room_id,
timestamp,

View File

@ -928,8 +928,7 @@ class FederationServer(FederationBase):
# joins) or the full state (for full joins).
# Return a 404 as we would if we weren't in the room at all.
logger.info(
"Rejecting /send_%s to %s because it's a partial state room",
membership_type,
f"Rejecting /send_{membership_type} to %s because it's a partial state room",
room_id,
)
raise SynapseError(

View File

@ -342,8 +342,6 @@ class _DestinationWakeupQueue:
destination, _ = self.queue.popitem(last=False)
queue = self.sender._get_per_destination_queue(destination)
if queue is None:
continue
if not queue._new_data_to_send:
# The per destination queue has already been woken up.
@ -438,23 +436,12 @@ class FederationSender(AbstractFederationSender):
self._wake_destinations_needing_catchup,
)
def _get_per_destination_queue(
self, destination: str
) -> Optional[PerDestinationQueue]:
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
"""Get or create a PerDestinationQueue for the given destination
Args:
destination: server_name of remote server
Returns:
None if the destination is not allowed by the federation whitelist.
Otherwise a PerDestinationQueue for this destination.
"""
if not self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
destination
):
return None
queue = self._per_destination_queues.get(destination)
if not queue:
queue = PerDestinationQueue(self.hs, self._transaction_manager, destination)
@ -731,16 +718,6 @@ class FederationSender(AbstractFederationSender):
# track the fact that we have a PDU for these destinations,
# to allow us to perform catch-up later on if the remote is unreachable
# for a while.
# Filter out any destinations not present in the federation_domain_whitelist, if
# the whitelist exists. These destinations should not be sent to so let's not
# waste time or space keeping track of events destined for them.
destinations = [
d
for d in destinations
if self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
d
)
]
await self.store.store_destination_rooms_entries(
destinations,
pdu.room_id,
@ -755,12 +732,7 @@ class FederationSender(AbstractFederationSender):
)
for destination in destinations:
queue = self._get_per_destination_queue(destination)
# We expect `queue` to not be None as we already filtered out
# non-whitelisted destinations above.
assert queue is not None
queue.send_pdu(pdu)
self._get_per_destination_queue(destination).send_pdu(pdu)
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
"""Send a RR to any other servers in the room
@ -869,16 +841,12 @@ class FederationSender(AbstractFederationSender):
for domain in immediate_domains:
# Add to destination queue and wake the destination up
queue = self._get_per_destination_queue(domain)
if queue is None:
continue
queue.queue_read_receipt(receipt)
queue.attempt_new_transaction()
for domain in delay_domains:
# Add to destination queue...
queue = self._get_per_destination_queue(domain)
if queue is None:
continue
queue.queue_read_receipt(receipt)
# ... and schedule the destination to be woken up.
@ -914,10 +882,9 @@ class FederationSender(AbstractFederationSender):
if self.is_mine_server_name(destination):
continue
queue = self._get_per_destination_queue(destination)
if queue is None:
continue
queue.send_presence(states, start_loop=False)
self._get_per_destination_queue(destination).send_presence(
states, start_loop=False
)
self._destination_wakeup_queue.add_to_queue(destination)
@ -967,8 +934,6 @@ class FederationSender(AbstractFederationSender):
return
queue = self._get_per_destination_queue(edu.destination)
if queue is None:
return
if key:
queue.send_keyed_edu(edu, key)
else:
@ -993,15 +958,9 @@ class FederationSender(AbstractFederationSender):
for destination in destinations:
if immediate:
queue = self._get_per_destination_queue(destination)
if queue is None:
continue
queue.attempt_new_transaction()
self._get_per_destination_queue(destination).attempt_new_transaction()
else:
queue = self._get_per_destination_queue(destination)
if queue is None:
continue
queue.mark_new_data()
self._get_per_destination_queue(destination).mark_new_data()
self._destination_wakeup_queue.add_to_queue(destination)
def wake_destination(self, destination: str) -> None:
@ -1020,9 +979,7 @@ class FederationSender(AbstractFederationSender):
):
return
queue = self._get_per_destination_queue(destination)
if queue is not None:
queue.attempt_new_transaction()
self._get_per_destination_queue(destination).attempt_new_transaction()
@staticmethod
def get_current_token() -> int:
@ -1067,9 +1024,6 @@ class FederationSender(AbstractFederationSender):
d
for d in destinations_to_wake
if self._federation_shard_config.should_handle(self._instance_name, d)
and self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
d
)
]
for destination in destinations_to_wake:

View File

@ -495,7 +495,7 @@ class AdminHandler:
)
except Exception as ex:
logger.info(
"Redaction of event %s failed due to: %s", event.event_id, ex
f"Redaction of event {event.event_id} failed due to: {ex}"
)
result["failed_redactions"][event.event_id] = str(ex)
await self._task_scheduler.update_task(task.id, result=result)

View File

@ -465,7 +465,9 @@ class ApplicationServicesHandler:
service, "read_receipt"
)
if new_token is not None and new_token.stream <= from_key:
logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
logger.debug(
"Rejecting token lower than or equal to stored: %s" % (new_token,)
)
return []
from_token = MultiWriterStreamToken(stream=from_key)
@ -507,7 +509,9 @@ class ApplicationServicesHandler:
service, "presence"
)
if new_token is not None and new_token <= from_key:
logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
logger.debug(
"Rejecting token lower than or equal to stored: %s" % (new_token,)
)
return []
for user in users:

View File

@ -76,7 +76,7 @@ from synapse.storage.databases.main.registration import (
LoginTokenLookupResult,
LoginTokenReused,
)
from synapse.types import JsonDict, Requester, StrCollection, UserID
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.msisdn import phone_number_to_msisdn
@ -1547,31 +1547,6 @@ class AuthHandler:
user_id, (token_id for _, token_id, _ in tokens_and_devices)
)
async def delete_access_tokens_for_devices(
self,
user_id: str,
device_ids: StrCollection,
) -> None:
"""Invalidate access tokens for the devices
Args:
user_id: ID of user the tokens belong to
device_ids: IDs of the devices the tokens are associated with.
"""
tokens_and_devices = await self.store.user_delete_access_tokens_for_devices(
user_id,
device_ids,
)
# see if any modules want to know about this
if self.password_auth_provider.on_logged_out_callbacks:
for token, _, device_id in tokens_and_devices:
await self.password_auth_provider.on_logged_out(
user_id=user_id, device_id=device_id, access_token=token
)
async def add_threepid(
self, user_id: str, medium: str, address: str, validated_at: int
) -> None:
@ -1920,7 +1895,7 @@ def load_single_legacy_password_auth_provider(
try:
provider = module(config=config, account_handler=api)
except Exception as e:
logger.exception("Error while initializing %r: %s", module, e)
logger.error("Error while initializing %r: %s", module, e)
raise
# All methods that the module provides should be async, but this wasn't enforced
@ -2453,7 +2428,7 @@ class PasswordAuthProvider:
except CancelledError:
raise
except Exception as e:
logger.exception("Module raised an exception in is_3pid_allowed: %s", e)
logger.error("Module raised an exception in is_3pid_allowed: %s", e)
raise SynapseError(code=500, msg="Internal Server Error")
return True

View File

@ -96,14 +96,6 @@ class DeactivateAccountHandler:
403, "Deactivation of this user is forbidden", Codes.FORBIDDEN
)
logger.info(
"%s requested deactivation of %s erase_data=%s id_server=%s",
requester.user,
user_id,
erase_data,
id_server,
)
# FIXME: Theoretically there is a race here wherein user resets
# password using threepid.

View File

@ -671,12 +671,12 @@ class DeviceHandler(DeviceWorkerHandler):
except_device_id: optional device id which should not be deleted
"""
device_map = await self.store.get_devices_by_user(user_id)
device_ids = list(device_map)
if except_device_id is not None:
device_map.pop(except_device_id, None)
user_device_ids = device_map.keys()
await self.delete_devices(user_id, user_device_ids)
device_ids = [d for d in device_ids if d != except_device_id]
await self.delete_devices(user_id, device_ids)
async def delete_devices(self, user_id: str, device_ids: StrCollection) -> None:
async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
"""Delete several devices
Args:
@ -695,10 +695,17 @@ class DeviceHandler(DeviceWorkerHandler):
else:
raise
# Delete data specific to each device. Not optimised as it's an
# experimental MSC.
if self.hs.config.experimental.msc3890_enabled:
for device_id in device_ids:
# Delete data specific to each device. Not optimised as it is not
# considered as part of a critical path.
for device_id in device_ids:
await self._auth_handler.delete_access_tokens_for_user(
user_id, device_id=device_id
)
await self.store.delete_e2e_keys_by_device(
user_id=user_id, device_id=device_id
)
if self.hs.config.experimental.msc3890_enabled:
# Remove any local notification settings for this device in accordance
# with MSC3890.
await self._account_data_handler.remove_account_data_for_user(
@ -706,13 +713,6 @@ class DeviceHandler(DeviceWorkerHandler):
f"org.matrix.msc3890.local_notification_settings.{device_id}",
)
# If we're deleting a lot of devices, a bunch of them may not have any
# to-device messages queued up. We filter those out to avoid scheduling
# unnecessary tasks.
devices_with_messages = await self.store.get_devices_with_messages(
user_id, device_ids
)
for device_id in devices_with_messages:
# Delete device messages asynchronously and in batches using the task scheduler
# We specify an upper stream id to avoid deleting non delivered messages
# if a user re-uses a device ID.
@ -726,10 +726,6 @@ class DeviceHandler(DeviceWorkerHandler):
},
)
await self._auth_handler.delete_access_tokens_for_devices(
user_id, device_ids=device_ids
)
# Pushers are deleted after `delete_access_tokens_for_user` is called so that
# modules using `on_logged_out` hook can use them if needed.
await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)
@ -823,11 +819,10 @@ class DeviceHandler(DeviceWorkerHandler):
# This should only happen if there are no updates, so we bail.
return
if logger.isEnabledFor(logging.DEBUG):
for device_id in device_ids:
logger.debug(
"Notifying about update %r/%r, ID: %r", user_id, device_id, position
)
for device_id in device_ids:
logger.debug(
"Notifying about update %r/%r, ID: %r", user_id, device_id, position
)
# specify the user ID too since the user should always get their own device list
# updates, even if they aren't in any rooms.
@ -927,6 +922,9 @@ class DeviceHandler(DeviceWorkerHandler):
# can't call self.delete_device because that will clobber the
# access token so call the storage layer directly
await self.store.delete_devices(user_id, [old_device_id])
await self.store.delete_e2e_keys_by_device(
user_id=user_id, device_id=old_device_id
)
# tell everyone that the old device is gone and that the dehydrated
# device has a new display name
@ -948,6 +946,7 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.NotFoundError()
await self.delete_devices(user_id, [device_id])
await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
@wrap_as_background_process("_handle_new_device_update_async")
async def _handle_new_device_update_async(self) -> None:
@ -1601,7 +1600,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
if prev_stream_id is not None and cached_devices == {
d["device_id"]: d for d in devices
}:
logger.info(
logging.info(
"Skipping device list resync for %s, as our cache matches already",
user_id,
)

View File

@ -282,7 +282,7 @@ class DirectoryHandler:
except RequestSendFailed:
raise SynapseError(502, "Failed to fetch alias")
except CodeMessageException as e:
logger.warning(
logging.warning(
"Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
)
if e.code == 404:

View File

@ -78,7 +78,6 @@ from synapse.replication.http.federation import (
ReplicationStoreRoomOnOutlierMembershipRestServlet,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.invite_rule import InviteRule
from synapse.types import JsonDict, StrCollection, get_domain_from_id
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
@ -1062,8 +1061,8 @@ class FederationHandler:
if self.hs.config.server.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
spam_check = (
await self._spam_checker_module_callbacks.federated_user_may_invite(event)
spam_check = await self._spam_checker_module_callbacks.user_may_invite(
event.sender, event.state_key, event.room_id
)
if spam_check != NOT_SPAM:
raise SynapseError(
@ -1090,22 +1089,6 @@ class FederationHandler:
if event.state_key == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
# check the invitee's configuration and apply rules
invite_config = await self.store.get_invite_config_for_user(event.state_key)
rule = invite_config.get_invite_rule(event.sender)
if rule == InviteRule.BLOCK:
logger.info(
"Automatically rejecting invite from %s due to the invite filtering rules of %s",
event.sender,
event.state_key,
)
raise SynapseError(
403,
"You are not permitted to invite this user.",
errcode=Codes.INVITE_BLOCKED,
)
# InviteRule.IGNORE is handled at the sync layer
# We retrieve the room member handler here as to not cause a cyclic dependency
member_handler = self.hs.get_room_member_handler()
# We don't rate limit based on room ID, as that should be done by

View File

@ -218,7 +218,7 @@ class IdentityHandler:
return data
except HttpResponseException as e:
logger.exception("3PID bind failed with Matrix error: %r", e)
logger.error("3PID bind failed with Matrix error: %r", e)
raise e.to_synapse_error()
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
@ -323,7 +323,7 @@ class IdentityHandler:
# The remote server probably doesn't support unbinding (yet)
logger.warning("Received %d response while unbinding threepid", e.code)
else:
logger.exception("Failed to unbind threepid on identity server: %s", e)
logger.error("Failed to unbind threepid on identity server: %s", e)
raise SynapseError(500, "Failed to contact identity server")
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")

View File

@ -460,7 +460,7 @@ class MessageHandler:
# date from the database in the same database transaction.
await self.store.expire_event(event_id)
except Exception as e:
logger.exception("Could not expire event %s: %r", event_id, e)
logger.error("Could not expire event %s: %r", event_id, e)
# Schedule the expiry of the next event to expire.
await self._schedule_next_expiry()
@ -2061,8 +2061,7 @@ class EventCreationHandler:
# dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
logger.info(
"Failed to send dummy event into room %s. Will exclude it from "
"future attempts until cache expires",
room_id,
"future attempts until cache expires" % (room_id,)
)
now = self.clock.time_msec()
self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
@ -2121,9 +2120,7 @@ class EventCreationHandler:
except AuthError:
logger.info(
"Failed to send dummy event into room %s for user %s due to "
"lack of power. Will try another user",
room_id,
user_id,
"lack of power. Will try another user" % (room_id, user_id)
)
return False

View File

@ -563,13 +563,12 @@ class OidcProvider:
raise ValueError("Unexpected subject")
except Exception:
logger.warning(
"OIDC Back-Channel Logout is enabled for issuer %r "
f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
"but it looks like the configured `user_mapping_provider` "
"does not use the `sub` claim as subject. If it is the case, "
"and you want Synapse to ignore the `sub` claim in OIDC "
"Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
"to `true` in the issuer config.",
self.issuer,
"to `true` in the issuer config."
)
@property
@ -827,10 +826,10 @@ class OidcProvider:
if response.code < 400:
logger.debug(
"Invalid response from the authorization server: "
'responded with a "%s" '
"but body has an error field: %r",
status,
resp["error"],
'responded with a "{status}" '
"but body has an error field: {error!r}".format(
status=status, error=resp["error"]
)
)
description = resp.get("error_description", error)
@ -1386,8 +1385,7 @@ class OidcProvider:
# support dynamic registration in Synapse at some point.
if not self._config.backchannel_logout_enabled:
logger.warning(
"Received an OIDC Back-Channel Logout request from issuer %r but it is disabled in config",
self.issuer,
f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
)
# TODO: this responds with a 400 status code, which is what the OIDC
@ -1799,5 +1797,5 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
extras[key] = template.render(user=userinfo).strip()
except Exception as e:
# Log an error and skip this value (don't break login for this).
logger.exception("Failed to render OIDC extra attribute %s: %s", key, e)
logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e))
return extras

View File

@ -115,7 +115,6 @@ class RegistrationHandler:
self._user_consent_version = self.hs.config.consent.user_consent_version
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self._server_name = hs.hostname
self._user_types_config = hs.config.user_types
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
@ -307,9 +306,6 @@ class RegistrationHandler:
elif default_display_name is None:
default_display_name = localpart
if user_type is None:
user_type = self._user_types_config.default_user_type
await self.register_with_store(
user_id=user_id,
password_hash=password_hash,
@ -506,7 +502,7 @@ class RegistrationHandler:
ratelimit=False,
)
except Exception as e:
logger.exception("Failed to join new user to %r: %r", r, e)
logger.error("Failed to join new user to %r: %r", r, e)
async def _join_rooms(self, user_id: str) -> None:
"""
@ -596,7 +592,7 @@ class RegistrationHandler:
# moving away from bare excepts is a good thing to do.
logger.error("Failed to join new user to %r: %r", r, e)
except Exception as e:
logger.exception("Failed to join new user to %r: %r", r, e)
logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)
async def _auto_join_rooms(self, user_id: str) -> None:
"""Automatically joins users to auto join rooms - creating the room in the first place

View File

@ -1,98 +0,0 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
#
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING
from synapse.api.errors import Codes, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.types import (
Requester,
)
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReportsHandler:
def __init__(self, hs: "HomeServer"):
self._hs = hs
self._store = hs.get_datastores().main
self._clock = hs.get_clock()
# Ratelimiter for user reports,
# keyed by the requesting user ID.
self._reports_ratelimiter = Ratelimiter(
store=self._store,
clock=self._clock,
cfg=hs.config.ratelimiting.rc_reports,
)
async def report_user(
self, requester: Requester, target_user_id: str, reason: str
) -> None:
"""Files a report against a user from a user.
Rate and size limits are applied to the report. If the user being reported
does not belong to this server, the report is ignored. This check is done
after the limits to reduce DoS potential.
If the user being reported belongs to this server, but doesn't exist, we
similarly ignore the report. The spec allows us to return an error if we
want to, but we choose to hide that user's existence instead.
If the report is otherwise valid (for a user which exists on our server),
we append it to the database for later processing.
Args:
requester - The user filing the report.
target_user_id - The user being reported.
reason - The user-supplied reason the user is being reported.
Raises:
SynapseError for BAD_REQUEST/BAD_JSON if the reason is too long.
"""
await self._check_limits(requester)
if len(reason) > 1000:
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Reason must be less than 1000 characters",
Codes.BAD_JSON,
)
if not self._hs.is_mine_id(target_user_id):
return # hide that they're not ours/that we can't do anything about them
user = await self._store.get_user_by_id(target_user_id)
if user is None:
return # hide that they don't exist
await self._store.add_user_report(
target_user_id=target_user_id,
user_id=requester.user.to_string(),
reason=reason,
received_ts=self._clock.time_msec(),
)
async def _check_limits(self, requester: Requester) -> None:
await self._reports_ratelimiter.ratelimit(
requester,
requester.user.to_string(),
)

View File

@ -468,6 +468,17 @@ class RoomCreationHandler:
"""
user_id = requester.user.to_string()
spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
user_id
)
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
raise SynapseError(
403,
"You are not permitted to create rooms",
errcode=spam_check[0],
additional_fields=spam_check[1],
)
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
@ -574,24 +585,6 @@ class RoomCreationHandler:
if current_power_level_int < needed_power_level:
user_power_levels[user_id] = needed_power_level
# We construct what the body of a call to /createRoom would look like for passing
# to the spam checker. We don't include a preset here, as we expect the
# initial state to contain everything we need.
spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
user_id,
{
"creation_content": creation_content,
"initial_state": list(initial_state.items()),
},
)
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
raise SynapseError(
403,
"You are not permitted to create rooms",
errcode=spam_check[0],
additional_fields=spam_check[1],
)
await self._send_events_for_new_room(
requester,
new_room_id,
@ -698,7 +691,7 @@ class RoomCreationHandler:
except SynapseError as e:
# again I'm not really expecting this to fail, but if it does, I'd rather
# we returned the new room to the client at this point.
logger.exception("Unable to send updated alias events in old room: %s", e)
logger.error("Unable to send updated alias events in old room: %s", e)
try:
await self.event_creation_handler.create_and_send_nonmember_event(
@ -715,7 +708,7 @@ class RoomCreationHandler:
except SynapseError as e:
# again I'm not really expecting this to fail, but if it does, I'd rather
# we returned the new room to the client at this point.
logger.exception("Unable to send updated alias events in new room: %s", e)
logger.error("Unable to send updated alias events in new room: %s", e)
async def create_room(
self,
@ -793,7 +786,7 @@ class RoomCreationHandler:
if not is_requester_admin:
spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
user_id, config
user_id
)
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
raise SynapseError(

View File

@ -53,7 +53,6 @@ from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.push import ReplicationCopyPusherRestServlet
from synapse.storage.databases.main.state_deltas import StateDelta
from synapse.storage.invite_rule import InviteRule
from synapse.types import (
JsonDict,
Requester,
@ -159,7 +158,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
store=self.store,
clock=self.clock,
cfg=hs.config.ratelimiting.rc_invites_per_room,
ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
)
# Ratelimiter for invites, keyed by recipient (across all rooms, all
@ -168,7 +166,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
store=self.store,
clock=self.clock,
cfg=hs.config.ratelimiting.rc_invites_per_user,
ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
)
# Ratelimiter for invites, keyed by issuer (across all rooms, all
@ -177,7 +174,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
store=self.store,
clock=self.clock,
cfg=hs.config.ratelimiting.rc_invites_per_issuer,
ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
)
self._third_party_invite_limiter = Ratelimiter(
@ -916,23 +912,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
additional_fields=block_invite_result[1],
)
# check the invitee's configuration and apply rules. Admins on the server can bypass.
if not is_requester_admin:
invite_config = await self.store.get_invite_config_for_user(target_id)
rule = invite_config.get_invite_rule(requester.user.to_string())
if rule == InviteRule.BLOCK:
logger.info(
"Automatically rejecting invite from %s due to the the invite filtering rules of %s",
target_id,
requester.user,
)
raise SynapseError(
403,
"You are not permitted to invite this user.",
errcode=Codes.INVITE_BLOCKED,
)
# InviteRule.IGNORE is handled at the sync layer.
# An empty prev_events list is allowed as long as the auth_event_ids are present
if prev_event_ids is not None:
return await self._local_membership_update(
@ -1572,7 +1551,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s", e)
logger.exception("Error kicking guest user: %s" % (e,))
async def lookup_room_alias(
self, room_alias: RoomAlias

View File

@ -54,9 +54,6 @@ class RoomPolicyHandler:
Returns:
bool: True if the event is allowed in the room, False otherwise.
"""
if event.type == "org.matrix.msc4284.policy" and event.state_key is not None:
return True # always allow policy server change events
policy_event = await self._storage_controllers.state.get_current_state_event(
event.room_id, "org.matrix.msc4284.policy", ""
)

View File

@ -111,15 +111,7 @@ class RoomSummaryHandler:
# If a user tries to fetch the same page multiple times in quick succession,
# only process the first attempt and return its result to subsequent requests.
self._pagination_response_cache: ResponseCache[
Tuple[
str,
str,
bool,
Optional[int],
Optional[int],
Optional[str],
Optional[Tuple[str, ...]],
]
Tuple[str, str, bool, Optional[int], Optional[int], Optional[str]]
] = ResponseCache(
hs.get_clock(),
"get_room_hierarchy",
@ -134,7 +126,6 @@ class RoomSummaryHandler:
max_depth: Optional[int] = None,
limit: Optional[int] = None,
from_token: Optional[str] = None,
remote_room_hosts: Optional[Tuple[str, ...]] = None,
) -> JsonDict:
"""
Implementation of the room hierarchy C-S API.
@ -152,9 +143,6 @@ class RoomSummaryHandler:
limit: An optional limit on the number of rooms to return per
page. Must be a positive integer.
from_token: An optional pagination token.
remote_room_hosts: An optional list of remote homeserver server names. If defined,
each host will be used to try and fetch the room hierarchy. Must be a tuple so
that it can be hashed by the `RoomSummaryHandler._pagination_response_cache`.
Returns:
The JSON hierarchy dictionary.
@ -174,7 +162,6 @@ class RoomSummaryHandler:
max_depth,
limit,
from_token,
remote_room_hosts,
),
self._get_room_hierarchy,
requester.user.to_string(),
@ -183,7 +170,6 @@ class RoomSummaryHandler:
max_depth,
limit,
from_token,
remote_room_hosts,
)
async def _get_room_hierarchy(
@ -194,7 +180,6 @@ class RoomSummaryHandler:
max_depth: Optional[int] = None,
limit: Optional[int] = None,
from_token: Optional[str] = None,
remote_room_hosts: Optional[Tuple[str, ...]] = None,
) -> JsonDict:
"""See docstring for SpaceSummaryHandler.get_room_hierarchy."""
@ -214,7 +199,7 @@ class RoomSummaryHandler:
if not local_room:
room_hierarchy = await self._summarize_remote_room_hierarchy(
_RoomQueueEntry(requested_room_id, remote_room_hosts or ()),
_RoomQueueEntry(requested_room_id, ()),
False,
)
root_room_entry = room_hierarchy[0]
@ -255,7 +240,7 @@ class RoomSummaryHandler:
processed_rooms = set(pagination_session["processed_rooms"])
else:
# The queue of rooms to process, the next room is last on the stack.
room_queue = [_RoomQueueEntry(requested_room_id, remote_room_hosts or ())]
room_queue = [_RoomQueueEntry(requested_room_id, ())]
# Rooms we have already processed.
processed_rooms = set()
@ -716,7 +701,7 @@ class RoomSummaryHandler:
# The API doesn't return the room version so assume that a
# join rule of knock is valid.
if (
room.get("join_rule", JoinRules.PUBLIC)
room.get("join_rule")
in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
or room.get("world_readable") is True
):
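
The _pagination_response_cache hunks above add remote_room_hosts to the cache key, with the docstring insisting it "Must be a tuple so that it can be hashed". The constraint is plain Python hashability: response-cache keys are used as dict keys, so every component must be hashable, which tuples of hashables are and lists are not. For illustration:

key = ("room1", "@user:hs", True, None, None, None, ("hs1", "hs2"))
hash(key)  # fine: a tuple of hashable components

try:
    hash(("room1", ["hs1", "hs2"]))  # a list inside makes the key unhashable
except TypeError as e:
    print(e)  # unhashable type: 'list'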

View File

@ -124,7 +124,7 @@ class SamlHandler:
)
# Since SAML sessions timeout it is useful to log when they were created.
logger.info("Initiating a new SAML session: %s", reqid)
logger.info("Initiating a new SAML session: %s" % (reqid,))
now = self.clock.time_msec()
self._outstanding_requests_dict[reqid] = Saml2SessionData(

View File

@ -238,7 +238,7 @@ class SendEmailHandler:
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
logger.info("Sending email to %s", email_address)
logger.info("Sending email to %s" % email_address)
await self._sendmail(
self._reactor,

View File

@ -23,7 +23,6 @@ from typing import (
List,
Literal,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
@ -50,7 +49,6 @@ from synapse.storage.databases.main.state import (
Sentinel as StateSentinel,
)
from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
from synapse.storage.invite_rule import InviteRule
from synapse.storage.roommember import (
RoomsForUser,
RoomsForUserSlidingSync,
@ -74,7 +72,6 @@ from synapse.types.handlers.sliding_sync import (
SlidingSyncResult,
)
from synapse.types.state import StateFilter
from synapse.util import MutableOverlayMapping
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -247,11 +244,9 @@ class SlidingSyncRoomLists:
# Note: this won't include rooms the user has left themselves. We add back
# `newly_left` rooms below. This is more efficient than fetching all rooms and
# then filtering out the old left rooms.
room_membership_for_user_map: MutableMapping[str, RoomsForUserSlidingSync] = (
MutableOverlayMapping(
await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
user_id
)
room_membership_for_user_map = (
await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
user_id
)
)
# To play nice with the rewind logic below, we need to go fetch the rooms the
@ -272,26 +267,32 @@ class SlidingSyncRoomLists:
)
)
if self_leave_room_membership_for_user_map:
# FIXME: It would be nice to avoid this copy but since
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
# can't return a mutable value like a `dict`. We make the copy to get a
# mutable dict that we can change. We try to only make a copy when necessary
# (if we actually need to change something) as in most cases, the logic
# doesn't need to run.
room_membership_for_user_map = dict(room_membership_for_user_map)
room_membership_for_user_map.update(self_leave_room_membership_for_user_map)
# Remove invites from ignored users
ignored_users = await self.store.ignored_users(user_id)
invite_config = await self.store.get_invite_config_for_user(user_id)
if ignored_users:
# FIXME: It would be nice to avoid this copy but since
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
# can't return a mutable value like a `dict`. We make the copy to get a
# mutable dict that we can change. We try to only make a copy when necessary
# (if we actually need to change something) as in most cases, the logic
# doesn't need to run.
room_membership_for_user_map = dict(room_membership_for_user_map)
# Make a copy so we don't run into an error: `dictionary changed size during
# iteration`, when we remove items
for room_id in list(room_membership_for_user_map.keys()):
room_for_user_sliding_sync = room_membership_for_user_map[room_id]
if (
room_for_user_sliding_sync.membership == Membership.INVITE
and room_for_user_sliding_sync.sender
and (
room_for_user_sliding_sync.sender in ignored_users
or invite_config.get_invite_rule(
room_for_user_sliding_sync.sender
)
== InviteRule.IGNORE
)
and room_for_user_sliding_sync.sender in ignored_users
):
room_membership_for_user_map.pop(room_id, None)
@ -306,6 +307,13 @@ class SlidingSyncRoomLists:
sync_config.user, room_membership_for_user_map, to_token=to_token
)
if changes:
# FIXME: It would be nice to avoid this copy but since
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
# can't return a mutable value like a `dict`. We make the copy to get a
# mutable dict that we can change. We try to only make a copy when necessary
# (if we actually need to change something) as in most cases, the logic
# doesn't need to run.
room_membership_for_user_map = dict(room_membership_for_user_map)
for room_id, change in changes.items():
if change is None:
# Remove rooms that the user joined after the `to_token`
@ -347,6 +355,13 @@ class SlidingSyncRoomLists:
newly_left_room_map.keys() - room_membership_for_user_map.keys()
)
if missing_newly_left_rooms:
# FIXME: It would be nice to avoid this copy but since
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
# can't return a mutable value like a `dict`. We make the copy to get a
# mutable dict that we can change. We try to only make a copy when necessary
# (if we actually need to change something) as in most cases, the logic
# doesn't need to run.
room_membership_for_user_map = dict(room_membership_for_user_map)
for room_id in missing_newly_left_rooms:
newly_left_room_for_user = newly_left_room_map[room_id]
# This should be a given
@ -437,10 +452,6 @@ class SlidingSyncRoomLists:
else:
room_membership_for_user_map.pop(room_id, None)
# Remove any rooms that we globally exclude from sync.
for room_id in self.rooms_to_exclude_globally:
room_membership_for_user_map.pop(room_id, None)
dm_room_ids = await self._get_dm_rooms_for_user(user_id)
if sync_config.lists:
@ -557,6 +568,14 @@ class SlidingSyncRoomLists:
if sync_config.room_subscriptions:
with start_active_span("assemble_room_subscriptions"):
# FIXME: It would be nice to avoid this copy but since
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
# can't return a mutable value like a `dict`. We make the copy to get a
# mutable dict that we can change. We try to only make a copy when necessary
# (if we actually need to change something) as in most cases, the logic
# doesn't need to run.
room_membership_for_user_map = dict(room_membership_for_user_map)
# Find which rooms are partially stated and may need to be filtered out
# depending on the `required_state` requested (see below).
partial_state_rooms = await self.store.get_partial_rooms()
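
The repeated FIXME blocks on the release-branch side exist because get_sliding_sync_rooms_for_user_from_membership_snapshots is cached and must not hand callers a mutable dict, forcing a defensive copy at every mutation site; develop instead wraps the snapshot in MutableOverlayMapping so that writes land in an overlay while the cached base is never touched. A minimal standalone sketch of that overlay idea (not Synapse's actual synapse.util.MutableOverlayMapping):

from collections.abc import MutableMapping
from typing import Any, Dict, Iterator, Mapping

class OverlayMapping(MutableMapping):
    """Mutable view over a read-only base map: writes and deletions go
    to an overlay; the (possibly cached) base is never modified."""

    _DELETED = object()

    def __init__(self, base: Mapping) -> None:
        self._base = base
        self._overlay: Dict[Any, Any] = {}

    def __getitem__(self, key: Any) -> Any:
        if key in self._overlay:
            value = self._overlay[key]
            if value is self._DELETED:
                raise KeyError(key)
            return value
        return self._base[key]

    def __setitem__(self, key: Any, value: Any) -> None:
        self._overlay[key] = value

    def __delitem__(self, key: Any) -> None:
        if key not in self:
            raise KeyError(key)
        self._overlay[key] = self._DELETED

    def __iter__(self) -> Iterator:
        for key, value in self._overlay.items():
            if value is not self._DELETED:
                yield key
        for key in self._base:
            if key not in self._overlay:
                yield key

    def __len__(self) -> int:
        return sum(1 for _ in self)

snapshot = {"!a:hs": "join", "!b:hs": "invite"}  # stands in for the cached result
rooms = OverlayMapping(snapshot)
rooms.pop("!b:hs")  # e.g. drop an invite from an ignored user
assert "!b:hs" not in rooms
assert snapshot == {"!a:hs": "join", "!b:hs": "invite"}  # cache left intact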

View File

@ -1230,16 +1230,12 @@ class SsoHandler:
if expected_user_id is not None and user_id != expected_user_id:
logger.error(
"Received a logout notification from SSO provider "
"%r for the user %r, but with "
"a session ID (%r) which belongs to "
"%r. This may happen when the SSO provider user mapper "
f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
f"a session ID ({auth_provider_session_id!r}) which belongs to "
f"{user_id!r}. This may happen when the SSO provider user mapper "
"uses something else than the standard attribute as mapping ID. "
"For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
"in the provider config if that is the case.",
auth_provider_id,
expected_user_id,
auth_provider_session_id,
user_id,
"in the provider config if that is the case."
)
continue

View File

@ -66,7 +66,6 @@ from synapse.logging.opentracing import (
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.databases.main.stream import PaginateFunction
from synapse.storage.invite_rule import InviteRule
from synapse.storage.roommember import MemberSummary
from synapse.types import (
DeviceListUpdates,
@ -2550,7 +2549,6 @@ class SyncHandler:
room_entries: List[RoomSyncResultBuilder] = []
invited: List[InvitedSyncResult] = []
knocked: List[KnockedSyncResult] = []
invite_config = await self.store.get_invite_config_for_user(user_id)
for room_id, events in mem_change_events_by_room_id.items():
# The body of this loop will add this room to at least one of the five lists
# above. Things get messy if you've e.g. joined, left, joined then left the
@ -2633,11 +2631,7 @@ class SyncHandler:
# Only bother if we're still currently invited
should_invite = last_non_join.membership == Membership.INVITE
if should_invite:
if (
last_non_join.sender not in ignored_users
and invite_config.get_invite_rule(last_non_join.sender)
!= InviteRule.IGNORE
):
if last_non_join.sender not in ignored_users:
invite_room_sync = InvitedSyncResult(room_id, invite=last_non_join)
if invite_room_sync:
invited.append(invite_room_sync)
@ -2792,7 +2786,6 @@ class SyncHandler:
membership_list=Membership.LIST,
excluded_rooms=sync_result_builder.excluded_room_ids,
)
invite_config = await self.store.get_invite_config_for_user(user_id)
room_entries = []
invited = []
@ -2818,8 +2811,6 @@ class SyncHandler:
elif event.membership == Membership.INVITE:
if event.sender in ignored_users:
continue
if invite_config.get_invite_rule(event.sender) == InviteRule.IGNORE:
continue
invite = await self.store.get_event(event.event_id)
invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite))
elif event.membership == Membership.KNOCK:
@ -3074,10 +3065,8 @@ class SyncHandler:
if batch.limited and since_token:
user_id = sync_result_builder.sync_config.user.to_string()
logger.debug(
"Incremental gappy sync of %s for user %s with %d state events",
room_id,
user_id,
len(state),
"Incremental gappy sync of %s for user %s with %d state events"
% (room_id, user_id, len(state))
)
elif room_builder.rtype == "archived":
archived_room_sync = ArchivedSyncResult(

View File

@ -749,9 +749,10 @@ class UserDirectoryHandler(StateDeltasHandler):
)
continue
except Exception:
logger.exception(
logger.error(
"Failed to refresh profile for %r due to unhandled exception",
user_id,
exc_info=True,
)
await self.store.set_remote_user_profile_in_user_dir_stale(
user_id,

View File

@ -44,15 +44,12 @@ from synapse.logging.opentracing import start_active_span
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage.databases.main.lock import Lock, LockStore
from synapse.util.async_helpers import timeout_deferred
from synapse.util.constants import ONE_MINUTE_SECONDS
if TYPE_CHECKING:
from synapse.logging.opentracing import opentracing
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# This lock is used to avoid creating an event while we are purging the room.
# We take a read lock when creating an event, and a write one when purging a room.
# This is because it is fine to create several events concurrently, since referenced events
@ -273,10 +270,9 @@ class WaitingLock:
def _get_next_retry_interval(self) -> float:
next = self._retry_interval
self._retry_interval = max(5, next * 2)
if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations
logger.warning(
"Lock timeout is getting excessive: %ss. There may be a deadlock.",
self._retry_interval,
if self._retry_interval > 5 * 2 ^ 7: # ~10 minutes
logging.warning(
f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
)
return next * random.uniform(0.9, 1.1)
@ -353,9 +349,8 @@ class WaitingMultiLock:
def _get_next_retry_interval(self) -> float:
next = self._retry_interval
self._retry_interval = max(5, next * 2)
if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations
logger.warning(
"Lock timeout is getting excessive: %ss. There may be a deadlock.",
self._retry_interval,
if self._retry_interval > 5 * 2 ^ 7: # ~10 minutes
logging.warning(
f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
)
return next * random.uniform(0.9, 1.1)
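
One subtlety across these two hunks: in Python, ^ is bitwise XOR and binds more loosely than *, so the release branch's guard 5 * 2 ^ 7 evaluates to (5 * 2) ^ 7, which is 13, rather than the 5 * 2**7 == 640 seconds its "~10 minutes" comment implies; develop's 10 * ONE_MINUTE_SECONDS threshold with the ">7 iterations" comment sidesteps the ambiguity. Checking the arithmetic:

# Precedence: * binds tighter than ^ (bitwise XOR), so this is (5 * 2) ^ 7.
assert 5 * 2 ^ 7 == 10 ^ 7 == 13
assert 5 * 2**7 == 640  # what the "~10 minutes" comment was presumably after

# The backoff doubles from a 5-second floor with +/-10% jitter, so the
# nominal interval first exceeds 600s (10 minutes) after 7 doublings.
interval = 5.0
for _ in range(7):
    interval = max(5, interval * 2)
assert interval == 640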

View File

@ -53,7 +53,7 @@ class AdditionalResource(DirectServeJsonResource):
hs: homeserver
handler: function to be called to handle the request.
"""
super().__init__(clock=hs.get_clock())
super().__init__()
self._handler = handler
async def _async_render(self, request: Request) -> Optional[Tuple[int, Any]]:

View File

@ -213,7 +213,7 @@ class _IPBlockingResolver:
if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
logger.info(
"Blocked %s from DNS resolution to %s", ip_address, hostname
"Blocked %s from DNS resolution to %s" % (ip_address, hostname)
)
has_bad_ip = True
@ -318,7 +318,7 @@ class BlocklistingAgentWrapper(Agent):
pass
else:
if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
logger.info("Blocking access to %s", ip_address)
logger.info("Blocking access to %s" % (ip_address,))
e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked")
return defer.fail(Failure(e))
@ -723,7 +723,7 @@ class BaseHttpClient:
resp_headers = dict(response.headers.getAllRawHeaders())
if response.code > 299:
logger.warning("Got %d when downloading %s", response.code, url)
logger.warning("Got %d when downloading %s" % (response.code, url))
raise SynapseError(
HTTPStatus.BAD_GATEWAY, "Got error %d" % (response.code,), Codes.UNKNOWN
)
@ -1106,7 +1106,7 @@ class _MultipartParserProtocol(protocol.Protocol):
self.stream.write(data[start:end])
except Exception as e:
logger.warning(
"Exception encountered writing file data to stream: %s", e
f"Exception encountered writing file data to stream: {e}"
)
self.deferred.errback()
self.file_length += end - start
@ -1129,7 +1129,7 @@ class _MultipartParserProtocol(protocol.Protocol):
try:
self.parser.write(incoming_data)
except Exception as e:
logger.warning("Exception writing to multipart parser: %s", e)
logger.warning(f"Exception writing to multipart parser: {e}")
self.deferred.errback()
return

View File

@ -602,7 +602,7 @@ class MatrixFederationHttpClient:
try:
parse_and_validate_server_name(request.destination)
except ValueError:
logger.exception("Invalid destination: %s.", request.destination)
logger.exception(f"Invalid destination: {request.destination}.")
raise FederationDeniedError(request.destination)
if timeout is not None:

View File

@ -106,7 +106,7 @@ class ProxyResource(_AsyncResource):
isLeaf = True
def __init__(self, reactor: ISynapseReactor, hs: "HomeServer"):
super().__init__(hs.get_clock(), True)
super().__init__(True)
self.reactor = reactor
self.agent = hs.get_federation_http_client().agent

View File

@ -21,7 +21,7 @@
import logging
import random
import re
from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple, Union, cast
from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple, Union
from urllib.parse import urlparse
from urllib.request import ( # type: ignore[attr-defined]
getproxies_environment,
@ -40,7 +40,6 @@ from twisted.internet.interfaces import (
IProtocol,
IProtocolFactory,
IReactorCore,
IReactorTime,
IStreamClientEndpoint,
)
from twisted.python.failure import Failure
@ -130,9 +129,7 @@ class ProxyAgent(_AgentBase):
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
# `_AgentBase` expects an `IReactorTime` provider. `IReactorCore`
# extends `IReactorTime`, so this cast is safe.
_AgentBase.__init__(self, cast(IReactorTime, reactor), pool)
_AgentBase.__init__(self, reactor, pool)
if proxy_reactor is None:
self.proxy_reactor = reactor
@ -171,7 +168,7 @@ class ProxyAgent(_AgentBase):
self.no_proxy = no_proxy
self._policy_for_https = contextFactory
self._reactor = cast(IReactorTime, reactor)
self._reactor = reactor
self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
self._federation_proxy_credentials: Optional[ProxyCredentials] = None
@ -260,11 +257,7 @@ class ProxyAgent(_AgentBase):
raise ValueError(f"Invalid URI {uri!r}")
parsed_uri = URI.fromBytes(uri)
pool_key: tuple[bytes, bytes, int] = (
parsed_uri.scheme,
parsed_uri.host,
parsed_uri.port,
)
pool_key = f"{parsed_uri.scheme!r}{parsed_uri.host!r}{parsed_uri.port}"
request_path = parsed_uri.originForm
should_skip_proxy = False
@ -290,7 +283,7 @@ class ProxyAgent(_AgentBase):
)
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
pool_key = (b"http-proxy", b"", 0)
pool_key = "http-proxy"
endpoint = self.http_proxy_endpoint
request_path = uri
elif (

View File

@ -180,16 +180,9 @@ class ReplicationAgent(_AgentBase):
worker_name = parsedURI.netloc.decode("utf-8")
key_scheme = self._endpointFactory.instance_map[worker_name].scheme()
key_netloc = self._endpointFactory.instance_map[worker_name].netloc()
# Build a connection pool key.
#
# `_AgentBase` expects this to be a three-tuple of `(scheme, host,
# port)` of type `bytes`. We don't have a real port when connecting via
# a Unix socket, so use `0`.
key = (
key_scheme.encode("ascii"),
key_netloc.encode("utf-8"),
0,
)
# This sets the Pool key to be:
# (http(s), <host:port>) or (unix, <socket_path>)
key = (key_scheme, key_netloc)
# _requestWithEndpoint comes from _AgentBase class
return self._requestWithEndpoint(

View File

@ -42,7 +42,6 @@ from typing import (
Protocol,
Tuple,
Union,
cast,
)
import attr
@ -50,9 +49,8 @@ import jinja2
from canonicaljson import encode_canonical_json
from zope.interface import implementer
from twisted.internet import defer, interfaces, reactor
from twisted.internet import defer, interfaces
from twisted.internet.defer import CancelledError
from twisted.internet.interfaces import IReactorTime
from twisted.python import failure
from twisted.web import resource
@ -69,7 +67,6 @@ from twisted.web.util import redirectTo
from synapse.api.errors import (
CodeMessageException,
Codes,
LimitExceededError,
RedirectException,
SynapseError,
UnrecognizedRequestError,
@ -77,7 +74,7 @@ from synapse.api.errors import (
from synapse.config.homeserver import HomeServerConfig
from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.util import Clock, json_encoder
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
from synapse.util.cancellation import is_function_cancellable
from synapse.util.iterutils import chunk_seq
@ -311,10 +308,9 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
context from the request the servlet is handling.
"""
def __init__(self, clock: Clock, extract_context: bool = False):
def __init__(self, extract_context: bool = False):
super().__init__()
self._clock = clock
self._extract_context = extract_context
def render(self, request: "SynapseRequest") -> int:
@ -333,12 +329,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
request.request_metrics.name = self.__class__.__name__
with trace_servlet(request, self._extract_context):
try:
callback_return = await self._async_render(request)
except LimitExceededError as e:
if e.pause:
self._clock.sleep(e.pause)
raise
callback_return = await self._async_render(request)
if callback_return is not None:
code, response = callback_return
@ -402,17 +393,8 @@ class DirectServeJsonResource(_AsyncResource):
formatting responses and errors as JSON.
"""
def __init__(
self,
canonical_json: bool = False,
extract_context: bool = False,
# Clock is optional as this class is exposed to the module API.
clock: Optional[Clock] = None,
):
if clock is None:
clock = Clock(cast(IReactorTime, reactor))
super().__init__(clock, extract_context)
def __init__(self, canonical_json: bool = False, extract_context: bool = False):
super().__init__(extract_context)
self.canonical_json = canonical_json
def _send_response(
@ -468,8 +450,8 @@ class JsonResource(DirectServeJsonResource):
canonical_json: bool = True,
extract_context: bool = False,
):
super().__init__(canonical_json, extract_context)
self.clock = hs.get_clock()
super().__init__(canonical_json, extract_context, clock=self.clock)
# Map of path regex -> method -> callback.
self._routes: Dict[Pattern[str], Dict[bytes, _PathEntry]] = {}
self.hs = hs
@ -515,7 +497,7 @@ class JsonResource(DirectServeJsonResource):
key word arguments to pass to the callback
"""
# At this point the path must be bytes.
request_path_bytes: bytes = request.path
request_path_bytes: bytes = request.path # type: ignore
request_path = request_path_bytes.decode("ascii")
# Treat HEAD requests as GET requests.
request_method = request.method
@ -582,17 +564,6 @@ class DirectServeHtmlResource(_AsyncResource):
# The error template to use for this resource
ERROR_TEMPLATE = HTML_ERROR_TEMPLATE
def __init__(
self,
extract_context: bool = False,
# Clock is optional as this class is exposed to the module API.
clock: Optional[Clock] = None,
):
if clock is None:
clock = Clock(cast(IReactorTime, reactor))
super().__init__(clock, extract_context)
def _send_response(
self,
request: "SynapseRequest",

View File

@ -796,13 +796,6 @@ def inject_response_headers(response_headers: Headers) -> None:
response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}")
@ensure_active_span("inject the span into a header dict")
def inject_request_headers(headers: Dict[str, str]) -> None:
span = opentracing.tracer.active_span
assert span is not None
opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, headers)
@ensure_active_span(
"get the active span context as a dict", ret=cast(Dict[str, str], {})
)

Some files were not shown because too many files have changed in this diff.