Mirror of https://github.com/element-hq/synapse.git (synced 2025-07-02 00:00:32 -04:00)

Compare commits: develop ... v1.131.0rc1

No commits in common. "develop" and "v1.131.0rc1" have entirely different histories. In the diffs below, "-" lines come from develop and "+" lines from v1.131.0rc1.
.github/workflows/docker.yml (vendored) — 6 changes

@@ -24,13 +24,13 @@ jobs:

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

       - name: Inspect builder
         run: docker buildx inspect

       - name: Install Cosign
-        uses: sigstore/cosign-installer@fb28c2b6339dcd94da6e4cbcbc5e888961f6f8c3 # v3.9.0
+        uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2

       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -72,7 +72,7 @@ jobs:

       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
+        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
         with:
           push: true
           labels: |
.github/workflows/docs-pr-netlify.yaml (vendored) — 2 changes

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@ac66b43f0e6a346234dd65d4d0c8fbb31cb316e5 # v11
+        uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
.github/workflows/docs.yaml (vendored) — 12 changes

@@ -78,18 +78,6 @@ jobs:
           mdbook build
           cp book/welcome_and_overview.html book/index.html

-      - name: Prepare and publish schema files
-        run: |
-          sudo apt-get update && sudo apt-get install -y yq
-          mkdir -p book/schema
-          # Remove developer notice before publishing.
-          rm schema/v*/Do\ not\ edit\ files\ in\ this\ folder
-          # Copy schema files that are independent from current Synapse version.
-          cp -r -t book/schema schema/v*/
-          # Convert config schema from YAML source file to JSON.
-          yq < schema/synapse-config.schema.yaml \
-            > book/schema/synapse-config.schema.json
-
       # Deploy to the target directory.
       - name: Deploy to gh pages
         uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
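
The step removed above converts the YAML config schema to JSON with `yq`. A minimal Python sketch of the same conversion (assuming PyYAML is installed; the file paths are those named in the workflow step):

```python
import json

import yaml

# Load the YAML source schema and re-serialise it as JSON,
# mirroring the removed `yq < schema/... > book/schema/...` step.
with open("schema/synapse-config.schema.yaml") as f:
    schema = yaml.safe_load(f)

with open("book/schema/synapse-config.schema.json", "w") as f:
    json.dump(schema, f, indent=2)
```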
.github/workflows/release-artifacts.yml (vendored) — 6 changes

@@ -61,7 +61,7 @@ jobs:

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           install: true

@@ -111,7 +111,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-24.04, macos-13]
+        os: [ubuntu-22.04, macos-13]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -139,7 +139,7 @@ jobs:
           python-version: "3.x"

       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==3.0.0
+        run: python -m pip install cibuildwheel==2.23.0

       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
.github/workflows/schema.yaml (vendored) — 57 changes (file deleted)

@@ -1,57 +0,0 @@
-name: Schema
-
-on:
-  pull_request:
-    paths:
-      - schema/**
-      - docs/usage/configuration/config_documentation.md
-  push:
-    branches: ["develop", "release-*"]
-  workflow_dispatch:
-
-jobs:
-  validate-schema:
-    name: Ensure Synapse config schema is valid
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
-        with:
-          python-version: "3.x"
-      - name: Install check-jsonschema
-        run: pip install check-jsonschema==0.33.0
-
-      - name: Validate meta schema
-        run: check-jsonschema --check-metaschema schema/v*/meta.schema.json
-      - name: Validate schema
-        run: |-
-          # Please bump on introduction of a new meta schema.
-          LATEST_META_SCHEMA_VERSION=v1
-          check-jsonschema \
-            --schemafile="schema/$LATEST_META_SCHEMA_VERSION/meta.schema.json" \
-            schema/synapse-config.schema.yaml
-      - name: Validate default config
-        # Populates the empty instance with default values and checks against the schema.
-        run: |-
-          echo "{}" | check-jsonschema \
-            --fill-defaults --schemafile=schema/synapse-config.schema.yaml -
-
-  check-doc-generation:
-    name: Ensure generated documentation is up-to-date
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
-        with:
-          python-version: "3.x"
-      - name: Install PyYAML
-        run: pip install PyYAML==6.0.2
-
-      - name: Regenerate config documentation
-        run: |
-          scripts-dev/gen_config_documentation.py \
-            schema/synapse-config.schema.yaml \
-            > docs/usage/configuration/config_documentation.md
-      - name: Error in case of any differences
-        # Errors if there are now any modified files (untracked files are ignored).
-        run: 'git diff --exit-code'
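
The deleted workflow's "Validate default config" step fills an empty config with schema defaults before validating. A rough Python equivalent of that check, assuming the `jsonschema` and PyYAML packages and a 2020-12 draft schema (the extended-validator pattern follows the jsonschema FAQ):

```python
import yaml
from jsonschema import Draft202012Validator, validators


def extend_with_defaults(validator_class):
    """Extend a validator so it inserts schema defaults while validating."""
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        for prop, subschema in properties.items():
            if isinstance(instance, dict) and "default" in subschema:
                instance.setdefault(prop, subschema["default"])
        yield from validate_properties(validator, properties, instance, schema)

    return validators.extend(validator_class, {"properties": set_defaults})


with open("schema/synapse-config.schema.yaml") as f:
    schema = yaml.safe_load(f)

# Mirror `echo "{}" | check-jsonschema --fill-defaults ...`: populate an
# empty config with defaults, then raise if the result violates the schema.
DefaultFillingValidator = extend_with_defaults(Draft202012Validator)
config: dict = {}
DefaultFillingValidator(schema).validate(config)
print("empty config validates after filling defaults")
```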
.github/workflows/tests.yml (vendored) — 22 changes

@@ -85,7 +85,7 @@ jobs:
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -149,7 +149,7 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       - name: Setup Poetry
@@ -210,7 +210,7 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
@@ -227,7 +227,7 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@0d72692bcfbf448b1e2afa01a67f71b455a9dcec # 1.86.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
         with:
           components: clippy
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
@@ -247,7 +247,7 @@ jobs:
       - name: Install Rust
         uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
         with:
-          toolchain: nightly-2025-04-23
+          toolchain: nightly-2022-12-01
           components: clippy
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

@@ -265,7 +265,7 @@ jobs:
         uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
         with:
           # We use nightly so that it correctly groups together imports
-          toolchain: nightly-2025-04-23
+          toolchain: nightly-2022-12-01
           components: rustfmt
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

@@ -362,7 +362,7 @@ jobs:
           postgres:${{ matrix.job.postgres-version }}

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
@@ -404,7 +404,7 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       # There aren't wheels for some of the older deps, so we need to install
@@ -519,7 +519,7 @@ jobs:
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       - name: Run SyTest
@@ -663,7 +663,7 @@ jobs:
           path: synapse

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       - name: Prepare Complement's Prerequisites
@@ -695,7 +695,7 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Install Rust
-        uses: dtolnay/rust-toolchain@c1678930c21fb233e4987c4ae12158f9125e5762 # 1.81.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
       - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8

       - run: cargo test
CHANGES.md — 105 changes

@@ -1,108 +1,3 @@
-# Synapse 1.133.0 (2025-07-01)
-
-Pre-built wheels are now built using the [manylinux_2_28](https://github.com/pypa/manylinux#manylinux_2_28-almalinux-8-based) base, which is expected to be compatible with distros using glibc 2.28 or later, including:
-
-- Debian 10+
-- Ubuntu 18.10+
-- Fedora 29+
-- CentOS/RHEL 8+
-
-Previously, wheels were built using the [manylinux2014](https://github.com/pypa/manylinux#manylinux2014-centos-7-based-glibc-217) base, which was expected to be compatible with distros using glibc 2.17 or later.
-
-### Bugfixes
-
-- Bump `cibuildwheel` to 3.0.0 to fix the `manylinux` wheel builds. ([\#18615](https://github.com/element-hq/synapse/issues/18615))
-
-
-# Synapse 1.133.0rc1 (2025-06-24)
-
-### Features
-
-- Add support for the [MSC4260 user report API](https://github.com/matrix-org/matrix-spec-proposals/pull/4260). ([\#18120](https://github.com/element-hq/synapse/issues/18120))
-
-### Bugfixes
-
-- Fix an issue where, during state resolution for v11 rooms, Synapse would incorrectly calculate the power level of the creator when there was no power levels event in the room. ([\#18534](https://github.com/element-hq/synapse/issues/18534), [\#18547](https://github.com/element-hq/synapse/issues/18547))
-- Fix long-standing bug where sliding sync did not honour the `room_id_to_include` config option. ([\#18535](https://github.com/element-hq/synapse/issues/18535))
-- Fix an issue where "Lock timeout is getting excessive" warnings would be logged even when the lock timeout was <10 minutes. ([\#18543](https://github.com/element-hq/synapse/issues/18543))
-- Fix an issue where Synapse could calculate the wrong power level for the creator of the room if there was no power levels event. ([\#18545](https://github.com/element-hq/synapse/issues/18545))
-
-### Improved Documentation
-
-- Generate config documentation from JSON Schema file. ([\#18528](https://github.com/element-hq/synapse/issues/18528))
-- Fix typo in user type documentation. ([\#18568](https://github.com/element-hq/synapse/issues/18568))
-
-### Internal Changes
-
-- Increase performance of introspecting access tokens when using delegated auth. ([\#18357](https://github.com/element-hq/synapse/issues/18357), [\#18561](https://github.com/element-hq/synapse/issues/18561))
-- Log user deactivations. ([\#18541](https://github.com/element-hq/synapse/issues/18541))
-- Enable [`flake8-logging`](https://docs.astral.sh/ruff/rules/#flake8-logging-log) and [`flake8-logging-format`](https://docs.astral.sh/ruff/rules/#flake8-logging-format-g) rules in Ruff and fix related issues throughout the codebase. ([\#18542](https://github.com/element-hq/synapse/issues/18542))
-- Clean up old, unused rows from the `device_federation_inbox` table. ([\#18546](https://github.com/element-hq/synapse/issues/18546))
-- Run config schema CI on develop and release branches. ([\#18551](https://github.com/element-hq/synapse/issues/18551))
-- Add support for Twisted `25.5.0`+ releases. ([\#18577](https://github.com/element-hq/synapse/issues/18577))
-- Update PyO3 to version 0.25. ([\#18578](https://github.com/element-hq/synapse/issues/18578))
-
-### Updates to locked dependencies
-
-* Bump actions/setup-python from 5.5.0 to 5.6.0. ([\#18555](https://github.com/element-hq/synapse/issues/18555))
-* Bump base64 from 0.21.7 to 0.22.1. ([\#18559](https://github.com/element-hq/synapse/issues/18559))
-* Bump dawidd6/action-download-artifact from 9 to 11. ([\#18556](https://github.com/element-hq/synapse/issues/18556))
-* Bump headers from 0.4.0 to 0.4.1. ([\#18529](https://github.com/element-hq/synapse/issues/18529))
-* Bump requests from 2.32.2 to 2.32.4. ([\#18533](https://github.com/element-hq/synapse/issues/18533))
-* Bump types-requests from 2.32.0.20250328 to 2.32.4.20250611. ([\#18558](https://github.com/element-hq/synapse/issues/18558))
-
-# Synapse 1.132.0 (2025-06-17)
-
-### Improved Documentation
-
-- Improvements to generate config documentation from JSON Schema file. ([\#18522](https://github.com/element-hq/synapse/issues/18522))
-
-
-# Synapse 1.132.0rc1 (2025-06-10)
-
-### Features
-
-- Add support for [MSC4155](https://github.com/matrix-org/matrix-spec-proposals/pull/4155) Invite Filtering. ([\#18288](https://github.com/element-hq/synapse/issues/18288))
-- Add experimental `user_may_send_state_event` module API callback. ([\#18455](https://github.com/element-hq/synapse/issues/18455))
-- Add experimental `get_media_config_for_user` and `is_user_allowed_to_upload_media_of_size` module API callbacks that allow overriding of media repository maximum upload size. ([\#18457](https://github.com/element-hq/synapse/issues/18457))
-- Add experimental `get_ratelimit_override_for_user` module API callback that allows overriding of per-user ratelimits. ([\#18458](https://github.com/element-hq/synapse/issues/18458))
-- Pass `room_config` argument to `user_may_create_room` spam checker module callback. ([\#18486](https://github.com/element-hq/synapse/issues/18486))
-- Support configuration of default and extra user types. ([\#18456](https://github.com/element-hq/synapse/issues/18456))
-- Successful requests to `/_matrix/app/v1/ping` will now force Synapse to reattempt delivering transactions to appservices. ([\#18521](https://github.com/element-hq/synapse/issues/18521))
-- Support the import of the `RatelimitOverride` type from `synapse.module_api` in modules and rename `messages_per_second` to `per_second`. ([\#18513](https://github.com/element-hq/synapse/issues/18513))
-
-### Bugfixes
-
-- Remove destinations from sending if not whitelisted. ([\#18484](https://github.com/element-hq/synapse/issues/18484))
-- Fixed room summary API incorrectly returning that a room is private in the room summary response when the join rule is omitted by the remote server. Contributed by @nexy7574. ([\#18493](https://github.com/element-hq/synapse/issues/18493))
-- Prevent users from adding themselves to their own user ignore list. ([\#18508](https://github.com/element-hq/synapse/issues/18508))
-
-### Improved Documentation
-
-- Generate config documentation from JSON Schema file. ([\#17892](https://github.com/element-hq/synapse/issues/17892))
-- Mention `CAP_NET_BIND_SERVICE` as an alternative to running Synapse as root in order to bind to a privileged port. ([\#18408](https://github.com/element-hq/synapse/issues/18408))
-- Surface hidden Admin API documentation regarding fetching of scheduled tasks. ([\#18516](https://github.com/element-hq/synapse/issues/18516))
-- Mark the new module APIs in this release as experimental. ([\#18536](https://github.com/element-hq/synapse/issues/18536))
-
-### Internal Changes
-
-- Mark dehydrated devices in the [List All User Devices Admin API](https://element-hq.github.io/synapse/latest/admin_api/user_admin_api.html#list-all-devices). ([\#18252](https://github.com/element-hq/synapse/issues/18252))
-- Reduce disk wastage by cleaning up `received_transactions` older than 1 day, rather than 30 days. ([\#18310](https://github.com/element-hq/synapse/issues/18310))
-- Distinguish all vs local events being persisted in the "Event Send Time Quantiles" graph (Grafana). ([\#18510](https://github.com/element-hq/synapse/issues/18510))
-
-
-# Synapse 1.131.0 (2025-06-03)
-
-No significant changes since 1.131.0rc1.
-
 # Synapse 1.131.0rc1 (2025-05-28)

 ### Features
Cargo.lock (generated) — 1262 changes
(File diff suppressed because it is too large.)
Twelve one-line newsfragment files deleted (file names not captured):

@@ -1 +0,0 @@
-Support for [MSC4235](https://github.com/matrix-org/matrix-spec-proposals/pull/4235): via query param for hierarchy endpoint. Contributed by Krishan (@kfiven).
@@ -1 +0,0 @@
-Add `forget_forced_upon_leave` capability as per [MSC4267](https://github.com/matrix-org/matrix-spec-proposals/pull/4267).
@@ -1 +0,0 @@
-Add `federated_user_may_invite` spam checker callback which receives the entire invite event. Contributed by @tulir @ Beeper.
@@ -1 +0,0 @@
-Stop adding the "origin" field to newly-created events (PDUs).
@@ -1 +0,0 @@
-Fix `KeyError` on background updates when using split main/state databases.
@@ -1 +0,0 @@
-Fix documentation of the Delete Room Admin API's status field.
@@ -1 +0,0 @@
-Improve docstring on `simple_upsert_many`.
@@ -1 +0,0 @@
-Improve performance of device deletion by adding missing index.
@@ -1 +0,0 @@
-Better handling of ratelimited requests.
@@ -1 +0,0 @@
-Better handling of ratelimited requests.
@@ -1 +0,0 @@
-Speed up bulk device deletion.
@@ -1 +0,0 @@
-Ensure policy servers are not asked to scan policy server change events, allowing rooms to disable the use of a policy server while the policy server is down.
(Grafana dashboard JSON — file name not captured)

@@ -220,24 +220,29 @@
       "yBucketBound": "auto"
     },
     {
-      "datasource": {
-        "uid": "${DS_PROMETHEUS}",
-        "type": "prometheus"
-      },
       "aliasColors": {},
+      "bars": false,
       "dashLength": 10,
+      "dashes": false,
+      "datasource": {
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "description": "",
       "fieldConfig": {
         "defaults": {
           "links": []
         },
         "overrides": []
       },
+      "fill": 0,
+      "fillGradient": 0,
       "gridPos": {
         "h": 9,
         "w": 12,
         "x": 12,
         "y": 1
       },
+      "hiddenSeries": false,
       "id": 152,
       "legend": {
         "avg": false,
@@ -250,81 +255,71 @@
         "values": false
       },
       "lines": true,
+      "linewidth": 0,
+      "links": [],
       "nullPointMode": "connected",
       "options": {
         "alertThreshold": true
       },
       "paceLength": 10,
-      "pluginVersion": "10.4.3",
+      "percentage": false,
+      "pluginVersion": "9.2.2",
       "pointradius": 5,
+      "points": false,
       "renderer": "flot",
       "seriesOverrides": [
         {
           "alias": "Avg",
           "fill": 0,
-          "linewidth": 3,
-          "$$hashKey": "object:48"
+          "linewidth": 3
         },
         {
           "alias": "99%",
           "color": "#C4162A",
-          "fillBelowTo": "90%",
-          "$$hashKey": "object:49"
+          "fillBelowTo": "90%"
         },
         {
           "alias": "90%",
           "color": "#FF7383",
-          "fillBelowTo": "75%",
-          "$$hashKey": "object:50"
+          "fillBelowTo": "75%"
         },
         {
           "alias": "75%",
           "color": "#FFEE52",
-          "fillBelowTo": "50%",
-          "$$hashKey": "object:51"
+          "fillBelowTo": "50%"
         },
         {
           "alias": "50%",
           "color": "#73BF69",
-          "fillBelowTo": "25%",
-          "$$hashKey": "object:52"
+          "fillBelowTo": "25%"
         },
         {
           "alias": "25%",
           "color": "#1F60C4",
-          "fillBelowTo": "5%",
-          "$$hashKey": "object:53"
+          "fillBelowTo": "5%"
         },
         {
           "alias": "5%",
-          "lines": false,
-          "$$hashKey": "object:54"
+          "lines": false
         },
         {
           "alias": "Average",
           "color": "rgb(255, 255, 255)",
           "lines": true,
-          "linewidth": 3,
-          "$$hashKey": "object:55"
+          "linewidth": 3
         },
         {
-          "alias": "Local events being persisted",
+          "alias": "Events",
-          "color": "#96d98D",
-          "points": true,
-          "yaxis": 2,
-          "zindex": -3,
-          "$$hashKey": "object:56"
-        },
-        {
-          "$$hashKey": "object:329",
           "color": "#B877D9",
-          "alias": "All events being persisted",
+          "hideTooltip": true,
           "points": true,
           "yaxis": 2,
           "zindex": -3
         }
       ],
       "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
       "targets": [
         {
           "datasource": {
@@ -389,20 +384,7 @@
           },
           "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
           "legendFormat": "Average",
-          "refId": "H",
-          "editorMode": "code",
-          "range": true
-        },
-        {
-          "datasource": {
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "expr": "sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
-          "hide": false,
-          "instant": false,
-          "legendFormat": "Local events being persisted",
-          "refId": "E",
-          "editorMode": "code"
+          "refId": "H"
         },
         {
           "datasource": {
@@ -411,9 +393,8 @@
           "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))",
           "hide": false,
           "instant": false,
-          "legendFormat": "All events being persisted",
-          "refId": "I",
-          "editorMode": "code"
+          "legendFormat": "Events",
+          "refId": "E"
         }
       ],
       "thresholds": [
@@ -447,9 +428,7 @@
       "xaxis": {
         "mode": "time",
         "show": true,
-        "values": [],
-        "name": null,
-        "buckets": null
+        "values": []
       },
       "yaxes": [
         {
@@ -471,20 +450,7 @@
       ],
       "yaxis": {
         "align": false
-      },
-      "bars": false,
-      "dashes": false,
-      "description": "",
-      "fill": 0,
-      "fillGradient": 0,
-      "hiddenSeries": false,
-      "linewidth": 0,
-      "percentage": false,
-      "points": false,
-      "stack": false,
-      "steppedLine": false,
-      "timeFrom": null,
-      "timeShift": null
+      }
     },
     {
       "aliasColors": {},
(contrib Python graph script — file name not captured)

@@ -45,10 +45,6 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
     colors = {"red", "green", "blue", "yellow", "purple"}

     for pdu in pdus:
-        # TODO: The "origin" field has since been removed from events generated
-        # by Synapse. We should consider removing it here as well but since this
-        # is part of `contrib/`, it is left for the community to revise and ensure things
-        # still work correctly.
         origins.add(pdu.get("origin"))

     color_map = {color: color for color in colors if color in origins}
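
As the removed TODO notes, newer Synapse-generated events no longer carry an "origin" field, so `pdu.get("origin")` can return `None`. A defensive variant of the loop above (a sketch, not part of either branch):

```python
for pdu in pdus:
    origin = pdu.get("origin")  # absent on events created by newer Synapse
    if origin is not None:
        origins.add(origin)
```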
debian/changelog (vendored) — 30 changes

@@ -1,33 +1,3 @@
-matrix-synapse-py3 (1.133.0) stable; urgency=medium
-
-  * New synapse release 1.133.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Jul 2025 13:13:24 +0000
-
-matrix-synapse-py3 (1.133.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.133.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 24 Jun 2025 11:57:47 +0100
-
-matrix-synapse-py3 (1.132.0) stable; urgency=medium
-
-  * New Synapse release 1.132.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 17 Jun 2025 13:16:20 +0100
-
-matrix-synapse-py3 (1.132.0~rc1) stable; urgency=medium
-
-  * New Synapse release 1.132.0rc1.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 10 Jun 2025 11:15:18 +0100
-
-matrix-synapse-py3 (1.131.0) stable; urgency=medium
-
-  * New Synapse release 1.131.0.
-
- -- Synapse Packaging team <packages@matrix.org>  Tue, 03 Jun 2025 14:36:55 +0100
-
 matrix-synapse-py3 (1.131.0~rc1) stable; urgency=medium

   * New synapse release 1.131.0rc1.
(homeserver configuration file — file name not captured)

@@ -127,8 +127,6 @@ experimental_features:
   msc3983_appservice_otk_claims: true
   # Proxy key queries to exclusive ASes
   msc3984_appservice_key_query: true
-  # Invite filtering
-  msc4155_enabled: true

 server_notices:
   system_mxid_localpart: _server
(docs README — file name not captured)

@@ -63,18 +63,6 @@ mdbook serve

 The URL at which the docs can be viewed at will be logged.

-## Synapse configuration documentation
-
-The [Configuration
-Manual](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html)
-page is generated from a YAML file,
-[schema/synapse-config.schema.yaml](../schema/synapse-config.schema.yaml). To
-add new options or modify existing ones, first edit that file, then run
-[scripts-dev/gen_config_documentation.py](../scripts-dev/gen_config_documentation.py)
-to generate an updated Configuration Manual markdown file.
-
-Build the book as described above to preview it in a web browser.
-
 ## Configuration and theming

 The look and behaviour of the website is configured by the [book.toml](../book.toml) file
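
The removed section describes regenerating the Configuration Manual from the YAML schema. A sketch of that regeneration step driven from Python (paths are those named above; assumes it is run from the repository root):

```python
import subprocess

# Regenerate the Configuration Manual, mirroring the documented
# `scripts-dev/gen_config_documentation.py schema/... > docs/...` flow.
result = subprocess.run(
    [
        "scripts-dev/gen_config_documentation.py",
        "schema/synapse-config.schema.yaml",
    ],
    capture_output=True,
    text=True,
    check=True,
)
with open("docs/usage/configuration/config_documentation.md", "w") as f:
    f.write(result.stdout)
```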
(docs SUMMARY — file name not captured)

@@ -49,8 +49,6 @@
   - [Background update controller callbacks](modules/background_update_controller_callbacks.md)
   - [Account data callbacks](modules/account_data_callbacks.md)
   - [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md)
-  - [Media repository callbacks](modules/media_repository_callbacks.md)
-  - [Ratelimit callbacks](modules/ratelimit_callbacks.md)
   - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
 - [Workers](workers.md)
   - [Using `synctl` with Workers](synctl_workers.md)
@@ -68,7 +66,6 @@
   - [Registration Tokens](usage/administration/admin_api/registration_tokens.md)
   - [Manipulate Room Membership](admin_api/room_membership.md)
   - [Rooms](admin_api/rooms.md)
-  - [Scheduled tasks](admin_api/scheduled_tasks.md)
   - [Server Notices](admin_api/server_notices.md)
   - [Statistics](admin_api/statistics.md)
   - [Users](admin_api/user_admin_api.md)
(Rooms Admin API documentation — file name not captured)

@@ -117,6 +117,7 @@ It returns a JSON body like the following:
   "hashes": {
     "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
   },
+  "origin": "matrix.org",
   "origin_server_ts": 1592291711430,
   "prev_events": [
     "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
@@ -806,7 +806,7 @@ A response body like the following is returned:
 }, {
     "delete_id": "delete_id2",
     "room_id": "!roomid:example.com",
-    "status": "active",
+    "status": "purging",
     "shutdown_room": {
         "kicked_users": [
             "@foobar:example.com"
@@ -843,7 +843,7 @@ A response body like the following is returned:

 ```json
 {
-    "status": "active",
+    "status": "purging",
     "delete_id": "bHkCNQpHqOaFhPtK",
     "room_id": "!roomid:example.com",
     "shutdown_room": {
@@ -876,8 +876,8 @@ The following fields are returned in the JSON response body:
 - `delete_id` - The ID for this purge
 - `room_id` - The ID of the room being deleted
 - `status` - The status will be one of:
-  - `scheduled` - The deletion is waiting to be started
-  - `active` - The process is purging the room and event data from database.
+  - `shutting_down` - The process is removing users from the room.
+  - `purging` - The process is purging the room and event data from database.
   - `complete` - The process has completed successfully.
   - `failed` - The process is aborted, an error has occurred.
 - `error` - A string that shows an error message if `status` is `failed`.
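
For context, a client-side sketch that polls a room deletion until it finishes, driven by the `status` field described above. This is illustrative only: the base URL and endpoint path are assumptions about a typical Synapse Admin API deployment, not something captured in this diff.

```python
import time

import requests

ADMIN_TOKEN = "..."  # an admin user's access token (placeholder)
# Assumed endpoint shape for fetching a deletion's status by delete_id.
URL = "http://localhost:8008/_synapse/admin/v2/rooms/delete_status/bHkCNQpHqOaFhPtK"

while True:
    resp = requests.get(URL, headers={"Authorization": f"Bearer {ADMIN_TOKEN}"})
    resp.raise_for_status()
    body = resp.json()
    status = body["status"]  # shutting_down / purging / complete / failed
    if status == "complete":
        break
    if status == "failed":
        raise RuntimeError(body.get("error", "room deletion failed"))
    time.sleep(5)
```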
|
@ -163,8 +163,7 @@ Body parameters:
|
|||||||
- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
|
- `locked` - **bool**, optional. If unspecified, locked state will be left unchanged.
|
||||||
- `user_type` - **string** or null, optional. If not provided, the user type will be
|
- `user_type` - **string** or null, optional. If not provided, the user type will be
|
||||||
not be changed. If `null` is given, the user type will be cleared.
|
not be changed. If `null` is given, the user type will be cleared.
|
||||||
Other allowed options are: `bot` and `support` and any extra values defined in the homserver
|
Other allowed options are: `bot` and `support`.
|
||||||
[configuration](../usage/configuration/config_documentation.md#user_types).
|
|
||||||
|
|
||||||
## List Accounts
|
## List Accounts
|
||||||
### List Accounts (V2)
|
### List Accounts (V2)
|
||||||
@ -955,8 +954,7 @@ A response body like the following is returned:
|
|||||||
"last_seen_ip": "1.2.3.4",
|
"last_seen_ip": "1.2.3.4",
|
||||||
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
|
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
"last_seen_ts": 1474491775024,
|
"last_seen_ts": 1474491775024,
|
||||||
"user_id": "<user_id>",
|
"user_id": "<user_id>"
|
||||||
"dehydrated": false
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"device_id": "AUIECTSRND",
|
"device_id": "AUIECTSRND",
|
||||||
@ -964,8 +962,7 @@ A response body like the following is returned:
|
|||||||
"last_seen_ip": "1.2.3.5",
|
"last_seen_ip": "1.2.3.5",
|
||||||
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
|
"last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
"last_seen_ts": 1474491775025,
|
"last_seen_ts": 1474491775025,
|
||||||
"user_id": "<user_id>",
|
"user_id": "<user_id>"
|
||||||
"dehydrated": false
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"total": 2
|
"total": 2
|
||||||
@ -995,7 +992,6 @@ The following fields are returned in the JSON response body:
|
|||||||
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
|
- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
|
||||||
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
|
devices was last seen. (May be a few minutes out of date, for efficiency reasons).
|
||||||
- `user_id` - Owner of device.
|
- `user_id` - Owner of device.
|
||||||
- `dehydrated` - Whether the device is a dehydrated device.
|
|
||||||
|
|
||||||
- `total` - Total number of user's devices.
|
- `total` - Total number of user's devices.
|
||||||
|
|
||||||
|
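
A sketch of setting the `user_type` body parameter described above. The base URL and endpoint path are assumptions for illustration, not taken from the captured diff:

```python
import requests

ADMIN_TOKEN = "..."  # an admin user's access token (placeholder)
# Assumed endpoint shape for the Create or Modify Account admin API.
url = "http://localhost:8008/_synapse/admin/v2/users/@alice:example.com"

# Mark the account as a bot; passing None (JSON null) would clear the type.
resp = requests.put(
    url,
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"user_type": "bot"},
)
resp.raise_for_status()
```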
docs/modules/media_repository_callbacks.md — file deleted

@@ -1,66 +0,0 @@
-# Media repository callbacks
-
-Media repository callbacks allow module developers to customise the behaviour of the
-media repository on a per user basis. Media repository callbacks can be registered
-using the module API's `register_media_repository_callbacks` method.
-
-The available media repository callbacks are:
-
-### `get_media_config_for_user`
-
-_First introduced in Synapse v1.132.0_
-
-```python
-async def get_media_config_for_user(user_id: str) -> Optional[JsonDict]
-```
-
-**<span style="color:red">
-Caution: This callback is currently experimental. The method signature or behaviour
-may change without notice.
-</span>**
-
-Called when processing a request from a client for the
-[media config endpoint](https://spec.matrix.org/latest/client-server-api/#get_matrixclientv1mediaconfig).
-
-The arguments passed to this callback are:
-
-* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
-
-If the callback returns a dictionary then it will be used as the body of the response to the
-client.
-
-If multiple modules implement this callback, they will be considered in order. If a
-callback returns `None`, Synapse falls through to the next one. The value of the first
-callback that does not return `None` will be used. If this happens, Synapse will not call
-any of the subsequent implementations of this callback.
-
-If no module returns a non-`None` value then the default media config will be returned.
-
-### `is_user_allowed_to_upload_media_of_size`
-
-_First introduced in Synapse v1.132.0_
-
-```python
-async def is_user_allowed_to_upload_media_of_size(user_id: str, size: int) -> bool
-```
-
-**<span style="color:red">
-Caution: This callback is currently experimental. The method signature or behaviour
-may change without notice.
-</span>**
-
-Called before media is accepted for upload from a user, in case the module needs to
-enforce a different limit for the particular user.
-
-The arguments passed to this callback are:
-
-* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request.
-* `size`: The size in bytes of media that is being requested to upload.
-
-If the module returns `False`, the current request will be denied with the error code
-`M_TOO_LARGE` and the HTTP status code 413.
-
-If multiple modules implement this callback, they will be considered in order. If a callback
-returns `True`, Synapse falls through to the next one. The value of the first callback that
-returns `False` will be used. If this happens, Synapse will not call any of the subsequent
-implementations of this callback.
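
A minimal module sketch implementing the two callbacks documented in the deleted page. Registration via `register_media_repository_callbacks` follows that page's description; the config keys and upload cap are invented for the example:

```python
from typing import Optional

from synapse.module_api import ModuleApi

JsonDict = dict


class MediaLimitsModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Hypothetical per-user upload cap for the example (50 MiB default).
        self._max_upload_size = config.get("max_upload_size", 50 * 1024 * 1024)
        api.register_media_repository_callbacks(
            get_media_config_for_user=self.get_media_config_for_user,
            is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size,
        )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        # Returning None would fall through to the next module / default config.
        return {"m.upload.size": self._max_upload_size}

    async def is_user_allowed_to_upload_media_of_size(
        self, user_id: str, size: int
    ) -> bool:
        # False rejects the upload with M_TOO_LARGE / HTTP 413.
        return size <= self._max_upload_size
```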
docs/modules/ratelimit_callbacks.md — file deleted

@@ -1,43 +0,0 @@
-# Ratelimit callbacks
-
-Ratelimit callbacks allow module developers to override ratelimit settings dynamically whilst
-Synapse is running. Ratelimit callbacks can be registered using the module API's
-`register_ratelimit_callbacks` method.
-
-The available ratelimit callbacks are:
-
-### `get_ratelimit_override_for_user`
-
-_First introduced in Synapse v1.132.0_
-
-```python
-async def get_ratelimit_override_for_user(user: str, limiter_name: str) -> Optional[synapse.module_api.RatelimitOverride]
-```
-
-**<span style="color:red">
-Caution: This callback is currently experimental. The method signature or behaviour
-may change without notice.
-</span>**
-
-Called when constructing a ratelimiter of a particular type for a user. The module can
-return a `messages_per_second` and `burst_count` to be used, or `None` if
-the default settings are adequate. The user is represented by their Matrix user ID
-(e.g. `@alice:example.com`). The limiter name is usually taken from the `RatelimitSettings` key
-value.
-
-The limiters that are currently supported are:
-
-- `rc_invites.per_room`
-- `rc_invites.per_user`
-- `rc_invites.per_issuer`
-
-The `RatelimitOverride` return type has the following fields:
-
-- `per_second: float`. The number of actions that can be performed in a second. `0.0` means that ratelimiting is disabled.
-- `burst_count: int`. The number of actions that can be performed before being limited.
-
-If multiple modules implement this callback, they will be considered in order. If a
-callback returns `None`, Synapse falls through to the next one. The value of the first
-callback that does not return `None` will be used. If this happens, Synapse will not call
-any of the subsequent implementations of this callback. If no module returns a non-`None` value
-then the default settings will be used.
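
A module sketch using the callback documented in the deleted page. The `RatelimitOverride` fields and the `per_second=0.0` "ratelimiting disabled" semantics come from that page; the allow-list config is invented for the example:

```python
from typing import Optional

from synapse.module_api import ModuleApi, RatelimitOverride


class InviteRatelimitModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Hypothetical allow-list of users exempt from invite ratelimits.
        self._unlimited_users = set(config.get("unlimited_users", []))
        api.register_ratelimit_callbacks(
            get_ratelimit_override_for_user=self.get_ratelimit_override_for_user,
        )

    async def get_ratelimit_override_for_user(
        self, user: str, limiter_name: str
    ) -> Optional[RatelimitOverride]:
        if limiter_name.startswith("rc_invites.") and user in self._unlimited_users:
            # per_second=0.0 disables ratelimiting per the documentation above.
            return RatelimitOverride(per_second=0.0, burst_count=0)
        return None  # fall through to other modules / default settings
```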
(spam checker callbacks documentation — file name not captured)

@@ -80,8 +80,6 @@ Called when processing an invitation, both when one is created locally or when
 receiving an invite over federation. Both inviter and invitee are represented by
 their Matrix user ID (e.g. `@alice:example.com`).

-Note that federated invites will call `federated_user_may_invite` before this callback.
-
 The callback must return one of:
 - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
@@ -99,34 +97,6 @@ be used. If this happens, Synapse will not call any of the subsequent implementa
 this callback.


-### `federated_user_may_invite`
-
-_First introduced in Synapse v1.133.0_
-
-```python
-async def federated_user_may_invite(event: "synapse.events.EventBase") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
-```
-
-Called when processing an invitation received over federation. Unlike `user_may_invite`,
-this callback receives the entire event, including any stripped state in the `unsigned`
-section, not just the room and user IDs.
-
-The callback must return one of:
-- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
-  decide to reject it.
-- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
-  of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
-
-If multiple modules implement this callback, they will be considered in order. If a
-callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
-The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
-be used. If this happens, Synapse will not call any of the subsequent implementations of
-this callback.
-
-If all of the callbacks return `synapse.module_api.NOT_SPAM`, Synapse will also fall
-through to the `user_may_invite` callback before approving the invite.
-
-
 ### `user_may_send_3pid_invite`

 _First introduced in Synapse v1.45.0_
|
|||||||
|
|
||||||
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
|
_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._
|
||||||
|
|
||||||
_Changed in Synapse v1.132.0: Added the `room_config` argument. Callbacks that only expect a single `user_id` argument are still supported._
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
async def user_may_create_room(user_id: str, room_config: synapse.module_api.JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
|
async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
|
||||||
```
|
```
|
||||||
|
|
||||||
Called when processing a room creation request.
|
Called when processing a room creation request.
|
||||||
|
|
||||||
The arguments passed to this callback are:
|
|
||||||
|
|
||||||
* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`).
|
|
||||||
* `room_config`: The contents of the body of a [/createRoom request](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom) as a dictionary.
|
|
||||||
|
|
||||||
The callback must return one of:
|
The callback must return one of:
|
||||||
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
|
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
|
||||||
decide to reject it.
|
decide to reject it.
|
||||||
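
A sketch using the develop-side `user_may_create_room` signature, which receives the `/createRoom` request body as `room_config` (per the removed argument documentation). The policy shown — only allowing public rooms — is invented for illustration:

```python
from typing import Union

from synapse.module_api import NOT_SPAM, JsonDict, ModuleApi
from synapse.module_api.errors import Codes


class RoomCreationChecker:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            user_may_create_room=self.user_may_create_room,
        )

    async def user_may_create_room(
        self, user_id: str, room_config: JsonDict
    ) -> Union[str, Codes]:
        # room_config is the body of the /createRoom request as a dictionary.
        # Example (invented) policy: only allow publicly listed rooms.
        if room_config.get("visibility", "private") != "public":
            return Codes.FORBIDDEN
        return NOT_SPAM
```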
@@ -276,41 +239,6 @@ be used. If this happens, Synapse will not call any of the subsequent implementa
 this callback.


-### `user_may_send_state_event`
-
-_First introduced in Synapse v1.132.0_
-
-```python
-async def user_may_send_state_event(user_id: str, room_id: str, event_type: str, state_key: str, content: JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
-```
-
-**<span style="color:red">
-Caution: This callback is currently experimental. The method signature or behaviour
-may change without notice.
-</span>**
-
-Called when processing a request to [send state events](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey) to a room.
-
-The arguments passed to this callback are:
-
-* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) sending the state event.
-* `room_id`: The ID of the room that the requested state event is being sent to.
-* `event_type`: The requested type of event.
-* `state_key`: The requested state key.
-* `content`: The requested event contents.
-
-The callback must return one of:
-- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
-  decide to reject it.
-- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
-  of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
-
-If multiple modules implement this callback, they will be considered in order. If a
-callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
-The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
-be used. If this happens, Synapse will not call any of the subsequent implementations of
-this callback.
-
-
 ### `check_username_for_spam`

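
A sketch implementing the removed `user_may_send_state_event` callback; the signature comes from the removed documentation, while the rule shown (forbidding `m.room.pinned_events` updates) is invented for illustration:

```python
from typing import Union

from synapse.module_api import NOT_SPAM, JsonDict, ModuleApi
from synapse.module_api.errors import Codes


class StateEventChecker:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            user_may_send_state_event=self.user_may_send_state_event,
        )

    async def user_may_send_state_event(
        self,
        user_id: str,
        room_id: str,
        event_type: str,
        state_key: str,
        content: JsonDict,
    ) -> Union[str, Codes]:
        # Example (invented) policy: nobody may change pinned events.
        if event_type == "m.room.pinned_events":
            return Codes.FORBIDDEN
        return NOT_SPAM
```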
(reverse proxy documentation — file name not captured)

@@ -5,10 +5,10 @@ It is recommended to put a reverse proxy such as
 [Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
 [Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy),
 [HAProxy](https://www.haproxy.org/) or
-[relayd](https://man.openbsd.org/relayd.8) in front of Synapse.
-This has the advantage of being able to expose the default HTTPS port (443) to Matrix
-clients without requiring Synapse to bind to a privileged port (port numbers less than
-1024), avoiding the need for `CAP_NET_BIND_SERVICE` or running as root.
+[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage
+of doing so is that it means that you can expose the default https port
+(443) to Matrix clients without needing to run Synapse with root
+privileges.

 You should configure your reverse proxy to forward requests to `/_matrix` or
 `/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and
|
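As a quick sanity check of a proxy set up this way (an illustrative snippet; `matrix.example.com` is a placeholder hostname and the third-party `requests` package is assumed to be installed), the versions endpoint should answer on the default HTTPS port:

```python
import requests

# Placeholder hostname: the client-server API should be reachable through the
# reverse proxy on 443, while Synapse itself stays on an unprivileged port.
resp = requests.get("https://matrix.example.com/_matrix/client/versions")
print(resp.status_code, resp.json())
```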
@@ -63,7 +63,7 @@ class ExampleSpamChecker:
     async def user_may_invite(self, inviter_userid, invitee_userid, room_id):
         return True  # allow all invites
 
-    async def user_may_create_room(self, userid, room_config):
+    async def user_may_create_room(self, userid):
         return True  # allow all room creations
 
     async def user_may_create_room_alias(self, userid, room_alias):
@@ -255,7 +255,7 @@ line to `/etc/default/matrix-synapse`:
 
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
 
-*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
+*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
 
 This made a significant difference on Python 2.7 - it's unclear how
 much of an improvement it provides on Python 3.x.
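One way to confirm the preload took effect (an illustrative check, assuming Linux) is to look for the library in the Python process's memory maps:

```python
# Illustrative check, assuming Linux: report whether libjemalloc is mapped
# into this Python process, i.e. whether LD_PRELOAD took effect.
with open("/proc/self/maps") as maps:
    loaded = any("libjemalloc" in line for line in maps)
print("jemalloc loaded:", loaded)
```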
File diff suppressed because it is too large.

poetry.lock (generated; 129 changed lines)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"

@@ -39,7 +39,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect
 optional = true
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\""
 files = [
     {file = "authlib-1.5.2-py2.py3-none-any.whl", hash = "sha256:8804dd4402ac5e4a0435ac49e0b6e19e395357cfa632a3f624dcb4f6df13b4b1"},
     {file = "authlib-1.5.2.tar.gz", hash = "sha256:fe85ec7e50c5f86f1e2603518bb3b4f632985eb4a355e52256530790e326c512"},

@@ -50,18 +50,19 @@ cryptography = "*"
 
 [[package]]
 name = "automat"
-version = "25.4.16"
+version = "22.10.0"
 description = "Self-service finite-state machines for the programmer on the go."
 optional = false
-python-versions = ">=3.9"
+python-versions = "*"
 groups = ["main"]
 files = [
-    {file = "automat-25.4.16-py3-none-any.whl", hash = "sha256:04e9bce696a8d5671ee698005af6e5a9fa15354140a87f4870744604dcdd3ba1"},
+    {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
-    {file = "automat-25.4.16.tar.gz", hash = "sha256:0017591a5477066e90d26b0e696ddc143baafd87b588cfac8100bc6be9634de0"},
+    {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"},
 ]
 
 [package.dependencies]
-typing_extensions = {version = "*", markers = "python_version < \"3.10\""}
+attrs = ">=19.2.0"
+six = "*"
 
 [package.extras]
 visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]

@@ -450,7 +451,7 @@ description = "XML bomb protection for Python stdlib modules"
 optional = true
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
     {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},

@@ -493,7 +494,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l
 optional = true
 python-versions = ">=3.7"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"},
     {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"},

@@ -543,7 +544,7 @@ description = "Python wrapper for hiredis"
 optional = true
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "extra == \"redis\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"redis\""
 files = [
     {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"},
     {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"},

@@ -889,7 +890,7 @@ description = "Jaeger Python OpenTracing Tracer implementation"
 optional = true
 python-versions = ">=3.7"
 groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"},
 ]

@@ -1027,7 +1028,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
 files = [
     {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"},
     {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},

@@ -1043,7 +1044,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li
 optional = true
 python-versions = ">=3.6"
 groups = ["main"]
-markers = "extra == \"url-preview\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"url-preview\""
 files = [
     {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"},
     {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"},

@@ -1323,7 +1324,7 @@ description = "An LDAP3 auth provider for Synapse"
 optional = true
 python-versions = ">=3.7"
 groups = ["main"]
-markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\""
 files = [
     {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"},
     {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"},

@@ -1544,7 +1545,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
 ]

@@ -1713,7 +1714,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter"
 optional = true
 python-versions = ">=3.8"
 groups = ["main"]
-markers = "extra == \"postgres\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"postgres\""
 files = [
     {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
     {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},

@@ -1721,6 +1722,7 @@ files = [
     {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
     {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
     {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
+    {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
     {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
     {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
     {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},

@@ -1733,7 +1735,7 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
 files = [
     {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"},
 ]

@@ -1749,7 +1751,7 @@ description = "A Simple library to enable psycopg2 compatability"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")"
+markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")"
 files = [
     {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"},
 ]

@@ -1771,18 +1773,18 @@ files = [
 
 [[package]]
 name = "pyasn1-modules"
-version = "0.4.2"
+version = "0.4.1"
 description = "A collection of ASN.1-based protocols modules"
 optional = false
 python-versions = ">=3.8"
 groups = ["main"]
 files = [
-    {file = "pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a"},
+    {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
-    {file = "pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6"},
+    {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
 ]
 
 [package.dependencies]
-pyasn1 = ">=0.6.1,<0.7.0"
+pyasn1 = ">=0.4.6,<0.7.0"
 
 [[package]]
 name = "pycparser"

@@ -1972,7 +1974,7 @@ description = "Python extension wrapping the ICU C++ API"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"user-search\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"user-search\""
 files = [
     {file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"},
 ]

@@ -2021,7 +2023,7 @@ description = "A development tool to measure, monitor and analyze the memory beh
 optional = true
 python-versions = ">=3.6"
 groups = ["main"]
-markers = "extra == \"cache-memory\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"cache-memory\""
 files = [
     {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"},
     {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"},

@@ -2081,7 +2083,7 @@ description = "Python implementation of SAML Version 2 Standard"
 optional = true
 python-versions = ">=3.9,<4.0"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"},
     {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"},

@@ -2106,7 +2108,7 @@ description = "Extensions to the standard Python datetime module"
 optional = true
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
     {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},

@@ -2134,7 +2136,7 @@ description = "World timezone definitions, modern and historical"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
     {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},

@@ -2254,19 +2256,19 @@ rpds-py = ">=0.7.0"
 
 [[package]]
 name = "requests"
-version = "2.32.4"
+version = "2.32.2"
 description = "Python HTTP for Humans."
 optional = false
 python-versions = ">=3.8"
 groups = ["main", "dev"]
 files = [
-    {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"},
+    {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"},
-    {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"},
+    {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"},
 ]
 
 [package.dependencies]
 certifi = ">=2017.4.17"
-charset_normalizer = ">=2,<4"
+charset-normalizer = ">=2,<4"
 idna = ">=2.5,<4"
 urllib3 = ">=1.21.1,<3"
 

@@ -2498,7 +2500,7 @@ description = "Python client for Sentry (https://sentry.io)"
 optional = true
 python-versions = ">=3.6"
 groups = ["main"]
-markers = "extra == \"sentry\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"sentry\""
 files = [
     {file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"},
     {file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"},

@@ -2686,7 +2688,7 @@ description = "Tornado IOLoop Backed Concurrent Futures"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
     {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},

@@ -2702,7 +2704,7 @@ description = "Python bindings for the Apache Thrift RPC system"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
 ]

@@ -2764,7 +2766,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib
 optional = true
 python-versions = ">=3.9"
 groups = ["main"]
-markers = "extra == \"opentracing\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
     {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},

@@ -2855,19 +2857,19 @@ keyring = ["keyring (>=15.1)"]
 
 [[package]]
 name = "twisted"
-version = "25.5.0"
+version = "24.7.0"
 description = "An asynchronous networking framework written in Python"
 optional = false
 python-versions = ">=3.8.0"
 groups = ["main"]
 files = [
-    {file = "twisted-25.5.0-py3-none-any.whl", hash = "sha256:8559f654d01a54a8c3efe66d533d43f383531ebf8d81d9f9ab4769d91ca15df7"},
+    {file = "twisted-24.7.0-py3-none-any.whl", hash = "sha256:734832ef98108136e222b5230075b1079dad8a3fc5637319615619a7725b0c81"},
-    {file = "twisted-25.5.0.tar.gz", hash = "sha256:1deb272358cb6be1e3e8fc6f9c8b36f78eb0fa7c2233d2dbe11ec6fee04ea316"},
+    {file = "twisted-24.7.0.tar.gz", hash = "sha256:5a60147f044187a127ec7da96d170d49bcce50c6fd36f594e60f4587eff4d394"},
 ]
 
 [package.dependencies]
-attrs = ">=22.2.0"
+attrs = ">=21.3.0"
-automat = ">=24.8.0"
+automat = ">=0.8.0"
 constantly = ">=15.1"
 hyperlink = ">=17.1.1"
 idna = {version = ">=2.4", optional = true, markers = "extra == \"tls\""}

@@ -2878,20 +2880,19 @@ typing-extensions = ">=4.2.0"
 zope-interface = ">=5"
 
 [package.extras]
-all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
+all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
-dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "pydoctor (>=24.11.1,<24.12.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
+dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
-dev-release = ["pydoctor (>=24.11.1,<24.12.0)", "pydoctor (>=24.11.1,<24.12.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
+dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
-gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
+gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
-http2 = ["h2 (>=3.2,<5.0)", "priority (>=1.1.0,<2.0)"]
+http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
+macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
-mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (==1.10.1)", "mypy-zope (==1.0.6)", "priority (>=1.1.0,<2.0)", "pydoctor (>=24.11.1,<24.12.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools", "wsproto"]
+mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
-osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core (<11) ; python_version < \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-core ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork (<11) ; python_version < \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cfnetwork ; python_version >= \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa (<11) ; python_version < \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyobjc-framework-cocoa ; python_version >= \"3.9\"", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "wsproto", "wsproto"]
+osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 serial = ["pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\""]
-test = ["cython-test-exception-raiser (>=1.0.2,<2)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
+test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
 tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-websocket = ["wsproto"]
-windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.2,<5.0)", "h2 (>=3.2,<5.0)", "httpx[http2] (>=0.27)", "httpx[http2] (>=0.27)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)", "wsproto", "wsproto"]
+windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
 

@@ -2900,7 +2901,7 @@ description = "non-blocking redis client for python"
 optional = true
 python-versions = "*"
 groups = ["main"]
-markers = "extra == \"redis\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"redis\""
 files = [
     {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
     {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},

@@ -2993,14 +2994,14 @@ files = [
 
 [[package]]
 name = "types-opentracing"
-version = "2.4.10.20250622"
+version = "2.4.10.6"
 description = "Typing stubs for opentracing"
 optional = false
-python-versions = ">=3.9"
+python-versions = "*"
 groups = ["dev"]
 files = [
-    {file = "types_opentracing-2.4.10.20250622-py3-none-any.whl", hash = "sha256:26bc21f9e385d54898b47e9bd1fa13f200c2dada50394f6eafd063ed53813062"},
+    {file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"},
-    {file = "types_opentracing-2.4.10.20250622.tar.gz", hash = "sha256:00db48b7f57136c45ac3250218bd0f18b9792566dfcbd5ad1de9f7e180347e74"},
+    {file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"},
 ]
 
 [[package]]

@@ -3057,14 +3058,14 @@ files = [
 
 [[package]]
 name = "types-requests"
-version = "2.32.4.20250611"
+version = "2.32.0.20250328"
 description = "Typing stubs for requests"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"},
+    {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
-    {file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"},
+    {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
 ]
 
 [package.dependencies]

@@ -3123,14 +3124,14 @@ files = [
 
 [[package]]
 name = "urllib3"
-version = "2.5.0"
+version = "2.2.2"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 groups = ["main", "dev"]
 files = [
-    {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
+    {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
-    {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
+    {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
 ]
 
 [package.extras]

@@ -3243,7 +3244,7 @@ description = "An XML Schema validator and decoder"
 optional = true
 python-versions = ">=3.7"
 groups = ["main"]
-markers = "extra == \"saml2\" or extra == \"all\""
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
     {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -74,10 +74,6 @@ select = [
     "PIE",
     # flake8-executable
    "EXE",
-    # flake8-logging
-    "LOG",
-    # flake8-logging-format
-    "G",
 ]
 
 [tool.ruff.lint.isort]
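The `LOG` and `G` selectors present on develop enable ruff's flake8-logging and flake8-logging-format rule sets; as a rough illustration, they flag logging calls that eagerly format their message:

```python
import logging

logger = logging.getLogger(__name__)
user = "alice"

logger.info("user %s logged in", user)  # fine: lazy %-style formatting
logger.info(f"user {user} logged in")   # flagged as G004 (f-string in logging call)
```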
@@ -101,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.133.0"
+version = "1.131.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -7,7 +7,7 @@ name = "synapse"
 version = "0.1.0"
 
 edition = "2021"
-rust-version = "1.81.0"
+rust-version = "1.66.0"
 
 [lib]
 name = "synapse"
@@ -30,27 +30,19 @@ http = "1.1.0"
 lazy_static = "1.4.0"
 log = "0.4.17"
 mime = "0.3.17"
-pyo3 = { version = "0.25.1", features = [
+pyo3 = { version = "0.24.2", features = [
     "macros",
     "anyhow",
     "abi3",
     "abi3-py39",
 ] }
-pyo3-log = "0.12.4"
+pyo3-log = "0.12.0"
-pythonize = "0.25.0"
+pythonize = "0.24.0"
 regex = "1.6.0"
 sha2 = "0.10.8"
 serde = { version = "1.0.144", features = ["derive"] }
 serde_json = "1.0.85"
 ulid = "1.1.2"
-reqwest = { version = "0.12.15", default-features = false, features = [
-    "http2",
-    "stream",
-    "rustls-tls-native-roots",
-] }
-http-body-util = "0.1.3"
-futures = "0.3.31"
-tokio = { version = "1.44.2", features = ["rt", "rt-multi-thread"] }
 
 [features]
 extension-module = ["pyo3/extension-module"]
@@ -58,15 +58,3 @@ impl NotFoundError {
         NotFoundError::new_err(())
     }
 }
-
-import_exception!(synapse.api.errors, HttpResponseException);
-
-impl HttpResponseException {
-    pub fn new(status: StatusCode, bytes: Vec<u8>) -> pyo3::PyErr {
-        HttpResponseException::new_err((
-            status.as_u16(),
-            status.canonical_reason().unwrap_or_default(),
-            bytes,
-        ))
-    }
-}
@ -1,218 +0,0 @@
|
|||||||
/*
|
|
||||||
* This file is licensed under the Affero General Public License (AGPL) version 3.
|
|
||||||
*
|
|
||||||
* Copyright (C) 2025 New Vector, Ltd
|
|
||||||
*
|
|
||||||
* This program is free software: you can redistribute it and/or modify
|
|
||||||
* it under the terms of the GNU Affero General Public License as
|
|
||||||
* published by the Free Software Foundation, either version 3 of the
|
|
||||||
* License, or (at your option) any later version.
|
|
||||||
*
|
|
||||||
* See the GNU Affero General Public License for more details:
|
|
||||||
* <https://www.gnu.org/licenses/agpl-3.0.html>.
|
|
||||||
*/
|
|
||||||
|
|
||||||
use std::{collections::HashMap, future::Future, panic::AssertUnwindSafe, sync::LazyLock};
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use futures::{FutureExt, TryStreamExt};
|
|
||||||
use pyo3::{exceptions::PyException, prelude::*, types::PyString};
|
|
||||||
use reqwest::RequestBuilder;
|
|
||||||
use tokio::runtime::Runtime;
|
|
||||||
|
|
||||||
use crate::errors::HttpResponseException;
|
|
||||||
|
|
||||||
/// The tokio runtime that we're using to run async Rust libs.
|
|
||||||
static RUNTIME: LazyLock<Runtime> = LazyLock::new(|| {
|
|
||||||
tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.worker_threads(4)
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.unwrap()
|
|
||||||
});
|
|
||||||
|
|
||||||
/// A reference to the `Deferred` python class.
|
|
||||||
static DEFERRED_CLASS: LazyLock<PyObject> = LazyLock::new(|| {
|
|
||||||
Python::with_gil(|py| {
|
|
||||||
py.import("twisted.internet.defer")
|
|
||||||
.expect("module 'twisted.internet.defer' should be importable")
|
|
||||||
.getattr("Deferred")
|
|
||||||
.expect("module 'twisted.internet.defer' should have a 'Deferred' class")
|
|
||||||
.unbind()
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
/// A reference to the twisted `reactor`.
|
|
||||||
static TWISTED_REACTOR: LazyLock<Py<PyModule>> = LazyLock::new(|| {
|
|
||||||
Python::with_gil(|py| {
|
|
||||||
py.import("twisted.internet.reactor")
|
|
||||||
.expect("module 'twisted.internet.reactor' should be importable")
|
|
||||||
.unbind()
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
/// Called when registering modules with python.
|
|
||||||
pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
|
|
||||||
let child_module: Bound<'_, PyModule> = PyModule::new(py, "http_client")?;
|
|
||||||
child_module.add_class::<HttpClient>()?;
|
|
||||||
|
|
||||||
// Make sure we fail early if we can't build the lazy statics.
|
|
||||||
LazyLock::force(&RUNTIME);
|
|
||||||
LazyLock::force(&DEFERRED_CLASS);
|
|
||||||
|
|
||||||
m.add_submodule(&child_module)?;
|
|
||||||
|
|
||||||
// We need to manually add the module to sys.modules to make `from
|
|
||||||
// synapse.synapse_rust import acl` work.
|
|
||||||
py.import("sys")?
|
|
||||||
.getattr("modules")?
|
|
||||||
.set_item("synapse.synapse_rust.http_client", child_module)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[pyclass]
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HttpClient {
|
|
||||||
client: reqwest::Client,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[pymethods]
|
|
||||||
impl HttpClient {
|
|
||||||
#[new]
|
|
||||||
pub fn py_new(user_agent: &str) -> PyResult<HttpClient> {
|
|
||||||
// The twisted reactor can only be imported after Synapse has been
|
|
||||||
// imported, to allow Synapse to change the twisted reactor. If we try
|
|
||||||
        // and import the reactor too early twisted installs a default reactor,
        // which can't be replaced.
        LazyLock::force(&TWISTED_REACTOR);

        Ok(HttpClient {
            client: reqwest::Client::builder()
                .user_agent(user_agent)
                .build()
                .context("building reqwest client")?,
        })
    }

    pub fn get<'a>(
        &self,
        py: Python<'a>,
        url: String,
        response_limit: usize,
    ) -> PyResult<Bound<'a, PyAny>> {
        self.send_request(py, self.client.get(url), response_limit)
    }

    pub fn post<'a>(
        &self,
        py: Python<'a>,
        url: String,
        response_limit: usize,
        headers: HashMap<String, String>,
        request_body: String,
    ) -> PyResult<Bound<'a, PyAny>> {
        let mut builder = self.client.post(url);
        for (name, value) in headers {
            builder = builder.header(name, value);
        }
        builder = builder.body(request_body);

        self.send_request(py, builder, response_limit)
    }
}

impl HttpClient {
    fn send_request<'a>(
        &self,
        py: Python<'a>,
        builder: RequestBuilder,
        response_limit: usize,
    ) -> PyResult<Bound<'a, PyAny>> {
        create_deferred(py, async move {
            let response = builder.send().await.context("sending request")?;

            let status = response.status();

            let mut stream = response.bytes_stream();
            let mut buffer = Vec::new();
            while let Some(chunk) = stream.try_next().await.context("reading body")? {
                if buffer.len() + chunk.len() > response_limit {
                    Err(anyhow::anyhow!("Response size too large"))?;
                }

                buffer.extend_from_slice(&chunk);
            }

            if !status.is_success() {
                return Err(HttpResponseException::new(status, buffer));
            }

            let r = Python::with_gil(|py| buffer.into_pyobject(py).map(|o| o.unbind()))?;

            Ok(r)
        })
    }
}

/// Creates a twisted deferred from the given future, spawning the task on the
/// tokio runtime.
///
/// Does not handle deferred cancellation or contextvars.
fn create_deferred<F, O>(py: Python, fut: F) -> PyResult<Bound<'_, PyAny>>
where
    F: Future<Output = PyResult<O>> + Send + 'static,
    for<'a> O: IntoPyObject<'a>,
{
    let deferred = DEFERRED_CLASS.bind(py).call0()?;
    let deferred_callback = deferred.getattr("callback")?.unbind();
    let deferred_errback = deferred.getattr("errback")?.unbind();

    RUNTIME.spawn(async move {
        // TODO: Is it safe to assert unwind safety here? I think so, as we
        // don't use anything that could be tainted by the panic afterwards.
        // Note that `.spawn(..)` asserts unwind safety on the future too.
        let res = AssertUnwindSafe(fut).catch_unwind().await;

        Python::with_gil(move |py| {
            // Flatten the panic into standard python error
            let res = match res {
                Ok(r) => r,
                Err(panic_err) => {
                    let panic_message = get_panic_message(&panic_err);
                    Err(PyException::new_err(
                        PyString::new(py, panic_message).unbind(),
                    ))
                }
            };

            // Send the result to the deferred, via `.callback(..)` or `.errback(..)`
            match res {
                Ok(obj) => {
                    TWISTED_REACTOR
                        .call_method(py, "callFromThread", (deferred_callback, obj), None)
                        .expect("callFromThread should not fail"); // There's nothing we can really do with errors here
                }
                Err(err) => {
                    TWISTED_REACTOR
                        .call_method(py, "callFromThread", (deferred_errback, err), None)
                        .expect("callFromThread should not fail"); // There's nothing we can really do with errors here
                }
            }
        });
    });

    Ok(deferred)
}

/// Try and get the panic message out of the panic
fn get_panic_message<'a>(panic_err: &'a (dyn std::any::Any + Send + 'static)) -> &'a str {
    // Apparently this is how you extract the panic message from a panic
    if let Some(str_slice) = panic_err.downcast_ref::<&str>() {
        str_slice
    } else if let Some(string) = panic_err.downcast_ref::<String>() {
        string
    } else {
        "unknown error"
    }
}
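From Python, the `get`/`post` methods above return a Twisted `Deferred` rather than a coroutine, so callers simply `await` them. A minimal sketch of how the client is driven from the Python side — the class path and keyword arguments follow the develop code later in this diff, but the surrounding handler is illustrative, not Synapse's actual code:

```python
from synapse.logging.context import PreserveLoggingContext
from synapse.synapse_rust.http_client import HttpClient


async def introspect(client: HttpClient, uri: str, body: str) -> bytes:
    # The Rust client spawns the request on its tokio runtime and resolves
    # the returned Deferred on the twisted reactor via callFromThread.
    with PreserveLoggingContext():
        return await client.post(
            url=uri,
            response_limit=1 * 1024 * 1024,  # refuse bodies over 1 MiB
            headers={"Accept": "application/json"},
            request_body=body,
        )
```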
@ -27,7 +27,7 @@ pub enum IdentifierError
impl fmt::Display for IdentifierError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{self:?}")
        write!(f, "{:?}", self)
    }
}
@ -8,7 +8,6 @@ pub mod acl;
pub mod errors;
pub mod events;
pub mod http;
pub mod http_client;
pub mod identifier;
pub mod matrix_const;
pub mod push;
@ -51,7 +50,6 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
    acl::register_module(py, m)?;
    push::register_module(py, m)?;
    events::register_module(py, m)?;
    http_client::register_module(py, m)?;
    rendezvous::register_module(py, m)?;

    Ok(())
File diff suppressed because it is too large
@ -1,2 +0,0 @@
If you want to update the meta schema, copy this folder and increase its version
number instead.
@ -1,29 +0,0 @@
{
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "$id": "https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json",
    "$vocabulary": {
        "https://json-schema.org/draft/2020-12/vocab/core": true,
        "https://json-schema.org/draft/2020-12/vocab/applicator": true,
        "https://json-schema.org/draft/2020-12/vocab/unevaluated": true,
        "https://json-schema.org/draft/2020-12/vocab/validation": true,
        "https://json-schema.org/draft/2020-12/vocab/meta-data": true,
        "https://json-schema.org/draft/2020-12/vocab/format-annotation": true,
        "https://json-schema.org/draft/2020-12/vocab/content": true,
        "https://element-hq.github.io/synapse/latest/schema/v1/vocab/documentation": false
    },
    "$ref": "https://json-schema.org/draft/2020-12/schema",
    "properties": {
        "io.element.type_name": {
            "type": "string",
            "description": "Human-readable type of a schema that is displayed instead of the standard JSON Schema types like `object` or `integer`. In case the JSON Schema type contains `null`, this information should be presented alongside the human-readable type name.",
            "examples": ["duration", "byte size"]
        },
        "io.element.post_description": {
            "type": "string",
            "description": "Additional description of a schema, better suited to be placed less prominently in the generated documentation, e.g., at the end of a section after listings of items and properties.",
            "examples": [
                "### Advanced uses\n\nThe spent coffee grounds can be added to compost for improving soil and growing plants."
            ]
        }
    }
}
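The two custom keywords declared by this meta schema are plain annotations, so a documentation generator can read them straight off a property's schema dict. A minimal sketch of that lookup, mirroring the `type_str` helper in the generator script further down this diff:

```python
def display_type(values: dict) -> str:
    # Prefer the human-readable override, e.g. "duration" or "byte size".
    if type_name := values.get("io.element.type_name"):
        return f"({type_name})"
    # Otherwise fall back to the JSON Schema type(s).
    types = values.get("type")
    if not types:
        return ""
    if not isinstance(types, list):
        types = [types]
    return "({})".format("|".join(types))


assert display_type({"type": "integer", "io.element.type_name": "duration"}) == "(duration)"
assert display_type({"type": ["string", "null"]}) == "(string|null)"
```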
@ -1,11 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta http-equiv="refresh" content="0; URL=../meta.schema.json">
    <meta charset="UTF-8">
    <title>Redirecting to ../meta.schema.json…</title>
</head>
<body>
    <p>Redirecting to <a href="../meta.schema.json">../meta.schema.json</a>…</p>
</body>
</html>
@ -243,7 +243,7 @@ def do_lint() -> Set[str]:
            importlib.import_module(module_info.name)
        except ModelCheckerException as e:
            logger.warning(
                "Bad annotation found when importing %s", module_info.name
                f"Bad annotation found when importing {module_info.name}"
            )
            failures.add(format_model_checker_exception(e))
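The hunk above swaps an f-string for printf-style logging arguments. Passing the template and its arguments separately defers string interpolation until the record is actually emitted, and lets aggregation tools group messages by their template. A small illustration of the difference (standard-library `logging` behaviour, not Synapse-specific code):

```python
import logging

logger = logging.getLogger("example")
logger.setLevel(logging.ERROR)

name = "synapse.config.server"
# Interpolated eagerly, even though WARNING is below the ERROR threshold:
logger.warning(f"Bad annotation found when importing {name}")
# Interpolated lazily; the % substitution never runs for suppressed records:
logger.warning("Bad annotation found when importing %s", name)
```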
@ -229,7 +229,6 @@ test_packages=(
    ./tests/msc3902
    ./tests/msc3967
    ./tests/msc4140
    ./tests/msc4155
)

# Enable dirty runs, so tests will reuse the same container where possible.
@ -1,503 +0,0 @@
#!/usr/bin/env python3
"""Generate Synapse documentation from JSON Schema file."""

import json
import re
import sys
from typing import Any, Optional

import yaml

HEADER = """<!-- Document auto-generated by scripts-dev/gen_config_documentation.py -->

# Configuring Synapse

This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified
through the many configuration settings documented here — each config option is explained,
including what the default is, how to change the default and what sort of behaviour the setting governs.
Also included is an example configuration for each setting. If you don't want to spend a lot of time
thinking about options, the config as generated sets sensible defaults for all values. Do note however that the
database defaults to SQLite, which is not recommended for production usage. You can read more on this subject
[here](../../setup/installation.md#using-postgresql).

## Config Conventions

Configuration options that take a time period can be set using a number
followed by a letter. Letters have the following meanings:

* `s` = second
* `m` = minute
* `h` = hour
* `d` = day
* `w` = week
* `y` = year

For example, setting `redaction_retention_period: 5m` would remove redacted
messages from the database after 5 minutes, rather than 5 months.

In addition, configuration options referring to size use the following suffixes:

* `K` = KiB, or 1024 bytes
* `M` = MiB, or 1,048,576 bytes
* `G` = GiB, or 1,073,741,824 bytes
* `T` = TiB, or 1,099,511,627,776 bytes

For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes
for a user avatar.

## Config Validation

The configuration file can be validated with the following command:
```bash
python -m synapse.config read <config key to print> -c <path to config>
```

To validate the entire file, omit `read <config key to print>`:
```bash
python -m synapse.config -c <path to config>
```

To see how to set other options, check the help reference:
```bash
python -m synapse.config --help
```

### YAML
The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules
apply if you want your config file to be read properly. A few helpful things to know:
* `#` before any option in the config will comment out that setting and either a default (if available) will
  be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and
  applied, but in example #2 the setting will not be read and a default will be applied.

  Example #1:
  ```yaml
  pid_file: DATADIR/homeserver.pid
  ```
  Example #2:
  ```yaml
  #pid_file: DATADIR/homeserver.pid
  ```
* Indentation matters! The indentation before a setting
  will determine whether a given setting is read as part of another
  setting, or considered on its own. Thus, in example #1, the `enabled` setting
  is read as a sub-option of the `presence` setting, and will be properly applied.

  However, the lack of indentation before the `enabled` setting in example #2 means
  that when reading the config, Synapse will consider both `presence` and `enabled` as
  different settings. In this case, `presence` has no value, and thus a default applied, and `enabled`
  is an option that Synapse doesn't recognize and thus ignores.

  Example #1:
  ```yaml
  presence:
    enabled: false
  ```
  Example #2:
  ```yaml
  presence:
  enabled: false
  ```
In this manual, all top-level settings (ones with no indentation) are identified
at the beginning of their section (i.e. "### `example_setting`") and
the sub-options, if any, are identified and listed in the body of the section.
In addition, each setting has an example of its usage, with the proper indentation
shown.
"""
SECTION_HEADERS = {
    "modules": {
        "title": "Modules",
        "description": (
            "Server admins can expand Synapse's functionality with external "
            "modules.\n\n"
            "See [here](../../modules/index.md) for more documentation on how "
            "to configure or create custom modules for Synapse."
        ),
    },
    "server_name": {
        "title": "Server",
        "description": "Define your homeserver name and other base options.",
    },
    "admin_contact": {
        "title": "Homeserver blocking",
        "description": "Useful options for Synapse admins.",
    },
    "tls_certificate_path": {
        "title": "TLS",
        "description": "Options related to TLS.",
    },
    "federation_domain_whitelist": {
        "title": "Federation",
        "description": "Options related to federation.",
    },
    "event_cache_size": {
        "title": "Caching",
        "description": "Options related to caching.",
    },
    "database": {
        "title": "Database",
        "description": "Config options related to database settings.",
    },
    "log_config": {
        "title": "Logging",
        "description": ("Config options related to logging."),
    },
    "rc_message": {
        "title": "Ratelimiting",
        "description": (
            "Options related to ratelimiting in Synapse.\n\n"
            "Each ratelimiting configuration is made of two parameters:\n"
            "- `per_second`: number of requests a client can send per second.\n"
            "- `burst_count`: number of requests a client can send before "
            "being throttled."
        ),
    },
    "enable_authenticated_media": {
        "title": "Media Store",
        "description": "Config options related to Synapse's media store.",
    },
    "recaptcha_public_key": {
        "title": "Captcha",
        "description": (
            "See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha."
        ),
    },
    "turn_uris": {
        "title": "TURN",
        "description": ("Options related to adding a TURN server to Synapse."),
    },
    "enable_registration": {
        "title": "Registration",
        "description": (
            "Registration can be rate-limited using the parameters in the "
            "[Ratelimiting](#ratelimiting) section of this manual."
        ),
    },
    "session_lifetime": {
        "title": "User session management",
        "description": ("Config options related to user session management."),
    },
    "enable_metrics": {
        "title": "Metrics",
        "description": ("Config options related to metrics."),
    },
    "room_prejoin_state": {
        "title": "API Configuration",
        "description": ("Config settings related to the client/server API."),
    },
    "signing_key_path": {
        "title": "Signing Keys",
        "description": ("Config options relating to signing keys."),
    },
    "saml2_config": {
        "title": "Single sign-on integration",
        "description": (
            "The following settings can be used to make Synapse use a single sign-on provider for authentication, instead of its internal password database.\n\n"
            "You will probably also want to set the following options to `false` to disable the regular login/registration flows:\n"
            "* [`enable_registration`](#enable_registration)\n"
            "* [`password_config.enabled`](#password_config)"
        ),
    },
    "push": {
        "title": "Push",
        "description": ("Configuration settings related to push notifications."),
    },
    "encryption_enabled_by_default_for_room_type": {
        "title": "Rooms",
        "description": ("Config options relating to rooms."),
    },
    "opentracing": {
        "title": "Opentracing",
        "description": ("Configuration options related to Opentracing support."),
    },
    "worker_replication_secret": {
        "title": "Coordinating workers",
        "description": (
            "Configuration options related to workers which belong in the main config file (usually called `homeserver.yaml`). A Synapse deployment can scale horizontally by running multiple Synapse processes called _workers_. Incoming requests are distributed between workers to handle higher loads. Some workers are privileged and can accept requests from other workers.\n\n"
            "As a result, the worker configuration is divided into two parts.\n\n"
            "1. The first part (in this section of the manual) defines which shardable tasks are delegated to privileged workers. This allows unprivileged workers to make requests to a privileged worker to act on their behalf.\n"
            "2. [The second part](#individual-worker-configuration) controls the behaviour of individual workers in isolation.\n\n"
            "For guidance on setting up workers, see the [worker documentation](../../workers.md)."
        ),
    },
    "worker_app": {
        "title": "Individual worker configuration",
        "description": (
            "These options configure an individual worker, in its worker configuration file. They should not be provided when configuring the main process.\n\n"
            "Note also the configuration above for [coordinating a cluster of workers](#coordinating-workers).\n\n"
            "For guidance on setting up workers, see the [worker documentation](../../workers.md)."
        ),
    },
    "background_updates": {
        "title": "Background Updates",
        "description": ("Configuration settings related to background updates."),
    },
    "auto_accept_invites": {
        "title": "Auto Accept Invites",
        "description": (
            "Configuration settings related to automatically accepting invites."
        ),
    },
}
INDENT = " "


has_error = False


def error(text: str) -> None:
    global has_error
    print(f"ERROR: {text}", file=sys.stderr)
    has_error = True


def indent(text: str, first_line: bool = True) -> str:
    """Indents each non-empty line of the given text."""
    text = re.sub(r"(\n)([^\n])", r"\1" + INDENT + r"\2", text)
    if first_line:
        text = re.sub(r"^([^\n])", INDENT + r"\1", text)

    return text


def em(s: Optional[str]) -> str:
    """Add emphasis to text."""
    return f"*{s}*" if s else ""


def a(s: Optional[str], suffix: str = " ") -> str:
    """Appends a space if the given string is not empty."""
    return s + suffix if s else ""


def p(s: Optional[str], prefix: str = " ") -> str:
    """Prepend a space if the given string is not empty."""
    return prefix + s if s else ""


def resolve_local_refs(schema: dict) -> dict:
    """Returns the given schema with local $ref properties replaced by their keywords.

    Crude approximation that will override keywords.
    """
    defs = schema["$defs"]

    def replace_ref(d: Any) -> Any:
        if isinstance(d, dict):
            the_def = {}
            if "$ref" in d:
                # Found a "$ref" key.
                def_name = d["$ref"].removeprefix("#/$defs/")
                del d["$ref"]
                the_def = defs[def_name]

            new_dict = {k: replace_ref(v) for k, v in d.items()}
            if common_keys := (new_dict.keys() & the_def.keys()) - {"properties"}:
                print(
                    f"WARN: '{def_name}' overrides keys '{common_keys}'",
                    file=sys.stderr,
                )

            new_dict_props = new_dict.get("properties", {})
            the_def_props = the_def.get("properties", {})
            if common_props := new_dict_props.keys() & the_def_props.keys():
                print(
                    f"WARN: '{def_name}' overrides properties '{common_props}'",
                    file=sys.stderr,
                )
            if merged_props := {**new_dict_props, **the_def_props}:
                return {**new_dict, **the_def, "properties": merged_props}
            else:
                return {**new_dict, **the_def}

        elif isinstance(d, list):
            return [replace_ref(v) for v in d]
        else:
            return d

    return replace_ref(schema)


def sep(values: dict) -> str:
    """Separator between parts of the description."""
    # If description is multiple paragraphs already, add new ones. Otherwise
    # append to same paragraph.
    return "\n\n" if "\n\n" in values.get("description", "") else " "


def type_str(values: dict) -> str:
    """Type of the current value."""
    if t := values.get("io.element.type_name"):
        # Allow custom overrides for the type name, for documentation clarity
        return f"({t})"
    if not (t := values.get("type")):
        return ""
    if not isinstance(t, list):
        t = [t]
    joined = "|".join(t)
    return f"({joined})"


def items(values: dict) -> str:
    """A block listing properties of array items."""
    if not (items := values.get("items")):
        return ""
    if not (item_props := items.get("properties")):
        return ""
    return "\nOptions for each entry include:\n\n" + "\n".join(
        sub_section(k, v) for k, v in item_props.items()
    )


def properties(values: dict) -> str:
    """A block listing object properties."""
    if not (properties := values.get("properties")):
        return ""
    return "\nThis setting has the following sub-options:\n\n" + "\n".join(
        sub_section(k, v) for k, v in properties.items()
    )


def sub_section(prop: str, values: dict) -> str:
    """Formats a bullet point about the given sub-property."""
    sep = lambda: globals()["sep"](values)
    type_str = lambda: globals()["type_str"](values)
    items = lambda: globals()["items"](values)
    properties = lambda: globals()["properties"](values)

    def default() -> str:
        try:
            default = values["default"]
            return f"Defaults to `{json.dumps(default)}`."
        except KeyError:
            return ""

    def description() -> str:
        if not (description := values.get("description")):
            error(f"missing description for {prop}")
            return "MISSING DESCRIPTION\n"

        return f"{description}{p(default(), sep())}\n"

    return (
        f"* `{prop}`{p(type_str())}: "
        + f"{indent(description(), first_line=False)}"
        + indent(items())
        + indent(properties())
    )


def section(prop: str, values: dict) -> str:
    """Formats a section about the given property."""
    sep = lambda: globals()["sep"](values)
    type_str = lambda: globals()["type_str"](values)
    items = lambda: globals()["items"](values)
    properties = lambda: globals()["properties"](values)

    def is_simple_default() -> bool:
        """Whether the given default is simple enough for a one-liner."""
        if not (d := values.get("default")):
            return True
        return not isinstance(d, dict) and not isinstance(d, list)

    def default_str() -> str:
        try:
            default = values["default"]
        except KeyError:
            t = values.get("type", [])
            if "object" == t or "object" in t:
                # Skip objects as they probably have child defaults.
                return ""
            return "There is no default for this option."

        if not is_simple_default():
            # Show complex defaults as a code block instead.
            return ""
        return f"Defaults to `{json.dumps(default)}`."

    def header() -> str:
        try:
            title = SECTION_HEADERS[prop]["title"]
            description = SECTION_HEADERS[prop]["description"]
            return f"## {title}\n\n{description}\n\n---\n"
        except KeyError:
            return ""

    def title() -> str:
        return f"### `{prop}`\n"

    def description() -> str:
        if not (description := values.get("description")):
            error(f"missing description for {prop}")
            return "MISSING DESCRIPTION\n"
        return f"\n{a(em(type_str()))}{description}{p(default_str(), sep())}\n"

    def example_str(example: Any) -> str:
        return "```yaml\n" + f"{yaml.dump({prop: example}, sort_keys=False)}" + "```\n"

    def default_example() -> str:
        if is_simple_default():
            return ""
        default_cfg = example_str(values["default"])
        return f"\nDefault configuration:\n{default_cfg}"

    def examples() -> str:
        if not (examples := values.get("examples")):
            return ""

        examples_str = "\n".join(example_str(e) for e in examples)

        if len(examples) >= 2:
            return f"\nExample configurations:\n{examples_str}"
        else:
            return f"\nExample configuration:\n{examples_str}"

    def post_description() -> str:
        # Sometimes it's helpful to have a description after the list of fields,
        # e.g. with a subsection that consists only of text.
        # This helps with that.
        if not (description := values.get("io.element.post_description")):
            return ""
        return f"\n{description}\n\n"

    return (
        "---\n"
        + header()
        + title()
        + description()
        + items()
        + properties()
        + default_example()
        + examples()
        + post_description()
    )


def main() -> None:
    def usage(err_msg: str) -> int:
        script_name = (sys.argv[:1] or ["__main__.py"])[0]
        print(err_msg, file=sys.stderr)
        print(f"Usage: {script_name} <JSON Schema file>", file=sys.stderr)
        print(f"\n{__doc__}", file=sys.stderr)
        exit(1)

    def read_json_file_arg() -> Any:
        if len(sys.argv) > 2:
            exit(usage("Too many arguments."))
        if not (filepath := (sys.argv[1:] or [""])[0]):
            exit(usage("No schema file provided."))
        with open(filepath) as f:
            return yaml.safe_load(f)

    schema = read_json_file_arg()
    schema = resolve_local_refs(schema)

    sections = (section(k, v) for k, v in schema["properties"].items())
    print(HEADER + "".join(sections), end="")

    if has_error:
        print("There were errors.", file=sys.stderr)
        exit(2)


if __name__ == "__main__":
    main()
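The `$ref` inlining done by `resolve_local_refs` above is easiest to see on a toy schema. In this sketch (hypothetical input, worked out from the function's logic), the referenced definition's keywords are merged into the referencing property, with `properties` dictionaries merged rather than overridden:

```python
schema = {
    "$defs": {"duration": {"type": "string", "io.element.type_name": "duration"}},
    "properties": {
        "redaction_retention_period": {
            "$ref": "#/$defs/duration",
            "description": "How long to keep redacted messages.",
        }
    },
}

resolved = resolve_local_refs(schema)
# The $ref is gone and the definition's keywords are inlined:
assert resolved["properties"]["redaction_retention_period"] == {
    "description": "How long to keep redacted messages.",
    "type": "string",
    "io.element.type_name": "duration",
}
```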
@ -139,6 +139,3 @@ cargo-fmt

# Ensure type hints are correct.
mypy

# Generate configuration documentation from the JSON Schema
./scripts-dev/gen_config_documentation.py schema/synapse-config.schema.yaml > docs/usage/configuration/config_documentation.md
@ -254,12 +254,6 @@ def _prepare() -> None:
    # Update the version specified in pyproject.toml.
    subprocess.check_output(["poetry", "version", new_version])

    # Update config schema $id.
    schema_file = "schema/synapse-config.schema.yaml"
    major_minor_version = ".".join(new_version.split(".")[:2])
    url = f"https://element-hq.github.io/synapse/schema/synapse/v{major_minor_version}/synapse-config.schema.json"
    subprocess.check_output(["sed", "-i", f"0,/^\\$id: .*/s||$id: {url}|", schema_file])

    # Generate changelogs.
    generate_and_write_changelog(synapse_repo, current_version, new_version)
@ -37,9 +37,7 @@ from synapse.appservice import ApplicationService
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace
from synapse.state import CREATE_KEY, POWER_KEY
from synapse.types import Requester, create_requester
from synapse.types.state import StateFilter
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
@ -218,20 +216,18 @@ class BaseAuth:
            # by checking if they would (theoretically) be able to change the
            # m.room.canonical_alias events

            auth_events = await self._storage_controllers.state.get_current_state(
                room_id,
                StateFilter.from_types(
                    [
                        POWER_KEY,
                        CREATE_KEY,
                    ]
                ),
            )
            power_level_event = (
                await self._storage_controllers.state.get_current_state_event(
                    room_id, EventTypes.PowerLevels, ""
                )
            )

            auth_events = {}
            if power_level_event:
                auth_events[(EventTypes.PowerLevels, "")] = power_level_event

            send_level = event_auth.get_send_level(
                EventTypes.CanonicalAlias,
                "",
                auth_events.get(POWER_KEY),
            )
            send_level = event_auth.get_send_level(
                EventTypes.CanonicalAlias, "", power_level_event
            )
            user_level = event_auth.get_user_power_level(
                requester.user.to_string(), auth_events
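The develop side above batches both state lookups into a single `StateFilter` query instead of fetching only the power-levels event. A hedged sketch of what the imported keys presumably are — their exact definitions live in `synapse.state`, which this diff does not show:

```python
from synapse.api.constants import EventTypes

# Assumed definitions, based on how the keys are used above:
POWER_KEY = (EventTypes.PowerLevels, "")  # (event type, state key) of m.room.power_levels
CREATE_KEY = (EventTypes.Create, "")      # (event type, state key) of m.room.create

# get_current_state(room_id, StateFilter.from_types([POWER_KEY, CREATE_KEY]))
# then returns a {(event_type, state_key): event} mapping, so
# auth_events.get(POWER_KEY) is the room's current power-levels event.
```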
@ -30,6 +30,9 @@ from authlib.oauth2.rfc7662 import IntrospectionToken
from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
from prometheus_client import Histogram

from twisted.web.client import readBody
from twisted.web.http_headers import Headers

from synapse.api.auth.base import BaseAuth
from synapse.api.errors import (
    AuthError,
@ -40,14 +43,8 @@ from synapse.api.errors import (
    UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import (
    active_span,
    force_tracing,
    inject_request_headers,
    start_active_span,
)
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.synapse_rust.http_client import HttpClient
from synapse.types import Requester, UserID, create_requester
from synapse.util import json_decoder
from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
@ -182,10 +179,6 @@ class MSC3861DelegatedAuth(BaseAuth):
        self._admin_token: Callable[[], Optional[str]] = self._config.admin_token
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

        self._rust_http_client = HttpClient(
            user_agent=self._http_client.user_agent.decode("utf8")
        )

        # # Token Introspection Cache
        # This remembers what users/devices are represented by which access tokens,
        # in order to reduce overall system load:
@ -308,6 +301,7 @@ class MSC3861DelegatedAuth(BaseAuth):
        introspection_endpoint = await self._introspection_endpoint()
        raw_headers: Dict[str, str] = {
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": str(self._http_client.user_agent, "utf-8"),
            "Accept": "application/json",
            # Tell MAS that we support reading the device ID as an explicit
            # value, not encoded in the scope. This is supported by MAS 0.15+
@ -321,34 +315,38 @@ class MSC3861DelegatedAuth(BaseAuth):
        uri, raw_headers, body = self._client_auth.prepare(
            method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
        )
        headers = Headers({k: [v] for (k, v) in raw_headers.items()})

        # Do the actual request
        # We're not using the SimpleHttpClient util methods as we don't want to
        # check the HTTP status code, and we do the body encoding ourselves.

        logger.debug("Fetching token from MAS")
        start_time = self._clock.time()
        try:
            with start_active_span("mas-introspect-token"):
                inject_request_headers(raw_headers)
                with PreserveLoggingContext():
                    resp_body = await self._rust_http_client.post(
                        url=uri,
                        response_limit=1 * 1024 * 1024,
                        headers=raw_headers,
                        request_body=body,
                    )
            response = await self._http_client.request(
                method="POST",
                uri=uri,
                data=body.encode("utf-8"),
                headers=headers,
            )

            resp_body = await make_deferred_yieldable(readBody(response))
        except HttpResponseException as e:
            end_time = self._clock.time()
            introspection_response_timer.labels(e.code).observe(end_time - start_time)
            raise
        except Exception:
            end_time = self._clock.time()
            introspection_response_timer.labels("ERR").observe(end_time - start_time)
            raise

        logger.debug("Fetched token from MAS")

        end_time = self._clock.time()
        introspection_response_timer.labels(200).observe(end_time - start_time)
        introspection_response_timer.labels(response.code).observe(
            end_time - start_time
        )

        if response.code < 200 or response.code >= 300:
            raise HttpResponseException(
                response.code,
                response.phrase.decode("ascii", errors="replace"),
                resp_body,
            )

        resp = json_decoder.decode(resp_body.decode("utf-8"))
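Both sides of this hunk follow the same measure-and-label pattern: record a start time, label the `Histogram` observation with the HTTP status (or `"ERR"`), and re-raise on failure. A stripped-down sketch of that pattern with `prometheus_client` — the metric name and wrapper are illustrative, not Synapse's:

```python
import time

from prometheus_client import Histogram

request_timer = Histogram(
    "introspection_response_seconds",
    "Time taken to receive an introspection response",
    ["code"],
)


def timed_call(fn):
    start = time.monotonic()
    try:
        result = fn()
    except Exception:
        # Failures are still observed, under a catch-all label.
        request_timer.labels("ERR").observe(time.monotonic() - start)
        raise
    request_timer.labels(200).observe(time.monotonic() - start)
    return result
```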
@ -477,7 +475,7 @@ class MSC3861DelegatedAuth(BaseAuth):
            # XXX: This is a temporary solution so that the admin API can be called by
            # the OIDC provider. This will be removed once we have OIDC client
            # credentials grant support in matrix-authentication-service.
            logger.info("Admin token used")
            logging.info("Admin token used")
            # XXX: that user doesn't exist and won't be provisioned.
            # This is mostly fine for admin calls, but we should also think about doing
            # requesters without a user_id.
@ -185,18 +185,12 @@ ServerNoticeLimitReached: Final = "m.server_notice.usage_limit_reached"

class UserTypes:
    """Allows for user type specific behaviour. With the benefit of hindsight
    'admin' and 'guest' users should also be UserTypes. Extra user types can be
    added in the configuration. Normal users are type None or one of the extra
    user types (if configured).
    'admin' and 'guest' users should also be UserTypes. Normal users are type None
    """

    SUPPORT: Final = "support"
    BOT: Final = "bot"
    ALL_BUILTIN_USER_TYPES: Final = (SUPPORT, BOT)
    ALL_USER_TYPES: Final = (SUPPORT, BOT)
    """
    The user types that are built-in to Synapse. Extra user types can be
    added in the configuration.
    """


class RelationTypes:
@ -286,10 +280,6 @@ class AccountDataTypes:
    IGNORED_USER_LIST: Final = "m.ignored_user_list"
    TAG: Final = "m.tag"
    PUSH_RULES: Final = "m.push_rules"
    # MSC4155: Invite filtering
    MSC4155_INVITE_PERMISSION_CONFIG: Final = (
        "org.matrix.msc4155.invite_permission_config"
    )


class HistoryVisibility:
@ -137,9 +137,6 @@ class Codes(str, Enum):
    PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE"
    KEY_TOO_LARGE = "M_KEY_TOO_LARGE"

    # Part of MSC4155
    INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"


class CodeMessageException(RuntimeError):
    """An exception with an integer code, a message string attribute, and optional headers.
@ -527,11 +524,7 @@ class InvalidCaptchaError(SynapseError):


class LimitExceededError(SynapseError):
    """A client has sent too many requests and is being throttled.

    Args:
        pause: Optional time in seconds to pause before responding to the client.
    """
    """A client has sent too many requests and is being throttled."""

    def __init__(
        self,
@ -539,7 +532,6 @@ class LimitExceededError(SynapseError):
        code: int = 429,
        retry_after_ms: Optional[int] = None,
        errcode: str = Codes.LIMIT_EXCEEDED,
        pause: Optional[float] = None,
    ):
        # Use HTTP header Retry-After to enable library-assisted retry handling.
        headers = (
@ -550,7 +542,6 @@ class LimitExceededError(SynapseError):
        super().__init__(code, "Too Many Requests", errcode, headers=headers)
        self.retry_after_ms = retry_after_ms
        self.limiter_name = limiter_name
        self.pause = pause

    def error_dict(self, config: Optional["HomeServerConfig"]) -> "JsonDict":
        return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)
@ -20,7 +20,7 @@
#
#

from typing import TYPE_CHECKING, Dict, Hashable, Optional, Tuple
from typing import Dict, Hashable, Optional, Tuple

from synapse.api.errors import LimitExceededError
from synapse.config.ratelimiting import RatelimitSettings
@ -28,12 +28,6 @@ from synapse.storage.databases.main import DataStore
from synapse.types import Requester
from synapse.util import Clock

if TYPE_CHECKING:
    # To avoid circular imports:
    from synapse.module_api.callbacks.ratelimit_callbacks import (
        RatelimitModuleApiCallbacks,
    )


class Ratelimiter:
    """
@ -78,14 +72,12 @@ class Ratelimiter:
        store: DataStore,
        clock: Clock,
        cfg: RatelimitSettings,
        ratelimit_callbacks: Optional["RatelimitModuleApiCallbacks"] = None,
    ):
        self.clock = clock
        self.rate_hz = cfg.per_second
        self.burst_count = cfg.burst_count
        self.store = store
        self._limiter_name = cfg.key
        self._ratelimit_callbacks = ratelimit_callbacks

        # A dictionary representing the token buckets tracked by this rate
        # limiter. Each entry maps a key of arbitrary type to a tuple representing:
@ -173,20 +165,6 @@ class Ratelimiter:
        if override and not override.messages_per_second:
            return True, -1.0

        if requester and self._ratelimit_callbacks:
            # Check if the user has a custom rate limit for this specific limiter
            # as returned by the module API.
            module_override = (
                await self._ratelimit_callbacks.get_ratelimit_override_for_user(
                    requester.user.to_string(),
                    self._limiter_name,
                )
            )

            if module_override:
                rate_hz = module_override.per_second
                burst_count = module_override.burst_count

        # Override default values if set
        time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
        rate_hz = rate_hz if rate_hz is not None else self.rate_hz
@ -338,10 +316,12 @@ class Ratelimiter:
        )

        if not allowed:
            if pause:
                await self.clock.sleep(pause)

            raise LimitExceededError(
                limiter_name=self._limiter_name,
                retry_after_ms=int(1000 * (time_allowed - time_now_s)),
                pause=pause,
            )
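The `per_second`/`burst_count` pair used throughout these ratelimiter hunks is a classic token bucket: the bucket holds at most `burst_count` tokens and refills at `per_second` tokens each second. A self-contained toy version, illustrative only and far simpler than Synapse's `Ratelimiter`:

```python
import time


class TokenBucket:
    """Allow at most `burst_count` actions, refilling `rate_hz` per second."""

    def __init__(self, rate_hz: float, burst_count: float) -> None:
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill tokens accrued since the last check, capped at the burst size.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.rate_hz
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False


# e.g. the {"per_second": 1, "burst_count": 5} defaults seen elsewhere in this diff
bucket = TokenBucket(rate_hz=1.0, burst_count=5.0)
```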
@ -445,8 +445,8 @@ def listen_http(
    # getHost() returns a UNIXAddress which contains an instance variable of 'name'
    # encoded as a byte string. Decode as utf-8 so pretty.
    logger.info(
        "Synapse now listening on Unix Socket at: %s",
        ports[0].getHost().name.decode("utf-8"),
        "Synapse now listening on Unix Socket at: "
        f"{ports[0].getHost().name.decode('utf-8')}"
    )

    return ports
@ -28,13 +28,15 @@ from prometheus_client import Gauge

from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import JsonDict
from synapse.util.constants import ONE_HOUR_SECONDS, ONE_MINUTE_SECONDS

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger("synapse.app.homeserver")

ONE_MINUTE_SECONDS = 60
ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS

MILLISECONDS_PER_SECOND = 1000

INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS
@ -171,7 +173,7 @@ async def phone_stats_home(
    stats["log_level"] = logging.getLevelName(log_level)

    logger.info(
        "Reporting stats to %s: %s", hs.config.metrics.report_stats_endpoint, stats
        "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
    )
    try:
        await hs.get_proxied_http_client().put_json(
@ -2,7 +2,7 @@
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright (C) 2023, 2025 New Vector, Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@ -70,8 +70,6 @@ from typing import (
    Tuple,
)

from twisted.internet.interfaces import IDelayedCall

from synapse.appservice import (
    ApplicationService,
    ApplicationServiceState,
@ -452,20 +450,6 @@ class _TransactionController:
            recoverer.recover()
        logger.info("Now %i active recoverers", len(self.recoverers))

    def force_retry(self, service: ApplicationService) -> None:
        """Forces a Recoverer to attempt delivery of transactions immediately.

        Args:
            service:
        """
        recoverer = self.recoverers.get(service.id)
        if not recoverer:
            # No need to force a retry on a happy AS.
            logger.info("%s is not in recovery, not forcing retry", service.id)
            return

        recoverer.force_retry()

    async def _is_service_up(self, service: ApplicationService) -> bool:
        state = await self.store.get_appservice_state(service)
        return state == ApplicationServiceState.UP or state is None
@ -498,12 +482,11 @@ class _Recoverer:
        self.service = service
        self.callback = callback
        self.backoff_counter = 1
        self.scheduled_recovery: Optional[IDelayedCall] = None

    def recover(self) -> None:
        delay = 2**self.backoff_counter
        logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
        self.scheduled_recovery = self.clock.call_later(
        self.clock.call_later(
            delay, run_as_background_process, "as-recoverer", self.retry
        )
@ -513,21 +496,6 @@ class _Recoverer:
        self.backoff_counter += 1
        self.recover()

    def force_retry(self) -> None:
        """Cancels the existing timer and forces an immediate retry in the background.

        Args:
            service:
        """
        # Prevent the existing backoff from occurring
        if self.scheduled_recovery:
            self.clock.cancel_call_later(self.scheduled_recovery)
        # Run a retry, which will reschedule a recovery if it fails.
        run_as_background_process(
            "retry",
            self.retry,
        )

    async def retry(self) -> None:
        logger.info("Starting retries on %s", self.service.id)
        try:
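The recoverer's schedule is plain exponential backoff: each failed retry increments `backoff_counter`, and the next attempt is scheduled `2 ** backoff_counter` seconds out. A quick illustration of the resulting delays (pure arithmetic, mirroring the `recover` logic above):

```python
def backoff_delays(attempts: int) -> list:
    # backoff_counter starts at 1 and is incremented after each failure.
    return [2**counter for counter in range(1, attempts + 1)]


print(backoff_delays(6))  # [2, 4, 8, 16, 32, 64] seconds between attempts
```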
@ -59,7 +59,6 @@ from synapse.config import ( # noqa: F401
    tls,
    tracer,
    user_directory,
    user_types,
    voip,
    workers,
)
@ -123,7 +122,6 @@ class RootConfig:
    retention: retention.RetentionConfig
    background_updates: background_updates.BackgroundUpdateConfig
    auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig
    user_types: user_types.UserTypesConfig

    config_classes: List[Type["Config"]] = ...
    config_files: List[str]
@ -561,17 +561,8 @@ class ExperimentalConfig(Config):
        # MSC4076: Add `disable_badge_count` to pusher configuration
        self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)

        # MSC4235: Add `via` param to hierarchy endpoint
        self.msc4235_enabled: bool = experimental.get("msc4235_enabled", False)

        # MSC4263: Preventing MXID enumeration via key queries
        self.msc4263_limit_key_queries_to_users_who_share_rooms = experimental.get(
            "msc4263_limit_key_queries_to_users_who_share_rooms",
            False,
        )

        # MSC4267: Automatically forgetting rooms on leave
        self.msc4267_enabled: bool = experimental.get("msc4267_enabled", False)

        # MSC4155: Invite filtering
        self.msc4155_enabled: bool = experimental.get("msc4155_enabled", False)
@ -94,21 +94,5 @@ class FederationConfig(Config):
            2**62,
        )

    def is_domain_allowed_according_to_federation_whitelist(self, domain: str) -> bool:
        """
        Returns whether a domain is allowed according to the federation whitelist. If a
        federation whitelist is not set, all domains are allowed.

        Args:
            domain: The domain to test.

        Returns:
            True if the domain is allowed or if a whitelist is not set, False otherwise.
        """
        if self.federation_domain_whitelist is None:
            return True

        return domain in self.federation_domain_whitelist


_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
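The helper added above gives callers a single predicate for whitelist checks instead of testing `federation_domain_whitelist is None` at every call site. A hedged usage sketch; the caller and `config` object here are illustrative, not code from this diff:

```python
def filter_destinations(config, destinations: list) -> list:
    """Keep only servers we may federate with under the whitelist."""
    return [
        d
        for d in destinations
        if config.is_domain_allowed_according_to_federation_whitelist(d)
    ]
```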
@ -59,7 +59,6 @@ from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .tracer import TracerConfig
from .user_directory import UserDirectoryConfig
from .user_types import UserTypesConfig
from .voip import VoipConfig
from .workers import WorkerConfig
@ -108,5 +107,4 @@ class HomeServerConfig(RootConfig):
        ExperimentalConfig,
        BackgroundUpdateConfig,
        AutoAcceptInvitesConfig,
        UserTypesConfig,
    ]
@ -51,8 +51,6 @@ if TYPE_CHECKING:
    from synapse.config.homeserver import HomeServerConfig
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

DEFAULT_LOG_CONFIG = Template(
    """\
# Log configuration for Synapse.
@ -293,7 +291,7 @@ def _load_logging_config(log_config_path: str) -> None:
        log_config = yaml.safe_load(f.read())

    if not log_config:
        logger.warning("Loaded a blank logging config?")
        logging.warning("Loaded a blank logging config?")

    # If the old structured logging configuration is being used, raise an error.
    if "structured" in log_config and log_config.get("structured"):
@ -314,7 +312,7 @@ def _reload_logging_config(log_config_path: Optional[str]) -> None:
        return

    _load_logging_config(log_config_path)
    logger.info("Reloaded log config from %s due to SIGHUP", log_config_path)
    logging.info("Reloaded log config from %s due to SIGHUP", log_config_path)
@ -351,17 +349,17 @@ def setup_logging(
|
|||||||
appbase.register_sighup(_reload_logging_config, log_config_path)
|
appbase.register_sighup(_reload_logging_config, log_config_path)
|
||||||
|
|
||||||
# Log immediately so we can grep backwards.
|
# Log immediately so we can grep backwards.
|
||||||
logger.warning("***** STARTING SERVER *****")
|
logging.warning("***** STARTING SERVER *****")
|
||||||
logger.warning(
|
logging.warning(
|
||||||
"Server %s version %s",
|
"Server %s version %s",
|
||||||
sys.argv[0],
|
sys.argv[0],
|
||||||
SYNAPSE_VERSION,
|
SYNAPSE_VERSION,
|
||||||
)
|
)
|
||||||
logger.warning("Copyright (c) 2023 New Vector, Inc")
|
logging.warning("Copyright (c) 2023 New Vector, Inc")
|
||||||
logger.warning(
|
logging.warning(
|
||||||
"Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
|
"Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
|
||||||
)
|
)
|
||||||
logger.info("Server hostname: %s", config.server.server_name)
|
logging.info("Server hostname: %s", config.server.server_name)
|
||||||
logger.info("Public Base URL: %s", config.server.public_baseurl)
|
logging.info("Public Base URL: %s", config.server.public_baseurl)
|
||||||
logger.info("Instance name: %s", hs.get_instance_name())
|
logging.info("Instance name: %s", hs.get_instance_name())
|
||||||
logger.info("Twisted reactor: %s", type(reactor).__name__)
|
logging.info("Twisted reactor: %s", type(reactor).__name__)
|
||||||
@@ -240,9 +240,3 @@ class RatelimitConfig(Config):
             "rc_delayed_event_mgmt",
             defaults={"per_second": 1, "burst_count": 5},
         )
-
-        self.rc_reports = RatelimitSettings.parse(
-            config,
-            "rc_reports",
-            defaults={"per_second": 1, "burst_count": 5},
-        )
@@ -27,7 +27,7 @@ from synapse.types import JsonDict
 
 from ._base import Config, ConfigError
 
-logger = logging.getLogger(__name__)
+logger = logging.Logger(__name__)
 
 
 class RoomDefaultEncryptionTypes:
@@ -85,4 +85,4 @@ class RoomConfig(Config):
 
         # When enabled, users will forget rooms when they leave them, either via a
         # leave, kick or ban.
-        self.forget_on_leave: bool = config.get("forget_rooms_on_leave", False)
+        self.forget_on_leave = config.get("forget_rooms_on_leave", False)
@@ -41,7 +41,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
 from ._base import Config, ConfigError
 from ._util import validate_config
 
-logger = logging.getLogger(__name__)
+logger = logging.Logger(__name__)
 
 DIRECT_TCP_ERROR = """
 Using direct TCP replication for workers is no longer supported.
@@ -1,44 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2025 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-
-from typing import Any, List, Optional
-
-from synapse.api.constants import UserTypes
-from synapse.types import JsonDict
-
-from ._base import Config, ConfigError
-
-
-class UserTypesConfig(Config):
-    section = "user_types"
-
-    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-        user_types: JsonDict = config.get("user_types", {})
-
-        self.default_user_type: Optional[str] = user_types.get(
-            "default_user_type", None
-        )
-        self.extra_user_types: List[str] = user_types.get("extra_user_types", [])
-
-        all_user_types: List[str] = []
-        all_user_types.extend(UserTypes.ALL_BUILTIN_USER_TYPES)
-        all_user_types.extend(self.extra_user_types)
-
-        self.all_user_types = all_user_types
-
-        if self.default_user_type is not None:
-            if self.default_user_type not in all_user_types:
-                raise ConfigError(
-                    f"Default user type {self.default_user_type} is not in the list of all user types: {all_user_types}"
-                )
@@ -64,7 +64,6 @@ from synapse.api.room_versions import (
     RoomVersion,
     RoomVersions,
 )
-from synapse.state import CREATE_KEY
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.types import (
     MutableStateMap,
@@ -309,13 +308,6 @@ def check_state_dependent_auth_rules(
 
     auth_dict = {(e.type, e.state_key): e for e in auth_events}
 
-    # Later code relies on there being a create event e.g _can_federate, _is_membership_change_allowed
-    # so produce a more intelligible error if we don't have one.
-    if auth_dict.get(CREATE_KEY) is None:
-        raise AuthError(
-            403, f"Event {event.event_id} is missing a create event in auth_events."
-        )
-
    # additional check for m.federate
    creating_domain = get_domain_from_id(event.room_id)
    originating_domain = get_domain_from_id(event.sender)
@@ -1018,16 +1010,11 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
         user_id: user's id to look up in power_levels
         auth_events:
             state in force at this point in the room (or rather, a subset of
-            it including at least the create event, and possibly a power levels event).
+            it including at least the create event and power levels event.
 
     Returns:
         the user's power level in this room.
     """
-    create_event = auth_events.get(CREATE_KEY)
-    assert create_event is not None, (
-        "A create event in the auth events chain is required to calculate user power level correctly,"
-        " but was not found. This indicates a bug"
-    )
     power_level_event = get_power_level_event(auth_events)
     if power_level_event:
         level = power_level_event.content.get("users", {}).get(user_id)
@@ -1041,6 +1028,12 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
     else:
         # if there is no power levels event, the creator gets 100 and everyone
         # else gets 0.
+
+        # some things which call this don't pass the create event: hack around
+        # that.
+        key = (EventTypes.Create, "")
+        create_event = auth_events.get(key)
+        if create_event is not None:
         if create_event.room_version.implicit_room_creator:
             creator = create_event.sender
         else:
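
Note: both sides of the hunk above implement the same default when no power_levels event exists; only the create-event lookup differs. A trivial runnable illustration of that default rule:

    # Toy illustration of the default power levels rule both sides implement:
    # with no power_levels event, the room creator gets 100 and everyone else 0.
    def default_power_level(user_id: str, creator: str) -> int:
        return 100 if user_id == creator else 0

    assert default_power_level("@alice:hs", "@alice:hs") == 100
    assert default_power_level("@bob:hs", "@alice:hs") == 0
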
@@ -208,6 +208,7 @@ class EventBase(metaclass=abc.ABCMeta):
     depth: DictProperty[int] = DictProperty("depth")
     content: DictProperty[JsonDict] = DictProperty("content")
     hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
+    origin: DictProperty[str] = DictProperty("origin")
     origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
     room_id: DictProperty[str] = DictProperty("room_id")
     sender: DictProperty[str] = DictProperty("sender")
@@ -195,18 +195,15 @@ class InviteAutoAccepter:
             except SynapseError as e:
                 if e.code == HTTPStatus.FORBIDDEN:
                     logger.debug(
-                        "Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: %s",
-                        e,
+                        f"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: {e}"
                     )
                 else:
-                    logger.warning(
-                        "Update_room_membership raised the following unexpected (SynapseError) exception: %s",
-                        e,
+                    logger.warn(
+                        f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}"
                     )
             except Exception as e:
-                logger.warning(
-                    "Update_room_membership raised the following unexpected exception: %s",
-                    e,
+                logger.warn(
+                    f"Update_room_membership raised the following unexpected exception: {e}"
                 )
 
                 sleep = 2**retries
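
Note: this hunk is one of several in the diff where the develop side replaces the deprecated `logger.warn` and eager f-strings with `logger.warning` plus lazy `%s` arguments. A short sketch of the difference:

    import logging

    logger = logging.getLogger(__name__)
    err = ValueError("boom")

    # Lazy %-style arguments (develop side): the message is only formatted
    # if a handler actually emits the record, and `warning` replaces the
    # deprecated `warn` alias.
    logger.warning("Update_room_membership raised: %s", err)

    # Eager f-string (release side): the string is built even when the
    # warning level is filtered out.
    logger.warning(f"Update_room_membership raised: {err}")
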
@@ -302,8 +302,8 @@ def create_local_event_from_event_dict(
     event_dict: JsonDict,
     internal_metadata_dict: Optional[JsonDict] = None,
 ) -> EventBase:
-    """Takes a fully formed event dict, ensuring that fields like
-    `origin_server_ts` have correct values for a locally produced event,
+    """Takes a fully formed event dict, ensuring that fields like `origin`
+    and `origin_server_ts` have correct values for a locally produced event,
     then signs and hashes it.
     """
 
@@ -319,6 +319,7 @@ def create_local_event_from_event_dict(
     if format_version == EventFormatVersions.ROOM_V1_V2:
         event_dict["event_id"] = _create_event_id(clock, hostname)
 
+    event_dict["origin"] = hostname
     event_dict.setdefault("origin_server_ts", time_now)
 
     event_dict.setdefault("unsigned", {})
@@ -67,6 +67,7 @@ class EventValidator:
             "auth_events",
             "content",
             "hashes",
+            "origin",
             "prev_events",
             "sender",
             "type",
@@ -76,6 +77,13 @@ class EventValidator:
             if k not in event:
                 raise SynapseError(400, "Event does not have key %s" % (k,))
 
+        # Check that the following keys have string values
+        event_strings = ["origin"]
+
+        for s in event_strings:
+            if not isinstance(getattr(event, s), str):
+                raise SynapseError(400, "'%s' not a string type" % (s,))
+
         # Depending on the room version, ensure the data is spec compliant JSON.
         if event.room_version.strict_canonicaljson:
             validate_canonicaljson(event.get_pdu_json())
@@ -322,7 +322,8 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB
        SynapseError: if the pdu is missing required fields or is otherwise
            not a valid matrix event
    """
-    # we could probably enforce a bunch of other fields here (room_id, sender, etc.)
+    # we could probably enforce a bunch of other fields here (room_id, sender,
+    # origin, etc etc)
    assert_params_in_dict(pdu_json, ("type", "depth"))
 
    # Strip any unauthorized values from "unsigned" if they exist
@@ -1818,7 +1818,7 @@ class FederationClient(FederationBase):
                )
                return timestamp_to_event_response
            except SynapseError as e:
-                logger.warning(
+                logger.warn(
                    "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
                    room_id,
                    timestamp,
@@ -928,8 +928,7 @@ class FederationServer(FederationBase):
            # joins) or the full state (for full joins).
            # Return a 404 as we would if we weren't in the room at all.
            logger.info(
-                "Rejecting /send_%s to %s because it's a partial state room",
-                membership_type,
+                f"Rejecting /send_{membership_type} to %s because it's a partial state room",
                room_id,
            )
            raise SynapseError(
@@ -342,8 +342,6 @@ class _DestinationWakeupQueue:
            destination, _ = self.queue.popitem(last=False)
 
            queue = self.sender._get_per_destination_queue(destination)
-            if queue is None:
-                continue
 
            if not queue._new_data_to_send:
                # The per destination queue has already been woken up.
@ -438,23 +436,12 @@ class FederationSender(AbstractFederationSender):
|
|||||||
self._wake_destinations_needing_catchup,
|
self._wake_destinations_needing_catchup,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_per_destination_queue(
|
def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
|
||||||
self, destination: str
|
|
||||||
) -> Optional[PerDestinationQueue]:
|
|
||||||
"""Get or create a PerDestinationQueue for the given destination
|
"""Get or create a PerDestinationQueue for the given destination
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
destination: server_name of remote server
|
destination: server_name of remote server
|
||||||
|
|
||||||
Returns:
|
|
||||||
None if the destination is not allowed by the federation whitelist.
|
|
||||||
Otherwise a PerDestinationQueue for this destination.
|
|
||||||
"""
|
"""
|
||||||
if not self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
|
|
||||||
destination
|
|
||||||
):
|
|
||||||
return None
|
|
||||||
|
|
||||||
queue = self._per_destination_queues.get(destination)
|
queue = self._per_destination_queues.get(destination)
|
||||||
if not queue:
|
if not queue:
|
||||||
queue = PerDestinationQueue(self.hs, self._transaction_manager, destination)
|
queue = PerDestinationQueue(self.hs, self._transaction_manager, destination)
|
||||||
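
Note: because the develop side makes this getter return None for destinations rejected by the federation whitelist, every call site in the hunks that follow gains a None check. A hedged, self-contained sketch of that caller pattern (names are illustrative, not Synapse's API):

    from typing import Dict, Optional

    class Queue:
        def send_pdu(self, pdu: object) -> None:
            print("sending", pdu)

    def get_queue(destination: str, allowed: bool, queues: Dict[str, Queue]) -> Optional[Queue]:
        if not allowed:
            return None  # destination filtered out by the whitelist
        return queues.setdefault(destination, Queue())

    queues: Dict[str, Queue] = {}
    for destination in ["matrix.org", "blocked.example"]:
        queue = get_queue(destination, destination == "matrix.org", queues)
        if queue is None:
            continue  # mirrors the `if queue is None: continue` checks below
        queue.send_pdu({"room_id": "!room"})
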
@ -731,16 +718,6 @@ class FederationSender(AbstractFederationSender):
|
|||||||
# track the fact that we have a PDU for these destinations,
|
# track the fact that we have a PDU for these destinations,
|
||||||
# to allow us to perform catch-up later on if the remote is unreachable
|
# to allow us to perform catch-up later on if the remote is unreachable
|
||||||
# for a while.
|
# for a while.
|
||||||
# Filter out any destinations not present in the federation_domain_whitelist, if
|
|
||||||
# the whitelist exists. These destinations should not be sent to so let's not
|
|
||||||
# waste time or space keeping track of events destined for them.
|
|
||||||
destinations = [
|
|
||||||
d
|
|
||||||
for d in destinations
|
|
||||||
if self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
|
|
||||||
d
|
|
||||||
)
|
|
||||||
]
|
|
||||||
await self.store.store_destination_rooms_entries(
|
await self.store.store_destination_rooms_entries(
|
||||||
destinations,
|
destinations,
|
||||||
pdu.room_id,
|
pdu.room_id,
|
||||||
@ -755,12 +732,7 @@ class FederationSender(AbstractFederationSender):
|
|||||||
)
|
)
|
||||||
|
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
queue = self._get_per_destination_queue(destination)
|
self._get_per_destination_queue(destination).send_pdu(pdu)
|
||||||
# We expect `queue` to not be None as we already filtered out
|
|
||||||
# non-whitelisted destinations above.
|
|
||||||
assert queue is not None
|
|
||||||
|
|
||||||
queue.send_pdu(pdu)
|
|
||||||
|
|
||||||
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
|
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
|
||||||
"""Send a RR to any other servers in the room
|
"""Send a RR to any other servers in the room
|
||||||
@ -869,16 +841,12 @@ class FederationSender(AbstractFederationSender):
|
|||||||
for domain in immediate_domains:
|
for domain in immediate_domains:
|
||||||
# Add to destination queue and wake the destination up
|
# Add to destination queue and wake the destination up
|
||||||
queue = self._get_per_destination_queue(domain)
|
queue = self._get_per_destination_queue(domain)
|
||||||
if queue is None:
|
|
||||||
continue
|
|
||||||
queue.queue_read_receipt(receipt)
|
queue.queue_read_receipt(receipt)
|
||||||
queue.attempt_new_transaction()
|
queue.attempt_new_transaction()
|
||||||
|
|
||||||
for domain in delay_domains:
|
for domain in delay_domains:
|
||||||
# Add to destination queue...
|
# Add to destination queue...
|
||||||
queue = self._get_per_destination_queue(domain)
|
queue = self._get_per_destination_queue(domain)
|
||||||
if queue is None:
|
|
||||||
continue
|
|
||||||
queue.queue_read_receipt(receipt)
|
queue.queue_read_receipt(receipt)
|
||||||
|
|
||||||
# ... and schedule the destination to be woken up.
|
# ... and schedule the destination to be woken up.
|
||||||
@ -914,10 +882,9 @@ class FederationSender(AbstractFederationSender):
|
|||||||
if self.is_mine_server_name(destination):
|
if self.is_mine_server_name(destination):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
queue = self._get_per_destination_queue(destination)
|
self._get_per_destination_queue(destination).send_presence(
|
||||||
if queue is None:
|
states, start_loop=False
|
||||||
continue
|
)
|
||||||
queue.send_presence(states, start_loop=False)
|
|
||||||
|
|
||||||
self._destination_wakeup_queue.add_to_queue(destination)
|
self._destination_wakeup_queue.add_to_queue(destination)
|
||||||
|
|
||||||
@ -967,8 +934,6 @@ class FederationSender(AbstractFederationSender):
|
|||||||
return
|
return
|
||||||
|
|
||||||
queue = self._get_per_destination_queue(edu.destination)
|
queue = self._get_per_destination_queue(edu.destination)
|
||||||
if queue is None:
|
|
||||||
return
|
|
||||||
if key:
|
if key:
|
||||||
queue.send_keyed_edu(edu, key)
|
queue.send_keyed_edu(edu, key)
|
||||||
else:
|
else:
|
||||||
@ -993,15 +958,9 @@ class FederationSender(AbstractFederationSender):
|
|||||||
|
|
||||||
for destination in destinations:
|
for destination in destinations:
|
||||||
if immediate:
|
if immediate:
|
||||||
queue = self._get_per_destination_queue(destination)
|
self._get_per_destination_queue(destination).attempt_new_transaction()
|
||||||
if queue is None:
|
|
||||||
continue
|
|
||||||
queue.attempt_new_transaction()
|
|
||||||
else:
|
else:
|
||||||
queue = self._get_per_destination_queue(destination)
|
self._get_per_destination_queue(destination).mark_new_data()
|
||||||
if queue is None:
|
|
||||||
continue
|
|
||||||
queue.mark_new_data()
|
|
||||||
self._destination_wakeup_queue.add_to_queue(destination)
|
self._destination_wakeup_queue.add_to_queue(destination)
|
||||||
|
|
||||||
def wake_destination(self, destination: str) -> None:
|
def wake_destination(self, destination: str) -> None:
|
||||||
@ -1020,9 +979,7 @@ class FederationSender(AbstractFederationSender):
|
|||||||
):
|
):
|
||||||
return
|
return
|
||||||
|
|
||||||
queue = self._get_per_destination_queue(destination)
|
self._get_per_destination_queue(destination).attempt_new_transaction()
|
||||||
if queue is not None:
|
|
||||||
queue.attempt_new_transaction()
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_current_token() -> int:
|
def get_current_token() -> int:
|
||||||
@ -1067,9 +1024,6 @@ class FederationSender(AbstractFederationSender):
|
|||||||
d
|
d
|
||||||
for d in destinations_to_wake
|
for d in destinations_to_wake
|
||||||
if self._federation_shard_config.should_handle(self._instance_name, d)
|
if self._federation_shard_config.should_handle(self._instance_name, d)
|
||||||
and self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist(
|
|
||||||
d
|
|
||||||
)
|
|
||||||
]
|
]
|
||||||
|
|
||||||
for destination in destinations_to_wake:
|
for destination in destinations_to_wake:
|
||||||
@@ -495,7 +495,7 @@ class AdminHandler:
                    )
                except Exception as ex:
                    logger.info(
-                        "Redaction of event %s failed due to: %s", event.event_id, ex
+                        f"Redaction of event {event.event_id} failed due to: {ex}"
                    )
                    result["failed_redactions"][event.event_id] = str(ex)
            await self._task_scheduler.update_task(task.id, result=result)
@@ -465,7 +465,9 @@ class ApplicationServicesHandler:
            service, "read_receipt"
        )
        if new_token is not None and new_token.stream <= from_key:
-            logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
+            logger.debug(
+                "Rejecting token lower than or equal to stored: %s" % (new_token,)
+            )
            return []
 
        from_token = MultiWriterStreamToken(stream=from_key)
@@ -507,7 +509,9 @@ class ApplicationServicesHandler:
            service, "presence"
        )
        if new_token is not None and new_token <= from_key:
-            logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
+            logger.debug(
+                "Rejecting token lower than or equal to stored: %s" % (new_token,)
+            )
            return []
 
        for user in users:
@@ -76,7 +76,7 @@ from synapse.storage.databases.main.registration import (
    LoginTokenLookupResult,
    LoginTokenReused,
 )
-from synapse.types import JsonDict, Requester, StrCollection, UserID
+from synapse.types import JsonDict, Requester, UserID
 from synapse.util import stringutils as stringutils
 from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
 from synapse.util.msisdn import phone_number_to_msisdn
@ -1547,31 +1547,6 @@ class AuthHandler:
|
|||||||
user_id, (token_id for _, token_id, _ in tokens_and_devices)
|
user_id, (token_id for _, token_id, _ in tokens_and_devices)
|
||||||
)
|
)
|
||||||
|
|
||||||
async def delete_access_tokens_for_devices(
|
|
||||||
self,
|
|
||||||
user_id: str,
|
|
||||||
device_ids: StrCollection,
|
|
||||||
) -> None:
|
|
||||||
"""Invalidate access tokens for the devices
|
|
||||||
|
|
||||||
Args:
|
|
||||||
user_id: ID of user the tokens belong to
|
|
||||||
device_ids: ID of device the tokens are associated with.
|
|
||||||
If None, tokens associated with any device (or no device) will
|
|
||||||
be deleted
|
|
||||||
"""
|
|
||||||
tokens_and_devices = await self.store.user_delete_access_tokens_for_devices(
|
|
||||||
user_id,
|
|
||||||
device_ids,
|
|
||||||
)
|
|
||||||
|
|
||||||
# see if any modules want to know about this
|
|
||||||
if self.password_auth_provider.on_logged_out_callbacks:
|
|
||||||
for token, _, device_id in tokens_and_devices:
|
|
||||||
await self.password_auth_provider.on_logged_out(
|
|
||||||
user_id=user_id, device_id=device_id, access_token=token
|
|
||||||
)
|
|
||||||
|
|
||||||
async def add_threepid(
|
async def add_threepid(
|
||||||
self, user_id: str, medium: str, address: str, validated_at: int
|
self, user_id: str, medium: str, address: str, validated_at: int
|
||||||
) -> None:
|
) -> None:
|
||||||
@ -1920,7 +1895,7 @@ def load_single_legacy_password_auth_provider(
|
|||||||
try:
|
try:
|
||||||
provider = module(config=config, account_handler=api)
|
provider = module(config=config, account_handler=api)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception("Error while initializing %r: %s", module, e)
|
logger.error("Error while initializing %r: %s", module, e)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# All methods that the module provides should be async, but this wasn't enforced
|
# All methods that the module provides should be async, but this wasn't enforced
|
||||||
@ -2453,7 +2428,7 @@ class PasswordAuthProvider:
|
|||||||
except CancelledError:
|
except CancelledError:
|
||||||
raise
|
raise
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.exception("Module raised an exception in is_3pid_allowed: %s", e)
|
logger.error("Module raised an exception in is_3pid_allowed: %s", e)
|
||||||
raise SynapseError(code=500, msg="Internal Server Error")
|
raise SynapseError(code=500, msg="Internal Server Error")
|
||||||
|
|
||||||
return True
|
return True
|
||||||
@@ -96,14 +96,6 @@ class DeactivateAccountHandler:
                403, "Deactivation of this user is forbidden", Codes.FORBIDDEN
            )
 
-        logger.info(
-            "%s requested deactivation of %s erase_data=%s id_server=%s",
-            requester.user,
-            user_id,
-            erase_data,
-            id_server,
-        )
-
        # FIXME: Theoretically there is a race here wherein user resets
        # password using threepid.
 
@@ -671,12 +671,12 @@ class DeviceHandler(DeviceWorkerHandler):
            except_device_id: optional device id which should not be deleted
        """
        device_map = await self.store.get_devices_by_user(user_id)
+        device_ids = list(device_map)
        if except_device_id is not None:
-            device_map.pop(except_device_id, None)
-        user_device_ids = device_map.keys()
-        await self.delete_devices(user_id, user_device_ids)
+            device_ids = [d for d in device_ids if d != except_device_id]
+        await self.delete_devices(user_id, device_ids)
 
-    async def delete_devices(self, user_id: str, device_ids: StrCollection) -> None:
+    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
        """Delete several devices
 
        Args:
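
Note: the two sides of the hunk above derive the same set of device IDs in different ways; develop pops the exception from the mapping, the release filters a list. A tiny runnable sketch with made-up data:

    device_map = {"DEV1": {}, "DEV2": {}, "DEV3": {}}
    except_device_id = "DEV2"

    # develop side: drop the excepted device from (a copy of) the mapping.
    pruned = dict(device_map)
    pruned.pop(except_device_id, None)
    user_device_ids = pruned.keys()

    # release side: filter the listed IDs instead.
    device_ids = [d for d in device_map if d != except_device_id]

    assert sorted(user_device_ids) == sorted(device_ids) == ["DEV1", "DEV3"]
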
@ -695,10 +695,17 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
# Delete data specific to each device. Not optimised as its an
|
# Delete data specific to each device. Not optimised as it is not
|
||||||
# experimental MSC.
|
# considered as part of a critical path.
|
||||||
if self.hs.config.experimental.msc3890_enabled:
|
|
||||||
for device_id in device_ids:
|
for device_id in device_ids:
|
||||||
|
await self._auth_handler.delete_access_tokens_for_user(
|
||||||
|
user_id, device_id=device_id
|
||||||
|
)
|
||||||
|
await self.store.delete_e2e_keys_by_device(
|
||||||
|
user_id=user_id, device_id=device_id
|
||||||
|
)
|
||||||
|
|
||||||
|
if self.hs.config.experimental.msc3890_enabled:
|
||||||
# Remove any local notification settings for this device in accordance
|
# Remove any local notification settings for this device in accordance
|
||||||
# with MSC3890.
|
# with MSC3890.
|
||||||
await self._account_data_handler.remove_account_data_for_user(
|
await self._account_data_handler.remove_account_data_for_user(
|
||||||
@ -706,13 +713,6 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
f"org.matrix.msc3890.local_notification_settings.{device_id}",
|
f"org.matrix.msc3890.local_notification_settings.{device_id}",
|
||||||
)
|
)
|
||||||
|
|
||||||
# If we're deleting a lot of devices, a bunch of them may not have any
|
|
||||||
# to-device messages queued up. We filter those out to avoid scheduling
|
|
||||||
# unnecessary tasks.
|
|
||||||
devices_with_messages = await self.store.get_devices_with_messages(
|
|
||||||
user_id, device_ids
|
|
||||||
)
|
|
||||||
for device_id in devices_with_messages:
|
|
||||||
# Delete device messages asynchronously and in batches using the task scheduler
|
# Delete device messages asynchronously and in batches using the task scheduler
|
||||||
# We specify an upper stream id to avoid deleting non delivered messages
|
# We specify an upper stream id to avoid deleting non delivered messages
|
||||||
# if an user re-uses a device ID.
|
# if an user re-uses a device ID.
|
||||||
@ -726,10 +726,6 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
await self._auth_handler.delete_access_tokens_for_devices(
|
|
||||||
user_id, device_ids=device_ids
|
|
||||||
)
|
|
||||||
|
|
||||||
# Pushers are deleted after `delete_access_tokens_for_user` is called so that
|
# Pushers are deleted after `delete_access_tokens_for_user` is called so that
|
||||||
# modules using `on_logged_out` hook can use them if needed.
|
# modules using `on_logged_out` hook can use them if needed.
|
||||||
await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)
|
await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)
|
||||||
@ -823,7 +819,6 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
# This should only happen if there are no updates, so we bail.
|
# This should only happen if there are no updates, so we bail.
|
||||||
return
|
return
|
||||||
|
|
||||||
if logger.isEnabledFor(logging.DEBUG):
|
|
||||||
for device_id in device_ids:
|
for device_id in device_ids:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"Notifying about update %r/%r, ID: %r", user_id, device_id, position
|
"Notifying about update %r/%r, ID: %r", user_id, device_id, position
|
||||||
@ -927,6 +922,9 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
# can't call self.delete_device because that will clobber the
|
# can't call self.delete_device because that will clobber the
|
||||||
# access token so call the storage layer directly
|
# access token so call the storage layer directly
|
||||||
await self.store.delete_devices(user_id, [old_device_id])
|
await self.store.delete_devices(user_id, [old_device_id])
|
||||||
|
await self.store.delete_e2e_keys_by_device(
|
||||||
|
user_id=user_id, device_id=old_device_id
|
||||||
|
)
|
||||||
|
|
||||||
# tell everyone that the old device is gone and that the dehydrated
|
# tell everyone that the old device is gone and that the dehydrated
|
||||||
# device has a new display name
|
# device has a new display name
|
||||||
@ -948,6 +946,7 @@ class DeviceHandler(DeviceWorkerHandler):
|
|||||||
raise errors.NotFoundError()
|
raise errors.NotFoundError()
|
||||||
|
|
||||||
await self.delete_devices(user_id, [device_id])
|
await self.delete_devices(user_id, [device_id])
|
||||||
|
await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
|
||||||
|
|
||||||
@wrap_as_background_process("_handle_new_device_update_async")
|
@wrap_as_background_process("_handle_new_device_update_async")
|
||||||
async def _handle_new_device_update_async(self) -> None:
|
async def _handle_new_device_update_async(self) -> None:
|
||||||
@ -1601,7 +1600,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
|
|||||||
if prev_stream_id is not None and cached_devices == {
|
if prev_stream_id is not None and cached_devices == {
|
||||||
d["device_id"]: d for d in devices
|
d["device_id"]: d for d in devices
|
||||||
}:
|
}:
|
||||||
logger.info(
|
logging.info(
|
||||||
"Skipping device list resync for %s, as our cache matches already",
|
"Skipping device list resync for %s, as our cache matches already",
|
||||||
user_id,
|
user_id,
|
||||||
)
|
)
|
||||||
@@ -282,7 +282,7 @@ class DirectoryHandler:
        except RequestSendFailed:
            raise SynapseError(502, "Failed to fetch alias")
        except CodeMessageException as e:
-            logger.warning(
+            logging.warning(
                "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
            )
            if e.code == 404:
@@ -78,7 +78,6 @@ from synapse.replication.http.federation import (
    ReplicationStoreRoomOnOutlierMembershipRestServlet,
 )
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.invite_rule import InviteRule
 from synapse.types import JsonDict, StrCollection, get_domain_from_id
 from synapse.types.state import StateFilter
 from synapse.util.async_helpers import Linearizer
@ -1062,8 +1061,8 @@ class FederationHandler:
|
|||||||
if self.hs.config.server.block_non_admin_invites:
|
if self.hs.config.server.block_non_admin_invites:
|
||||||
raise SynapseError(403, "This server does not accept room invites")
|
raise SynapseError(403, "This server does not accept room invites")
|
||||||
|
|
||||||
spam_check = (
|
spam_check = await self._spam_checker_module_callbacks.user_may_invite(
|
||||||
await self._spam_checker_module_callbacks.federated_user_may_invite(event)
|
event.sender, event.state_key, event.room_id
|
||||||
)
|
)
|
||||||
if spam_check != NOT_SPAM:
|
if spam_check != NOT_SPAM:
|
||||||
raise SynapseError(
|
raise SynapseError(
|
||||||
@ -1090,22 +1089,6 @@ class FederationHandler:
|
|||||||
if event.state_key == self._server_notices_mxid:
|
if event.state_key == self._server_notices_mxid:
|
||||||
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
|
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
|
||||||
|
|
||||||
# check the invitee's configuration and apply rules
|
|
||||||
invite_config = await self.store.get_invite_config_for_user(event.state_key)
|
|
||||||
rule = invite_config.get_invite_rule(event.sender)
|
|
||||||
if rule == InviteRule.BLOCK:
|
|
||||||
logger.info(
|
|
||||||
"Automatically rejecting invite from %s due to the invite filtering rules of %s",
|
|
||||||
event.sender,
|
|
||||||
event.state_key,
|
|
||||||
)
|
|
||||||
raise SynapseError(
|
|
||||||
403,
|
|
||||||
"You are not permitted to invite this user.",
|
|
||||||
errcode=Codes.INVITE_BLOCKED,
|
|
||||||
)
|
|
||||||
# InviteRule.IGNORE is handled at the sync layer
|
|
||||||
|
|
||||||
# We retrieve the room member handler here as to not cause a cyclic dependency
|
# We retrieve the room member handler here as to not cause a cyclic dependency
|
||||||
member_handler = self.hs.get_room_member_handler()
|
member_handler = self.hs.get_room_member_handler()
|
||||||
# We don't rate limit based on room ID, as that should be done by
|
# We don't rate limit based on room ID, as that should be done by
|
||||||
@@ -218,7 +218,7 @@ class IdentityHandler:
 
            return data
        except HttpResponseException as e:
-            logger.exception("3PID bind failed with Matrix error: %r", e)
+            logger.error("3PID bind failed with Matrix error: %r", e)
            raise e.to_synapse_error()
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
@@ -323,7 +323,7 @@ class IdentityHandler:
                # The remote server probably doesn't support unbinding (yet)
                logger.warning("Received %d response while unbinding threepid", e.code)
            else:
-                logger.exception("Failed to unbind threepid on identity server: %s", e)
+                logger.error("Failed to unbind threepid on identity server: %s", e)
            raise SynapseError(500, "Failed to contact identity server")
        except RequestTimedOutError:
            raise SynapseError(500, "Timed out contacting identity server")
@@ -460,7 +460,7 @@ class MessageHandler:
            # date from the database in the same database transaction.
            await self.store.expire_event(event_id)
        except Exception as e:
-            logger.exception("Could not expire event %s: %r", event_id, e)
+            logger.error("Could not expire event %s: %r", event_id, e)
 
        # Schedule the expiry of the next event to expire.
        await self._schedule_next_expiry()
@@ -2061,8 +2061,7 @@ class EventCreationHandler:
            # dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
            logger.info(
                "Failed to send dummy event into room %s. Will exclude it from "
-                "future attempts until cache expires",
-                room_id,
+                "future attempts until cache expires" % (room_id,)
            )
            now = self.clock.time_msec()
            self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
@ -2121,9 +2120,7 @@ class EventCreationHandler:
|
|||||||
except AuthError:
|
except AuthError:
|
||||||
logger.info(
|
logger.info(
|
||||||
"Failed to send dummy event into room %s for user %s due to "
|
"Failed to send dummy event into room %s for user %s due to "
|
||||||
"lack of power. Will try another user",
|
"lack of power. Will try another user" % (room_id, user_id)
|
||||||
room_id,
|
|
||||||
user_id,
|
|
||||||
)
|
)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@@ -563,13 +563,12 @@ class OidcProvider:
                raise ValueError("Unexpected subject")
        except Exception:
            logger.warning(
-                "OIDC Back-Channel Logout is enabled for issuer %r "
+                f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
                "but it looks like the configured `user_mapping_provider` "
                "does not use the `sub` claim as subject. If it is the case, "
                "and you want Synapse to ignore the `sub` claim in OIDC "
                "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
-                "to `true` in the issuer config.",
-                self.issuer,
+                "to `true` in the issuer config."
            )
 
    @property
@ -827,10 +826,10 @@ class OidcProvider:
|
|||||||
if response.code < 400:
|
if response.code < 400:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"Invalid response from the authorization server: "
|
"Invalid response from the authorization server: "
|
||||||
'responded with a "%s" '
|
'responded with a "{status}" '
|
||||||
"but body has an error field: %r",
|
"but body has an error field: {error!r}".format(
|
||||||
status,
|
status=status, error=resp["error"]
|
||||||
resp["error"],
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
description = resp.get("error_description", error)
|
description = resp.get("error_description", error)
|
||||||
@ -1386,8 +1385,7 @@ class OidcProvider:
|
|||||||
# support dynamic registration in Synapse at some point.
|
# support dynamic registration in Synapse at some point.
|
||||||
if not self._config.backchannel_logout_enabled:
|
if not self._config.backchannel_logout_enabled:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
"Received an OIDC Back-Channel Logout request from issuer %r but it is disabled in config",
|
f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
|
||||||
self.issuer,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# TODO: this responds with a 400 status code, which is what the OIDC
|
# TODO: this responds with a 400 status code, which is what the OIDC
|
||||||
@ -1799,5 +1797,5 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
|
|||||||
extras[key] = template.render(user=userinfo).strip()
|
extras[key] = template.render(user=userinfo).strip()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
# Log an error and skip this value (don't break login for this).
|
# Log an error and skip this value (don't break login for this).
|
||||||
logger.exception("Failed to render OIDC extra attribute %s: %s", key, e)
|
logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e))
|
||||||
return extras
|
return extras
|
||||||
@@ -115,7 +115,6 @@ class RegistrationHandler:
        self._user_consent_version = self.hs.config.consent.user_consent_version
        self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
        self._server_name = hs.hostname
-        self._user_types_config = hs.config.user_types
 
        self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
 
@@ -307,9 +306,6 @@ class RegistrationHandler:
        elif default_display_name is None:
            default_display_name = localpart
 
-        if user_type is None:
-            user_type = self._user_types_config.default_user_type
-
        await self.register_with_store(
            user_id=user_id,
            password_hash=password_hash,
@@ -506,7 +502,7 @@ class RegistrationHandler:
                ratelimit=False,
            )
        except Exception as e:
-            logger.exception("Failed to join new user to %r: %r", r, e)
+            logger.error("Failed to join new user to %r: %r", r, e)
 
    async def _join_rooms(self, user_id: str) -> None:
        """
@@ -596,7 +592,7 @@ class RegistrationHandler:
                # moving away from bare excepts is a good thing to do.
                logger.error("Failed to join new user to %r: %r", r, e)
            except Exception as e:
-                logger.exception("Failed to join new user to %r: %r", r, e)
+                logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)
 
    async def _auto_join_rooms(self, user_id: str) -> None:
        """Automatically joins users to auto join rooms - creating the room in the first place
@@ -1,98 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright 2015, 2016 OpenMarket Ltd
-# Copyright (C) 2023 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-#
-import logging
-from http import HTTPStatus
-from typing import TYPE_CHECKING
-
-from synapse.api.errors import Codes, SynapseError
-from synapse.api.ratelimiting import Ratelimiter
-from synapse.types import (
-    Requester,
-)
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class ReportsHandler:
-    def __init__(self, hs: "HomeServer"):
-        self._hs = hs
-        self._store = hs.get_datastores().main
-        self._clock = hs.get_clock()
-
-        # Ratelimiter for management of existing delayed events,
-        # keyed by the requesting user ID.
-        self._reports_ratelimiter = Ratelimiter(
-            store=self._store,
-            clock=self._clock,
-            cfg=hs.config.ratelimiting.rc_reports,
-        )
-
-    async def report_user(
-        self, requester: Requester, target_user_id: str, reason: str
-    ) -> None:
-        """Files a report against a user from a user.
-
-        Rate and size limits are applied to the report. If the user being reported
-        does not belong to this server, the report is ignored. This check is done
-        after the limits to reduce DoS potential.
-
-        If the user being reported belongs to this server, but doesn't exist, we
-        similarly ignore the report. The spec allows us to return an error if we
-        want to, but we choose to hide that user's existence instead.
-
-        If the report is otherwise valid (for a user which exists on our server),
-        we append it to the database for later processing.
-
-        Args:
-            requester - The user filing the report.
-            target_user_id - The user being reported.
-            reason - The user-supplied reason the user is being reported.
-
-        Raises:
-            SynapseError for BAD_REQUEST/BAD_JSON if the reason is too long.
-        """
-
-        await self._check_limits(requester)
-
-        if len(reason) > 1000:
-            raise SynapseError(
-                HTTPStatus.BAD_REQUEST,
-                "Reason must be less than 1000 characters",
-                Codes.BAD_JSON,
-            )
-
-        if not self._hs.is_mine_id(target_user_id):
-            return  # hide that they're not ours/that we can't do anything about them
-
-        user = await self._store.get_user_by_id(target_user_id)
-        if user is None:
-            return  # hide that they don't exist
-
-        await self._store.add_user_report(
-            target_user_id=target_user_id,
-            user_id=requester.user.to_string(),
-            reason=reason,
-            received_ts=self._clock.time_msec(),
-        )
-
-    async def _check_limits(self, requester: Requester) -> None:
-        await self._reports_ratelimiter.ratelimit(
-            requester,
-            requester.user.to_string(),
-        )
|
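
Note: the deleted `ReportsHandler` above pairs with the `rc_reports` ratelimit settings removed in the ratelimiting hunk earlier in this diff (per_second=1, burst_count=5). A toy, self-contained bucket that only illustrates the shape of such a limit, not Synapse's Ratelimiter API:

    import time

    class TokenBucket:
        def __init__(self, per_second: float = 1.0, burst_count: int = 5) -> None:
            self.rate = per_second
            self.capacity = burst_count
            self.tokens = float(burst_count)
            self.last = time.monotonic()

        def allow(self) -> bool:
            now = time.monotonic()
            # Refill at `rate` tokens per second, capped at the burst size.
            self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

    bucket = TokenBucket()
    assert all(bucket.allow() for _ in range(5))  # burst of 5 allowed
    assert not bucket.allow()                     # sixth immediate report limited
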
@@ -468,6 +468,17 @@ class RoomCreationHandler:
        """
        user_id = requester.user.to_string()
 
+        spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
+            user_id
+        )
+        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
+            raise SynapseError(
+                403,
+                "You are not permitted to create rooms",
+                errcode=spam_check[0],
+                additional_fields=spam_check[1],
+            )
+
        creation_content: JsonDict = {
            "room_version": new_room_version.identifier,
            "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
@@ -574,24 +585,6 @@ class RoomCreationHandler:
            if current_power_level_int < needed_power_level:
                user_power_levels[user_id] = needed_power_level
 
-        # We construct what the body of a call to /createRoom would look like for passing
-        # to the spam checker. We don't include a preset here, as we expect the
-        # initial state to contain everything we need.
-        spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
-            user_id,
-            {
-                "creation_content": creation_content,
-                "initial_state": list(initial_state.items()),
-            },
-        )
-        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
-            raise SynapseError(
-                403,
-                "You are not permitted to create rooms",
-                errcode=spam_check[0],
-                additional_fields=spam_check[1],
-            )
-
        await self._send_events_for_new_room(
            requester,
            new_room_id,
@@ -698,7 +691,7 @@ class RoomCreationHandler:
        except SynapseError as e:
            # again I'm not really expecting this to fail, but if it does, I'd rather
            # we returned the new room to the client at this point.
-            logger.exception("Unable to send updated alias events in old room: %s", e)
+            logger.error("Unable to send updated alias events in old room: %s", e)
 
        try:
            await self.event_creation_handler.create_and_send_nonmember_event(
@@ -715,7 +708,7 @@ class RoomCreationHandler:
        except SynapseError as e:
            # again I'm not really expecting this to fail, but if it does, I'd rather
            # we returned the new room to the client at this point.
-            logger.exception("Unable to send updated alias events in new room: %s", e)
+            logger.error("Unable to send updated alias events in new room: %s", e)
 
    async def create_room(
        self,
@@ -793,7 +786,7 @@ class RoomCreationHandler:
 
        if not is_requester_admin:
            spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
-                user_id, config
+                user_id
            )
            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                raise SynapseError(
@@ -53,7 +53,6 @@ from synapse.metrics import event_processing_positions
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.push import ReplicationCopyPusherRestServlet
 from synapse.storage.databases.main.state_deltas import StateDelta
-from synapse.storage.invite_rule import InviteRule
 from synapse.types import (
    JsonDict,
    Requester,
@@ -159,7 +158,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_room,
-            ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
        )
 
        # Ratelimiter for invites, keyed by recipient (across all rooms, all
@@ -168,7 +166,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_user,
-            ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
        )
 
        # Ratelimiter for invites, keyed by issuer (across all rooms, all
@@ -177,7 +174,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            store=self.store,
            clock=self.clock,
            cfg=hs.config.ratelimiting.rc_invites_per_issuer,
-            ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit,
        )
 
        self._third_party_invite_limiter = Ratelimiter(
@@ -916,23 +912,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                additional_fields=block_invite_result[1],
            )
 
-        # check the invitee's configuration and apply rules. Admins on the server can bypass.
-        if not is_requester_admin:
-            invite_config = await self.store.get_invite_config_for_user(target_id)
-            rule = invite_config.get_invite_rule(requester.user.to_string())
-            if rule == InviteRule.BLOCK:
-                logger.info(
-                    "Automatically rejecting invite from %s due to the the invite filtering rules of %s",
-                    target_id,
-                    requester.user,
-                )
-                raise SynapseError(
-                    403,
-                    "You are not permitted to invite this user.",
-                    errcode=Codes.INVITE_BLOCKED,
-                )
-            # InviteRule.IGNORE is handled at the sync layer.
-
        # An empty prev_events list is allowed as long as the auth_event_ids are present
        if prev_event_ids is not None:
            return await self._local_membership_update(
||||||
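The removed block is develop's server-side invite filtering: the invitee's per-user invite config is loaded, and a BLOCK rule rejects the invite at send time with a 403 and `Codes.INVITE_BLOCKED`, while IGNORE is deferred to the sync layer (see the SyncHandler and SlidingSyncRoomLists hunks further down). A standalone sketch of the rule dispatch; `InviteConfig` here is a hypothetical stand-in for the stored configuration, not Synapse's storage class.

# Standalone sketch of the invite-rule dispatch removed in the hunk above.
# InviteRule mirrors the values used on develop; InviteConfig is hypothetical.
from enum import Enum
from typing import Dict


class InviteRule(Enum):
    ALLOW = "allow"
    BLOCK = "block"    # reject at invite time (403, M_INVITE_BLOCKED)
    IGNORE = "ignore"  # accept, but hide from the invitee at the sync layer


class InviteConfig:
    def __init__(self, rules: Dict[str, InviteRule]) -> None:
        self.rules = rules

    def get_invite_rule(self, inviter: str) -> InviteRule:
        # Fall back to ALLOW when the inviter is not mentioned in the config.
        return self.rules.get(inviter, InviteRule.ALLOW)


config = InviteConfig({"@spammer:example.org": InviteRule.BLOCK})
assert config.get_invite_rule("@spammer:example.org") is InviteRule.BLOCK
assert config.get_invite_rule("@friend:example.org") is InviteRule.ALLOW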
@@ -1572,7 +1551,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     require_consent=False,
                 )
             except Exception as e:
-                logger.exception("Error kicking guest user: %s", e)
+                logger.exception("Error kicking guest user: %s" % (e,))

     async def lookup_room_alias(
         self, room_alias: RoomAlias
@@ -54,9 +54,6 @@ class RoomPolicyHandler:
         Returns:
             bool: True if the event is allowed in the room, False otherwise.
         """
-        if event.type == "org.matrix.msc4284.policy" and event.state_key is not None:
-            return True  # always allow policy server change events
-
         policy_event = await self._storage_controllers.state.get_current_state_event(
             event.room_id, "org.matrix.msc4284.policy", ""
         )
@@ -111,15 +111,7 @@ class RoomSummaryHandler:
         # If a user tries to fetch the same page multiple times in quick succession,
         # only process the first attempt and return its result to subsequent requests.
         self._pagination_response_cache: ResponseCache[
-            Tuple[
-                str,
-                str,
-                bool,
-                Optional[int],
-                Optional[int],
-                Optional[str],
-                Optional[Tuple[str, ...]],
-            ]
+            Tuple[str, str, bool, Optional[int], Optional[int], Optional[str]]
         ] = ResponseCache(
             hs.get_clock(),
             "get_room_hierarchy",
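develop widens the pagination cache key with a trailing `Optional[Tuple[str, ...]]` so the requested remote hosts take part in cache lookups. The member must be a tuple rather than a list because `ResponseCache` keys are used as dict keys and therefore have to be hashable. A quick illustration; the positional labels in the comments are informed guesses from the surrounding signature.

# Cache keys must be hashable, which is why the extra member above is typed
# Optional[Tuple[str, ...]] rather than a list.
from typing import Optional, Tuple

Key = Tuple[str, str, bool, Optional[int], Optional[int], Optional[str],
            Optional[Tuple[str, ...]]]

key: Key = (
    "@user:example.org",     # requester
    "!room:example.org",     # requested room ID
    False,                   # suggested_only
    None,                    # max_depth
    50,                      # limit
    None,                    # from_token
    ("other.example.org",),  # remote_room_hosts, as a hashable tuple
)
hash(key)  # fine: every member is hashable

try:
    hash(("@user:example.org", ["other.example.org"]))  # list member
except TypeError as err:
    print(err)  # unhashable type: 'list'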
@@ -134,7 +126,6 @@ class RoomSummaryHandler:
         max_depth: Optional[int] = None,
         limit: Optional[int] = None,
         from_token: Optional[str] = None,
-        remote_room_hosts: Optional[Tuple[str, ...]] = None,
     ) -> JsonDict:
         """
         Implementation of the room hierarchy C-S API.
@@ -152,9 +143,6 @@ class RoomSummaryHandler:
             limit: An optional limit on the number of rooms to return per
                 page. Must be a positive integer.
             from_token: An optional pagination token.
-            remote_room_hosts: An optional list of remote homeserver server names. If defined,
-                each host will be used to try and fetch the room hierarchy. Must be a tuple so
-                that it can be hashed by the `RoomSummaryHandler._pagination_response_cache`.

         Returns:
             The JSON hierarchy dictionary.
@@ -174,7 +162,6 @@ class RoomSummaryHandler:
                 max_depth,
                 limit,
                 from_token,
-                remote_room_hosts,
             ),
             self._get_room_hierarchy,
             requester.user.to_string(),
@@ -183,7 +170,6 @@ class RoomSummaryHandler:
             max_depth,
             limit,
             from_token,
-            remote_room_hosts,
         )

     async def _get_room_hierarchy(
@@ -194,7 +180,6 @@ class RoomSummaryHandler:
         max_depth: Optional[int] = None,
         limit: Optional[int] = None,
         from_token: Optional[str] = None,
-        remote_room_hosts: Optional[Tuple[str, ...]] = None,
     ) -> JsonDict:
         """See docstring for SpaceSummaryHandler.get_room_hierarchy."""

@@ -214,7 +199,7 @@ class RoomSummaryHandler:

         if not local_room:
             room_hierarchy = await self._summarize_remote_room_hierarchy(
-                _RoomQueueEntry(requested_room_id, remote_room_hosts or ()),
+                _RoomQueueEntry(requested_room_id, ()),
                 False,
             )
             root_room_entry = room_hierarchy[0]
@@ -255,7 +240,7 @@ class RoomSummaryHandler:
             processed_rooms = set(pagination_session["processed_rooms"])
         else:
             # The queue of rooms to process, the next room is last on the stack.
-            room_queue = [_RoomQueueEntry(requested_room_id, remote_room_hosts or ())]
+            room_queue = [_RoomQueueEntry(requested_room_id, ())]

             # Rooms we have already processed.
             processed_rooms = set()
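Both hunks above seed the traversal queue: develop starts the hierarchy walk from the caller-supplied `remote_room_hosts` (falling back to an empty tuple), so a request for a space the server has never joined can still be resolved over federation, while the release always starts with `()`. A small sketch of the stack-style queue the comment describes ("the next room is last on the stack"); `_RoomQueueEntry` is modelled here as a plain (room_id, via) pair.

# Minimal sketch of the stack-based room queue used above; entries are
# modelled as (room_id, via-hosts) pairs after _RoomQueueEntry in the diff.
from typing import List, Tuple

RoomQueueEntry = Tuple[str, Tuple[str, ...]]

remote_room_hosts: Tuple[str, ...] = ("other.example.org",)

# develop seeds the walk with the caller's hosts; the release uses ().
room_queue: List[RoomQueueEntry] = [("!space:example.org", remote_room_hosts or ())]

while room_queue:
    room_id, via = room_queue.pop()  # the next room is last on the stack
    print(f"summarising {room_id} via {via or 'local data'}")
    if room_id == "!space:example.org":
        # Children discovered in m.space.child events go back on the stack.
        room_queue.append(("!child:example.org", via))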
@@ -716,7 +701,7 @@ class RoomSummaryHandler:
             # The API doesn't return the room version so assume that a
             # join rule of knock is valid.
             if (
-                room.get("join_rule", JoinRules.PUBLIC)
+                room.get("join_rule")
                 in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
                 or room.get("world_readable") is True
             ):
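The change here is only the default: with `room.get("join_rule", JoinRules.PUBLIC)`, a remote room summary that omits the field is treated as public and passes the check, whereas plain `room.get("join_rule")` yields None and the entry is only accepted if it is world-readable. The `dict.get` semantics in isolation:

# dict.get default semantics behind the hunk above (values per the Matrix
# spec: JoinRules.PUBLIC == "public", and so on).
room = {"world_readable": False}  # remote summary without a "join_rule" field

assert room.get("join_rule") is None                # v1.131.0rc1 behaviour
assert room.get("join_rule", "public") == "public"  # develop behaviour

# With the default, the check accepts the entry even when the field is absent:
assert room.get("join_rule", "public") in ("public", "knock", "knock_restricted")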
@@ -124,7 +124,7 @@ class SamlHandler:
         )

         # Since SAML sessions timeout it is useful to log when they were created.
-        logger.info("Initiating a new SAML session: %s", reqid)
+        logger.info("Initiating a new SAML session: %s" % (reqid,))

         now = self.clock.time_msec()
         self._outstanding_requests_dict[reqid] = Saml2SessionData(
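develop passes `reqid` as a lazy `%s` argument; the release formats eagerly with `%`. The rendered message is identical, but lazy arguments are only interpolated if the record is actually emitted, and handlers and filters keep access to the raw arguments on the LogRecord. A side-by-side of the two spellings (the same distinction applies to the SendEmailHandler and SyncHandler logging hunks below):

import logging

logging.basicConfig(level=logging.WARNING)  # INFO is disabled here
logger = logging.getLogger(__name__)

reqid = "id-5f2a"

# Eager: the string is built before logger.info is even called.
logger.info("Initiating a new SAML session: %s" % (reqid,))

# Lazy: interpolation is skipped entirely while INFO is disabled, and the
# original args stay available on the LogRecord for handlers and filters.
logger.info("Initiating a new SAML session: %s", reqid)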
@@ -238,7 +238,7 @@ class SendEmailHandler:
         multipart_msg.attach(text_part)
         multipart_msg.attach(html_part)

-        logger.info("Sending email to %s", email_address)
+        logger.info("Sending email to %s" % email_address)

         await self._sendmail(
             self._reactor,
@@ -23,7 +23,6 @@ from typing import (
     List,
     Literal,
     Mapping,
-    MutableMapping,
     Optional,
     Set,
     Tuple,
@@ -50,7 +49,6 @@ from synapse.storage.databases.main.state import (
     Sentinel as StateSentinel,
 )
 from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
-from synapse.storage.invite_rule import InviteRule
 from synapse.storage.roommember import (
     RoomsForUser,
     RoomsForUserSlidingSync,
@@ -74,7 +72,6 @@ from synapse.types.handlers.sliding_sync import (
     SlidingSyncResult,
 )
 from synapse.types.state import StateFilter
-from synapse.util import MutableOverlayMapping

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -247,13 +244,11 @@ class SlidingSyncRoomLists:
         # Note: this won't include rooms the user has left themselves. We add back
         # `newly_left` rooms below. This is more efficient than fetching all rooms and
         # then filtering out the old left rooms.
-        room_membership_for_user_map: MutableMapping[str, RoomsForUserSlidingSync] = (
-            MutableOverlayMapping(
-                await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
-                    user_id
-                )
-            )
-        )
+        room_membership_for_user_map = (
+            await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots(
+                user_id
+            )
+        )
         # To play nice with the rewind logic below, we need to go fetch the rooms the
         # user has left themselves but only if it changed after the `to_token`.
         #
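On develop the cached snapshot is wrapped in `MutableOverlayMapping`, so later filtering steps can mutate their own view while the shared, cached mapping underneath stays untouched; the release assigns the cached mapping directly and has to copy defensively before each mutation (the FIXME blocks in the next hunks). Below is a minimal copy-on-write overlay in the same spirit; the real `synapse.util.MutableOverlayMapping` may differ in detail.

# Minimal copy-on-write overlay: writes and deletions land in a local layer
# while the (possibly cached, shared) underlying mapping is never mutated.
# Illustrative only; synapse.util.MutableOverlayMapping may differ in detail.
from collections.abc import Iterator, Mapping, MutableMapping
from typing import Dict, TypeVar

K = TypeVar("K")
V = TypeVar("V")

_DELETED = object()  # sentinel marking keys deleted from the view


class OverlayMapping(MutableMapping[K, V]):
    def __init__(self, underlying: Mapping[K, V]) -> None:
        self._underlying = underlying
        self._overlay: Dict[K, object] = {}

    def __getitem__(self, key: K) -> V:
        if key in self._overlay:
            value = self._overlay[key]
            if value is _DELETED:
                raise KeyError(key)
            return value  # type: ignore[return-value]
        return self._underlying[key]

    def __setitem__(self, key: K, value: V) -> None:
        self._overlay[key] = value

    def __delitem__(self, key: K) -> None:
        if key not in self:
            raise KeyError(key)
        self._overlay[key] = _DELETED

    def __iter__(self) -> Iterator[K]:
        yield from (k for k, v in self._overlay.items() if v is not _DELETED)
        yield from (k for k in self._underlying if k not in self._overlay)

    def __len__(self) -> int:
        return sum(1 for _ in self)


cached = {"!a:x": "join", "!b:x": "invite"}  # pretend this came from a cache
view = OverlayMapping(cached)
view.pop("!b:x", None)   # drop an ignored invite from this request's view...
assert "!b:x" in cached  # ...while the shared cached mapping is untouched
assert "!b:x" not in view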
@@ -272,26 +267,32 @@
             )
         )
         if self_leave_room_membership_for_user_map:
+            # FIXME: It would be nice to avoid this copy but since
+            # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
+            # can't return a mutable value like a `dict`. We make the copy to get a
+            # mutable dict that we can change. We try to only make a copy when necessary
+            # (if we actually need to change something) as in most cases, the logic
+            # doesn't need to run.
+            room_membership_for_user_map = dict(room_membership_for_user_map)
             room_membership_for_user_map.update(self_leave_room_membership_for_user_map)

         # Remove invites from ignored users
         ignored_users = await self.store.ignored_users(user_id)
-        invite_config = await self.store.get_invite_config_for_user(user_id)
         if ignored_users:
+            # FIXME: It would be nice to avoid this copy but since
+            # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
+            # can't return a mutable value like a `dict`. We make the copy to get a
+            # mutable dict that we can change. We try to only make a copy when necessary
+            # (if we actually need to change something) as in most cases, the logic
+            # doesn't need to run.
+            room_membership_for_user_map = dict(room_membership_for_user_map)
             # Make a copy so we don't run into an error: `dictionary changed size during
             # iteration`, when we remove items
             for room_id in list(room_membership_for_user_map.keys()):
                 room_for_user_sliding_sync = room_membership_for_user_map[room_id]
                 if (
                     room_for_user_sliding_sync.membership == Membership.INVITE
-                    and room_for_user_sliding_sync.sender
-                    and (
-                        room_for_user_sliding_sync.sender in ignored_users
-                        or invite_config.get_invite_rule(
-                            room_for_user_sliding_sync.sender
-                        )
-                        == InviteRule.IGNORE
-                    )
+                    and room_for_user_sliding_sync.sender in ignored_users
                 ):
                     room_membership_for_user_map.pop(room_id, None)

@@ -306,6 +307,13 @@ class SlidingSyncRoomLists:
             sync_config.user, room_membership_for_user_map, to_token=to_token
         )
         if changes:
+            # FIXME: It would be nice to avoid this copy but since
+            # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
+            # can't return a mutable value like a `dict`. We make the copy to get a
+            # mutable dict that we can change. We try to only make a copy when necessary
+            # (if we actually need to change something) as in most cases, the logic
+            # doesn't need to run.
+            room_membership_for_user_map = dict(room_membership_for_user_map)
             for room_id, change in changes.items():
                 if change is None:
                     # Remove rooms that the user joined after the `to_token`
@@ -347,6 +355,13 @@ class SlidingSyncRoomLists:
             newly_left_room_map.keys() - room_membership_for_user_map.keys()
         )
         if missing_newly_left_rooms:
+            # FIXME: It would be nice to avoid this copy but since
+            # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
+            # can't return a mutable value like a `dict`. We make the copy to get a
+            # mutable dict that we can change. We try to only make a copy when necessary
+            # (if we actually need to change something) as in most cases, the logic
+            # doesn't need to run.
+            room_membership_for_user_map = dict(room_membership_for_user_map)
             for room_id in missing_newly_left_rooms:
                 newly_left_room_for_user = newly_left_room_map[room_id]
                 # This should be a given
@@ -437,10 +452,6 @@ class SlidingSyncRoomLists:
                 else:
                     room_membership_for_user_map.pop(room_id, None)

-        # Remove any rooms that we globally exclude from sync.
-        for room_id in self.rooms_to_exclude_globally:
-            room_membership_for_user_map.pop(room_id, None)
-
         dm_room_ids = await self._get_dm_rooms_for_user(user_id)

         if sync_config.lists:
|
|||||||
|
|
||||||
if sync_config.room_subscriptions:
|
if sync_config.room_subscriptions:
|
||||||
with start_active_span("assemble_room_subscriptions"):
|
with start_active_span("assemble_room_subscriptions"):
|
||||||
|
# FIXME: It would be nice to avoid this copy but since
|
||||||
|
# `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it
|
||||||
|
# can't return a mutable value like a `dict`. We make the copy to get a
|
||||||
|
# mutable dict that we can change. We try to only make a copy when necessary
|
||||||
|
# (if we actually need to change something) as in most cases, the logic
|
||||||
|
# doesn't need to run.
|
||||||
|
room_membership_for_user_map = dict(room_membership_for_user_map)
|
||||||
|
|
||||||
# Find which rooms are partially stated and may need to be filtered out
|
# Find which rooms are partially stated and may need to be filtered out
|
||||||
# depending on the `required_state` requested (see below).
|
# depending on the `required_state` requested (see below).
|
||||||
partial_state_rooms = await self.store.get_partial_rooms()
|
partial_state_rooms = await self.store.get_partial_rooms()
|
||||||
|
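The release side compensates for the missing overlay with the same FIXME pattern in four places: the mapping returned by the cached `get_sliding_sync_rooms_for_user_from_membership_snapshots` must never be mutated in place, so a shallow `dict(...)` copy is taken immediately before the first mutation, and only on paths that actually mutate. The pattern in miniature:

# The copy-before-mutate pattern from the release-side FIXME blocks: only pay
# for a shallow copy on the code paths that actually change the cached value.
from typing import Mapping

_CACHED = {"!a:x": "join", "!b:x": "invite"}  # stand-in for the cached result


def rooms_without_ignored_invites(ignored: frozenset) -> Mapping[str, str]:
    rooms: Mapping[str, str] = _CACHED
    to_drop = [r for r, m in rooms.items() if m == "invite" and r in ignored]
    if to_drop:
        mutable = dict(rooms)  # copy only when a mutation is actually needed
        for room_id in to_drop:
            mutable.pop(room_id, None)
        return mutable
    return rooms


assert rooms_without_ignored_invites(frozenset({"!b:x"})) == {"!a:x": "join"}
assert rooms_without_ignored_invites(frozenset()) is _CACHED  # no copy made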
@@ -1230,16 +1230,12 @@ class SsoHandler:
             if expected_user_id is not None and user_id != expected_user_id:
                 logger.error(
                     "Received a logout notification from SSO provider "
-                    "%r for the user %r, but with "
-                    "a session ID (%r) which belongs to "
-                    "%r. This may happen when the SSO provider user mapper "
+                    f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
+                    f"a session ID ({auth_provider_session_id!r}) which belongs to "
+                    f"{user_id!r}. This may happen when the SSO provider user mapper "
                     "uses something else than the standard attribute as mapping ID. "
                     "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
-                    "in the provider config if that is the case.",
-                    auth_provider_id,
-                    expected_user_id,
-                    auth_provider_session_id,
-                    user_id,
+                    "in the provider config if that is the case."
                 )
                 continue

@@ -66,7 +66,6 @@ from synapse.logging.opentracing import (
 from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
 from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
 from synapse.storage.databases.main.stream import PaginateFunction
-from synapse.storage.invite_rule import InviteRule
 from synapse.storage.roommember import MemberSummary
 from synapse.types import (
     DeviceListUpdates,
@@ -2550,7 +2549,6 @@ class SyncHandler:
         room_entries: List[RoomSyncResultBuilder] = []
         invited: List[InvitedSyncResult] = []
         knocked: List[KnockedSyncResult] = []
-        invite_config = await self.store.get_invite_config_for_user(user_id)
         for room_id, events in mem_change_events_by_room_id.items():
             # The body of this loop will add this room to at least one of the five lists
             # above. Things get messy if you've e.g. joined, left, joined then left the
@@ -2633,11 +2631,7 @@ class SyncHandler:
                 # Only bother if we're still currently invited
                 should_invite = last_non_join.membership == Membership.INVITE
                 if should_invite:
-                    if (
-                        last_non_join.sender not in ignored_users
-                        and invite_config.get_invite_rule(last_non_join.sender)
-                        != InviteRule.IGNORE
-                    ):
+                    if last_non_join.sender not in ignored_users:
                         invite_room_sync = InvitedSyncResult(room_id, invite=last_non_join)
                 if invite_room_sync:
                     invited.append(invite_room_sync)
@@ -2792,7 +2786,6 @@ class SyncHandler:
             membership_list=Membership.LIST,
             excluded_rooms=sync_result_builder.excluded_room_ids,
         )
-        invite_config = await self.store.get_invite_config_for_user(user_id)

         room_entries = []
         invited = []
@@ -2818,8 +2811,6 @@ class SyncHandler:
             elif event.membership == Membership.INVITE:
                 if event.sender in ignored_users:
                     continue
-                if invite_config.get_invite_rule(event.sender) == InviteRule.IGNORE:
-                    continue
                 invite = await self.store.get_event(event.event_id)
                 invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite))
             elif event.membership == Membership.KNOCK:
@@ -3074,10 +3065,8 @@ class SyncHandler:
             if batch.limited and since_token:
                 user_id = sync_result_builder.sync_config.user.to_string()
                 logger.debug(
-                    "Incremental gappy sync of %s for user %s with %d state events",
-                    room_id,
-                    user_id,
-                    len(state),
+                    "Incremental gappy sync of %s for user %s with %d state events"
+                    % (room_id, user_id, len(state))
                 )
         elif room_builder.rtype == "archived":
             archived_room_sync = ArchivedSyncResult(
@@ -749,9 +749,10 @@ class UserDirectoryHandler(StateDeltasHandler):
                     )
                     continue
                 except Exception:
-                    logger.exception(
+                    logger.error(
                         "Failed to refresh profile for %r due to unhandled exception",
                         user_id,
+                        exc_info=True,
                     )
                     await self.store.set_remote_user_profile_in_user_dir_stale(
                         user_id,
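The two sides of this final hunk log the same thing: inside an `except` block, `logger.exception(msg, ...)` is shorthand for `logger.error(msg, ..., exc_info=True)`, so develop's spelling and the release's spelling produce an identical ERROR record with the traceback attached:

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)

try:
    raise ValueError("boom")
except Exception:
    # Equivalent records: ERROR level, message, and the current traceback.
    logger.exception("Failed to refresh profile for %r", "@user:example.org")
    logger.error(
        "Failed to refresh profile for %r",
        "@user:example.org",
        exc_info=True,
    )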
Some files were not shown because too many files have changed in this diff.