Mirror of https://github.com/element-hq/synapse.git (synced 2025-07-04 00:00:27 -04:00)

Compare commits: 16 commits, 170f8050db ... 0ae115c3cd
Commits:

- 0ae115c3cd
- cc8da2c5ed
- c17fd947f3
- 24bcdb3f3c
- e3ed93adf3
- 214ac2f005
- c471e84697
- 291880012f
- a2bee2f255
- 3878699df7
- b35c6483d5
- bfb3a6e700
- 8afea3d51d
- db710cf29b
- cb75484734
- 40b647d1cd
.github/workflows/release-artifacts.yml (vendored, 4 changes)

@@ -111,7 +111,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-22.04, macos-13]
+        os: [ubuntu-24.04, macos-13]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -139,7 +139,7 @@ jobs:
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.23.0
+        run: python -m pip install cibuildwheel==3.0.0
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
CHANGES.md (18 changes)

@@ -1,3 +1,21 @@
+# Synapse 1.133.0 (2025-07-01)
+
+Pre-built wheels are now built using the [manylinux_2_28](https://github.com/pypa/manylinux#manylinux_2_28-almalinux-8-based) base, which is expected to be compatible with distros using glibc 2.28 or later, including:
+
+- Debian 10+
+- Ubuntu 18.10+
+- Fedora 29+
+- CentOS/RHEL 8+
+
+Previously, wheels were built using the [manylinux2014](https://github.com/pypa/manylinux#manylinux2014-centos-7-based-glibc-217) base, which was expected to be compatible with distros using glibc 2.17 or later.
+
+### Bugfixes
+
+- Bump `cibuildwheel` to 3.0.0 to fix the `manylinux` wheel builds. ([\#18615](https://github.com/element-hq/synapse/issues/18615))
+
+
+
+
 # Synapse 1.133.0rc1 (2025-06-24)
 
 ### Features
changelog.d/18070.feature (new file, 1 line)

@@ -0,0 +1 @@
+Support for [MSC4235](https://github.com/matrix-org/matrix-spec-proposals/pull/4235): `via` query param for the hierarchy endpoint. Contributed by Krishan (@kfiven).
changelog.d/18196.feature (new file, 1 line)

@@ -0,0 +1 @@
+Add `forget_forced_upon_leave` capability as per [MSC4267](https://github.com/matrix-org/matrix-spec-proposals/pull/4267).
changelog.d/18418.removal (new file, 1 line)

@@ -0,0 +1 @@
+Stop adding the "origin" field to newly-created events (PDUs).
changelog.d/18519.doc (new file, 1 line)

@@ -0,0 +1 @@
+Fix documentation of the Delete Room Admin API's status field.
changelog.d/18562.feature (new file, 1 line)

@@ -0,0 +1 @@
+Add `msc4133_key_allowlist` experimental option to configure a list of custom profile keys that users may set.
changelog.d/18573.misc (new file, 1 line)

@@ -0,0 +1 @@
+Improve docstring on `simple_upsert_many`.
changelog.d/18582.bugfix (new file, 1 line)

@@ -0,0 +1 @@
+Improve performance of device deletion by adding missing index.
changelog.d/18602.misc (new file, 1 line)

@@ -0,0 +1 @@
+Speed up bulk device deletion.
changelog.d/18605.bugfix (new file, 1 line)

@@ -0,0 +1 @@
+Ensure policy servers are not asked to scan policy server change events, allowing rooms to disable the use of a policy server while the policy server is down.
changelog.d/18625.misc (new file, 1 line)

@@ -0,0 +1 @@
+Log the room ID we're purging state for.
@@ -45,6 +45,10 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None:
     colors = {"red", "green", "blue", "yellow", "purple"}
 
     for pdu in pdus:
+        # TODO: The "origin" field has since been removed from events generated
+        # by Synapse. We should consider removing it here as well but since this
+        # is part of `contrib/`, it is left for the community to revise and ensure things
+        # still work correctly.
         origins.add(pdu.get("origin"))
 
     color_map = {color: color for color in colors if color in origins}
debian/changelog (vendored, 6 changes)

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.133.0) stable; urgency=medium
+
+  * New synapse release 1.133.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Jul 2025 13:13:24 +0000
+
 matrix-synapse-py3 (1.133.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.133.0rc1.
@@ -117,7 +117,6 @@ It returns a JSON body like the following:
     "hashes": {
       "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw"
     },
-    "origin": "matrix.org",
     "origin_server_ts": 1592291711430,
     "prev_events": [
       "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M"
@@ -806,7 +806,7 @@ A response body like the following is returned:
     }, {
         "delete_id": "delete_id2",
         "room_id": "!roomid:example.com",
-        "status": "purging",
+        "status": "active",
         "shutdown_room": {
             "kicked_users": [
                 "@foobar:example.com"
@@ -843,7 +843,7 @@ A response body like the following is returned:
 
 ```json
 {
-    "status": "purging",
+    "status": "active",
     "delete_id": "bHkCNQpHqOaFhPtK",
     "room_id": "!roomid:example.com",
     "shutdown_room": {
@@ -876,8 +876,8 @@ The following fields are returned in the JSON response body:
 - `delete_id` - The ID for this purge
 - `room_id` - The ID of the room being deleted
 - `status` - The status will be one of:
-  - `shutting_down` - The process is removing users from the room.
-  - `purging` - The process is purging the room and event data from database.
+  - `scheduled` - The deletion is waiting to be started
+  - `active` - The process is purging the room and event data from database.
   - `complete` - The process has completed successfully.
   - `failed` - The process is aborted, an error has occurred.
 - `error` - A string that shows an error message if `status` is `failed`.
@@ -101,7 +101,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.133.0rc1"
+version = "1.133.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -21,7 +21,7 @@
 
 import enum
 from functools import cache
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, List, Optional
 
 import attr
 import attr.validators
@@ -552,6 +552,18 @@ class ExperimentalConfig(Config):
         # MSC4133: Custom profile fields
         self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False)
 
+        self.msc4133_key_allowlist: Optional[List[str]] = experimental.get(
+            "msc4133_key_allowlist"
+        )
+        if self.msc4133_key_allowlist is not None:
+            if not isinstance(self.msc4133_key_allowlist, list) or not all(
+                isinstance(k, str) for k in self.msc4133_key_allowlist
+            ):
+                raise ConfigError(
+                    "experimental_features.msc4133_key_allowlist must be a list of strings",
+                    ("experimental", "msc4133_key_allowlist"),
+                )
+
         # MSC4210: Remove legacy mentions
         self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
 
@@ -561,11 +573,17 @@ class ExperimentalConfig(Config):
         # MSC4076: Add `disable_badge_count` to pusher configuration
         self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False)
 
+        # MSC4235: Add `via` param to hierarchy endpoint
+        self.msc4235_enabled: bool = experimental.get("msc4235_enabled", False)
+
         # MSC4263: Preventing MXID enumeration via key queries
         self.msc4263_limit_key_queries_to_users_who_share_rooms = experimental.get(
             "msc4263_limit_key_queries_to_users_who_share_rooms",
             False,
         )
 
         # MSC4267: Automatically forgetting rooms on leave
         self.msc4267_enabled: bool = experimental.get("msc4267_enabled", False)
 
+        # MSC4155: Invite filtering
+        self.msc4155_enabled: bool = experimental.get("msc4155_enabled", False)
@@ -85,4 +85,4 @@ class RoomConfig(Config):
 
         # When enabled, users will forget rooms when they leave them, either via a
         # leave, kick or ban.
-        self.forget_on_leave = config.get("forget_rooms_on_leave", False)
+        self.forget_on_leave: bool = config.get("forget_rooms_on_leave", False)
@@ -208,7 +208,6 @@ class EventBase(metaclass=abc.ABCMeta):
     depth: DictProperty[int] = DictProperty("depth")
     content: DictProperty[JsonDict] = DictProperty("content")
    hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
-    origin: DictProperty[str] = DictProperty("origin")
     origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
     room_id: DictProperty[str] = DictProperty("room_id")
     sender: DictProperty[str] = DictProperty("sender")
@@ -302,8 +302,8 @@ def create_local_event_from_event_dict(
     event_dict: JsonDict,
     internal_metadata_dict: Optional[JsonDict] = None,
 ) -> EventBase:
-    """Takes a fully formed event dict, ensuring that fields like `origin`
-    and `origin_server_ts` have correct values for a locally produced event,
+    """Takes a fully formed event dict, ensuring that fields like
+    `origin_server_ts` have correct values for a locally produced event,
     then signs and hashes it.
     """
 
@@ -319,7 +319,6 @@ def create_local_event_from_event_dict(
     if format_version == EventFormatVersions.ROOM_V1_V2:
         event_dict["event_id"] = _create_event_id(clock, hostname)
 
-    event_dict["origin"] = hostname
     event_dict.setdefault("origin_server_ts", time_now)
 
     event_dict.setdefault("unsigned", {})
@@ -67,7 +67,6 @@ class EventValidator:
             "auth_events",
             "content",
             "hashes",
-            "origin",
             "prev_events",
             "sender",
             "type",
@@ -77,13 +76,6 @@ class EventValidator:
             if k not in event:
                 raise SynapseError(400, "Event does not have key %s" % (k,))
 
-        # Check that the following keys have string values
-        event_strings = ["origin"]
-
-        for s in event_strings:
-            if not isinstance(getattr(event, s), str):
-                raise SynapseError(400, "'%s' not a string type" % (s,))
-
         # Depending on the room version, ensure the data is spec compliant JSON.
         if event.room_version.strict_canonicaljson:
             validate_canonicaljson(event.get_pdu_json())
@@ -322,8 +322,7 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB
         SynapseError: if the pdu is missing required fields or is otherwise
             not a valid matrix event
     """
-    # we could probably enforce a bunch of other fields here (room_id, sender,
-    # origin, etc etc)
+    # we could probably enforce a bunch of other fields here (room_id, sender, etc.)
     assert_params_in_dict(pdu_json, ("type", "depth"))
 
     # Strip any unauthorized values from "unsigned" if they exist
@@ -76,7 +76,7 @@ from synapse.storage.databases.main.registration import (
     LoginTokenLookupResult,
     LoginTokenReused,
 )
-from synapse.types import JsonDict, Requester, UserID
+from synapse.types import JsonDict, Requester, StrCollection, UserID
 from synapse.util import stringutils as stringutils
 from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
 from synapse.util.msisdn import phone_number_to_msisdn
@@ -1547,6 +1547,31 @@ class AuthHandler:
             user_id, (token_id for _, token_id, _ in tokens_and_devices)
         )
 
+    async def delete_access_tokens_for_devices(
+        self,
+        user_id: str,
+        device_ids: StrCollection,
+    ) -> None:
+        """Invalidate access tokens for the devices
+
+        Args:
+            user_id: ID of user the tokens belong to
+            device_ids: ID of device the tokens are associated with.
+                If None, tokens associated with any device (or no device) will
+                be deleted
+        """
+        tokens_and_devices = await self.store.user_delete_access_tokens_for_devices(
+            user_id,
+            device_ids,
+        )
+
+        # see if any modules want to know about this
+        if self.password_auth_provider.on_logged_out_callbacks:
+            for token, _, device_id in tokens_and_devices:
+                await self.password_auth_provider.on_logged_out(
+                    user_id=user_id, device_id=device_id, access_token=token
+                )
+
     async def add_threepid(
         self, user_id: str, medium: str, address: str, validated_at: int
     ) -> None:
@@ -671,12 +671,12 @@ class DeviceHandler(DeviceWorkerHandler):
             except_device_id: optional device id which should not be deleted
         """
         device_map = await self.store.get_devices_by_user(user_id)
-        device_ids = list(device_map)
-        if except_device_id is not None:
-            device_ids = [d for d in device_ids if d != except_device_id]
-        await self.delete_devices(user_id, device_ids)
+        device_map.pop(except_device_id, None)
+        user_device_ids = device_map.keys()
+        await self.delete_devices(user_id, user_device_ids)
 
-    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
+    async def delete_devices(self, user_id: str, device_ids: StrCollection) -> None:
         """Delete several devices
 
         Args:
@@ -695,17 +695,10 @@ class DeviceHandler(DeviceWorkerHandler):
             else:
                 raise
 
-        # Delete data specific to each device. Not optimised as it is not
-        # considered as part of a critical path.
-        for device_id in device_ids:
-            await self._auth_handler.delete_access_tokens_for_user(
-                user_id, device_id=device_id
-            )
-            await self.store.delete_e2e_keys_by_device(
-                user_id=user_id, device_id=device_id
-            )
-
-            if self.hs.config.experimental.msc3890_enabled:
+        # Delete data specific to each device. Not optimised as its an
+        # experimental MSC.
+        if self.hs.config.experimental.msc3890_enabled:
+            for device_id in device_ids:
                 # Remove any local notification settings for this device in accordance
                 # with MSC3890.
                 await self._account_data_handler.remove_account_data_for_user(
@@ -713,6 +706,13 @@ class DeviceHandler(DeviceWorkerHandler):
                     f"org.matrix.msc3890.local_notification_settings.{device_id}",
                 )
 
+        # If we're deleting a lot of devices, a bunch of them may not have any
+        # to-device messages queued up. We filter those out to avoid scheduling
+        # unnecessary tasks.
+        devices_with_messages = await self.store.get_devices_with_messages(
+            user_id, device_ids
+        )
+        for device_id in devices_with_messages:
             # Delete device messages asynchronously and in batches using the task scheduler
             # We specify an upper stream id to avoid deleting non delivered messages
             # if an user re-uses a device ID.
@@ -726,6 +726,10 @@ class DeviceHandler(DeviceWorkerHandler):
                 },
             )
 
+        await self._auth_handler.delete_access_tokens_for_devices(
+            user_id, device_ids=device_ids
+        )
+
         # Pushers are deleted after `delete_access_tokens_for_user` is called so that
         # modules using `on_logged_out` hook can use them if needed.
         await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)
@@ -819,10 +823,11 @@ class DeviceHandler(DeviceWorkerHandler):
             # This should only happen if there are no updates, so we bail.
             return
 
-        for device_id in device_ids:
-            logger.debug(
-                "Notifying about update %r/%r, ID: %r", user_id, device_id, position
-            )
+        if logger.isEnabledFor(logging.DEBUG):
+            for device_id in device_ids:
+                logger.debug(
+                    "Notifying about update %r/%r, ID: %r", user_id, device_id, position
+                )
 
         # specify the user ID too since the user should always get their own device list
         # updates, even if they aren't in any rooms.
@@ -922,9 +927,6 @@ class DeviceHandler(DeviceWorkerHandler):
             # can't call self.delete_device because that will clobber the
             # access token so call the storage layer directly
             await self.store.delete_devices(user_id, [old_device_id])
-            await self.store.delete_e2e_keys_by_device(
-                user_id=user_id, device_id=old_device_id
-            )
 
             # tell everyone that the old device is gone and that the dehydrated
             # device has a new display name
@@ -946,7 +948,6 @@ class DeviceHandler(DeviceWorkerHandler):
             raise errors.NotFoundError()
 
         await self.delete_devices(user_id, [device_id])
-        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
 
     @wrap_as_background_process("_handle_new_device_update_async")
     async def _handle_new_device_update_async(self) -> None:
@@ -481,6 +481,14 @@ class ProfileHandler:
         if not by_admin and target_user != requester.user:
             raise AuthError(403, "Cannot set another user's profile")
 
+        allowlist = self.hs.config.experimental.msc4133_key_allowlist
+        if allowlist is not None and field_name not in allowlist:
+            raise SynapseError(
+                403,
+                "Changing this profile field is disabled on this server",
+                Codes.FORBIDDEN,
+            )
+
         await self.store.set_profile_field(target_user, field_name, new_value)
 
         # Custom fields do not propagate into the user directory *or* rooms.
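The net effect, sketched against the unstable MSC4133 endpoint exercised by the tests further down this diff (host, user ID, and token are placeholders):

```python
import requests

BASE = "http://localhost:8008"  # placeholder homeserver
USER = "@alice:example.org"     # placeholder user
HEADERS = {"Authorization": "Bearer ACCESS_TOKEN"}  # placeholder token

# With msc4133_key_allowlist: ["allowed_field"], a non-allowlisted key is
# rejected with 403 M_FORBIDDEN...
r = requests.put(
    f"{BASE}/_matrix/client/unstable/uk.tcpip.msc4133/profile/{USER}/blocked",
    json={"blocked": "test"},
    headers=HEADERS,
)
assert r.status_code == 403

# ...while an allowlisted key is accepted.
r = requests.put(
    f"{BASE}/_matrix/client/unstable/uk.tcpip.msc4133/profile/{USER}/allowed_field",
    json={"allowed_field": "ok"},
    headers=HEADERS,
)
assert r.status_code == 200
```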
@@ -54,6 +54,9 @@ class RoomPolicyHandler:
         Returns:
             bool: True if the event is allowed in the room, False otherwise.
         """
+        if event.type == "org.matrix.msc4284.policy" and event.state_key is not None:
+            return True  # always allow policy server change events
+
         policy_event = await self._storage_controllers.state.get_current_state_event(
             event.room_id, "org.matrix.msc4284.policy", ""
         )
@@ -111,7 +111,15 @@ class RoomSummaryHandler:
         # If a user tries to fetch the same page multiple times in quick succession,
         # only process the first attempt and return its result to subsequent requests.
         self._pagination_response_cache: ResponseCache[
-            Tuple[str, str, bool, Optional[int], Optional[int], Optional[str]]
+            Tuple[
+                str,
+                str,
+                bool,
+                Optional[int],
+                Optional[int],
+                Optional[str],
+                Optional[Tuple[str, ...]],
+            ]
         ] = ResponseCache(
             hs.get_clock(),
             "get_room_hierarchy",
@@ -126,6 +134,7 @@ class RoomSummaryHandler:
         max_depth: Optional[int] = None,
         limit: Optional[int] = None,
         from_token: Optional[str] = None,
+        remote_room_hosts: Optional[Tuple[str, ...]] = None,
     ) -> JsonDict:
         """
         Implementation of the room hierarchy C-S API.
@@ -143,6 +152,9 @@ class RoomSummaryHandler:
             limit: An optional limit on the number of rooms to return per
                 page. Must be a positive integer.
             from_token: An optional pagination token.
+            remote_room_hosts: An optional list of remote homeserver server names. If defined,
+                each host will be used to try and fetch the room hierarchy. Must be a tuple so
+                that it can be hashed by the `RoomSummaryHandler._pagination_response_cache`.
 
         Returns:
             The JSON hierarchy dictionary.
@@ -162,6 +174,7 @@ class RoomSummaryHandler:
                 max_depth,
                 limit,
                 from_token,
+                remote_room_hosts,
             ),
             self._get_room_hierarchy,
             requester.user.to_string(),
@@ -170,6 +183,7 @@ class RoomSummaryHandler:
             max_depth,
             limit,
             from_token,
+            remote_room_hosts,
         )
 
     async def _get_room_hierarchy(
@@ -180,6 +194,7 @@ class RoomSummaryHandler:
         max_depth: Optional[int] = None,
         limit: Optional[int] = None,
         from_token: Optional[str] = None,
+        remote_room_hosts: Optional[Tuple[str, ...]] = None,
     ) -> JsonDict:
         """See docstring for SpaceSummaryHandler.get_room_hierarchy."""
 
@@ -199,7 +214,7 @@ class RoomSummaryHandler:
 
         if not local_room:
             room_hierarchy = await self._summarize_remote_room_hierarchy(
-                _RoomQueueEntry(requested_room_id, ()),
+                _RoomQueueEntry(requested_room_id, remote_room_hosts or ()),
                 False,
             )
             root_room_entry = room_hierarchy[0]
@@ -240,7 +255,7 @@ class RoomSummaryHandler:
             processed_rooms = set(pagination_session["processed_rooms"])
         else:
             # The queue of rooms to process, the next room is last on the stack.
-            room_queue = [_RoomQueueEntry(requested_room_id, ())]
+            room_queue = [_RoomQueueEntry(requested_room_id, remote_room_hosts or ())]
 
             # Rooms we have already processed.
             processed_rooms = set()
@@ -109,6 +109,11 @@ class CapabilitiesRestServlet(RestServlet):
                 "disallowed"
             ] = disallowed
 
+        if self.config.experimental.msc4267_enabled:
+            response["capabilities"]["org.matrix.msc4267.forget_forced_upon_leave"] = {
+                "enabled": self.config.room.forget_on_leave,
+            }
+
         return HTTPStatus.OK, response
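For orientation, with `msc4267_enabled: true` and `forget_rooms_on_leave: true` the capability surfaces in the `GET /_matrix/client/v3/capabilities` response roughly as follows (fragment shown as a Python dict, matching the assertions in the tests at the end of this diff):

```python
expected_fragment = {
    "capabilities": {
        # Mirrors the server's forget_rooms_on_leave setting.
        "org.matrix.msc4267.forget_forced_upon_leave": {"enabled": True},
    }
}
```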
@@ -1538,6 +1538,7 @@ class RoomHierarchyRestServlet(RestServlet):
         super().__init__()
         self._auth = hs.get_auth()
         self._room_summary_handler = hs.get_room_summary_handler()
+        self.msc4235_enabled = hs.config.experimental.msc4235_enabled
 
     async def on_GET(
         self, request: SynapseRequest, room_id: str
@@ -1547,6 +1548,15 @@ class RoomHierarchyRestServlet(RestServlet):
         max_depth = parse_integer(request, "max_depth")
         limit = parse_integer(request, "limit")
 
+        # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
+        remote_room_hosts = None
+        if self.msc4235_enabled:
+            args: Dict[bytes, List[bytes]] = request.args  # type: ignore
+            via_param = parse_strings_from_args(
+                args, "org.matrix.msc4235.via", required=False
+            )
+            remote_room_hosts = tuple(via_param or [])
+
         return 200, await self._room_summary_handler.get_room_hierarchy(
             requester,
             room_id,
@@ -1554,6 +1564,7 @@ class RoomHierarchyRestServlet(RestServlet):
             max_depth=max_depth,
             limit=limit,
             from_token=parse_string(request, "from"),
+            remote_room_hosts=remote_room_hosts,
         )
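A client-side sketch of the new parameter (assumes an MSC4235-enabled server; host, room ID, and token are placeholders; repeated `org.matrix.msc4235.via` query params name the hosts to try):

```python
import requests

BASE = "http://localhost:8008"          # placeholder homeserver
ROOM_ID = "!space:example.org"          # placeholder room
HEADERS = {"Authorization": "Bearer ACCESS_TOKEN"}  # placeholder token

# requests encodes the list as repeated query params, which is what
# parse_strings_from_args reads on the server side.
resp = requests.get(
    f"{BASE}/_matrix/client/v1/rooms/{ROOM_ID}/hierarchy",
    params={"org.matrix.msc4235.via": ["remote1.example.org", "remote2.example.org"]},
    headers=HEADERS,
)
print(resp.status_code, resp.json())
```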
@@ -34,6 +34,7 @@ from synapse.metrics.background_process_metrics import wrap_as_background_proces
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.databases import Databases
 from synapse.types.storage import _BackgroundUpdates
+from synapse.util.stringutils import shortstr
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -167,6 +168,12 @@ class PurgeEventsStorageController:
                 break
 
             (room_id, groups_to_sequences) = next_to_delete
+
+            logger.info(
+                "[purge] deleting state groups for room %s: %s",
+                room_id,
+                shortstr(groups_to_sequences.keys(), maxitems=10),
+            )
             made_progress = await self._delete_state_groups(
                 room_id, groups_to_sequences
             )
@@ -1478,13 +1478,49 @@ class DatabasePool:
         """
         Upsert, many times.
 
+        This executes a query equivalent to `INSERT INTO ... ON CONFLICT DO UPDATE`,
+        with multiple value rows.
+        The query may use emulated upserts if the database engine does not support upserts,
+        or if the table is currently unsafe to upsert.
+
+        If there are no value columns, this instead generates a `ON CONFLICT DO NOTHING`.
+
         Args:
             table: The table to upsert into
-            key_names: The key column names.
-            key_values: A list of each row's key column values.
-            value_names: The value column names
-            value_values: A list of each row's value column values.
+            key_names: The unique key column names. These are the columns used in the ON CONFLICT clause.
+            key_values: A list of each row's key column values, in the same order as `key_names`.
+            value_names: The non-unique value column names
+            value_values: A list of each row's value column values, in the same order as `value_names`.
                 Ignored if value_names is empty.
+
+        Example:
+            ```python
+            simple_upsert_many(
+                "mytable",
+                key_names=("room_id", "user_id"),
+                key_values=[
+                    ("!room1:example.org", "@user1:example.org"),
+                    ("!room2:example.org", "@user2:example.org"),
+                ],
+                value_names=("wombat_count", "is_updated"),
+                value_values=[
+                    (42, True),
+                    (7, False)
+                ],
+            )
+            ```
+
+            gives something equivalent to:
+
+            ```sql
+            INSERT INTO mytable (room_id, user_id, wombat_count, is_updated)
+            VALUES
+                ('!room1:example.org', '@user1:example.org', 42, True),
+                ('!room2:example.org', '@user2:example.org', 7, False)
+            ON CONFLICT DO UPDATE SET
+                wombat_count = EXCLUDED.wombat_count,
+                is_updated = EXCLUDED.is_updated
+            ```
         """
 
         # We can autocommit if it safe to upsert
@@ -1513,6 +1549,8 @@ class DatabasePool:
         """
         Upsert, many times.
 
+        See the documentation for `simple_upsert_many` for examples.
+
         Args:
             table: The table to upsert into
             key_names: The key column names.
@@ -52,10 +52,11 @@ from synapse.storage.database import (
     make_in_list_sql_clause,
 )
 from synapse.storage.util.id_generators import MultiWriterIdGenerator
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrCollection
 from synapse.util import Duration, json_encoder
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.iterutils import batch_iter
 from synapse.util.stringutils import parse_and_validate_server_name
 
 if TYPE_CHECKING:
@@ -1027,6 +1028,40 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             # loop first time we run this.
             self._clock.sleep(1)
 
+    async def get_devices_with_messages(
+        self, user_id: str, device_ids: StrCollection
+    ) -> StrCollection:
+        """Get the matching device IDs that have messages in the device inbox."""
+
+        def get_devices_with_messages_txn(
+            txn: LoggingTransaction,
+            batch_device_ids: StrCollection,
+        ) -> StrCollection:
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "device_id", batch_device_ids
+            )
+            sql = f"""
+                SELECT DISTINCT device_id FROM device_inbox
+                WHERE {clause} AND user_id = ?
+            """
+            args.append(user_id)
+            txn.execute(sql, args)
+            return {row[0] for row in txn}
+
+        results: Set[str] = set()
+        for batch_device_ids in batch_iter(device_ids, 1000):
+            batch_results = await self.db_pool.runInteraction(
+                "get_devices_with_messages",
+                get_devices_with_messages_txn,
+                batch_device_ids,
+                # We don't need to run in a transaction as it's a single query
+                db_autocommit=True,
+            )
+
+            results.update(batch_results)
+
+        return results
+
 
 class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
     DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
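The batching here (and in the device and registration stores below) leans on `batch_iter` from `synapse.util.iterutils`; a simplified equivalent for intuition (a sketch, not Synapse's actual implementation):

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")

def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    """Yield tuples of up to `size` items until the input is exhausted."""
    it = iter(iterable)
    while batch := tuple(islice(it, size)):
        yield batch

# Batches of 1000 device IDs keep each query's IN-list bounded:
assert list(batch_iter(range(5), 2)) == [(0, 1), (2, 3), (4,)]
```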
@@ -282,7 +282,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
             "count_devices_by_users", count_devices_by_users_txn, user_ids
         )
 
-    @cached()
+    @cached(tree=True)
     async def get_device(
         self, user_id: str, device_id: str
     ) -> Optional[Mapping[str, Any]]:
@@ -1861,7 +1861,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         )
         raise StoreError(500, "Problem storing device.")
 
-    async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
+    async def delete_devices(self, user_id: str, device_ids: StrCollection) -> None:
         """Deletes several devices.
 
         Args:
@@ -1885,11 +1885,49 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
                 values=device_ids,
                 keyvalues={"user_id": user_id},
             )
-            self._invalidate_cache_and_stream_bulk(
-                txn, self.get_device, [(user_id, device_id) for device_id in device_ids]
-            )
 
+            # Also delete associated e2e keys.
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="e2e_device_keys_json",
+                keyvalues={"user_id": user_id},
+                column="device_id",
+                values=device_ids,
+            )
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="e2e_one_time_keys_json",
+                keyvalues={"user_id": user_id},
+                column="device_id",
+                values=device_ids,
+            )
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="dehydrated_devices",
+                keyvalues={"user_id": user_id},
+                column="device_id",
+                values=device_ids,
+            )
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="e2e_fallback_keys_json",
+                keyvalues={"user_id": user_id},
+                column="device_id",
+                values=device_ids,
+            )
+
+            # We're bulk deleting potentially many devices at once, so
+            # let's not invalidate the cache for each device individually.
+            # Instead, we will invalidate the cache for the user as a whole.
+            self._invalidate_cache_and_stream(txn, self.get_device, (user_id,))
+            self._invalidate_cache_and_stream(
+                txn, self.count_e2e_one_time_keys, (user_id,)
+            )
+            self._invalidate_cache_and_stream(
+                txn, self.get_e2e_unused_fallback_key_types, (user_id,)
+            )
 
-        for batch in batch_iter(device_ids, 100):
+        for batch in batch_iter(device_ids, 1000):
             await self.db_pool.runInteraction(
                 "delete_devices", _delete_devices_txn, batch
             )
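The `@cached(tree=True)` switches above are what make this coarse invalidation possible: a tree cache stores entries under their tuple key's prefix, so invalidating `(user_id,)` drops every `(user_id, device_id)` entry at once. A conceptual model only, not Synapse's cache implementation:

```python
# Entries keyed (user_id, device_id), stored as nested dicts.
tree_cache = {
    "@alice:example.org": {"DEV1": "device-info-1", "DEV2": "device-info-2"},
    "@bob:example.org": {"DEV3": "device-info-3"},
}

def invalidate(prefix: tuple) -> None:
    # A one-element key wipes the whole per-user subtree without
    # enumerating the device IDs it contains.
    tree_cache.pop(prefix[0], None)

invalidate(("@alice:example.org",))
assert "@alice:example.org" not in tree_cache
```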
@@ -2061,32 +2099,36 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
         context = get_active_span_text_map()
 
         def add_device_changes_txn(
-            txn: LoggingTransaction, stream_ids: List[int]
+            txn: LoggingTransaction,
+            batch_device_ids: StrCollection,
+            stream_ids: List[int],
         ) -> None:
             self._add_device_change_to_stream_txn(
                 txn,
                 user_id,
-                device_ids,
+                batch_device_ids,
                 stream_ids,
             )
 
             self._add_device_outbound_room_poke_txn(
                 txn,
                 user_id,
-                device_ids,
+                batch_device_ids,
                 room_ids,
                 stream_ids,
                 context,
             )
 
-        async with self._device_list_id_gen.get_next_mult(
-            len(device_ids)
-        ) as stream_ids:
-            await self.db_pool.runInteraction(
-                "add_device_change_to_stream",
-                add_device_changes_txn,
-                stream_ids,
-            )
+        for batch_device_ids in batch_iter(device_ids, 1000):
+            async with self._device_list_id_gen.get_next_mult(
+                len(device_ids)
+            ) as stream_ids:
+                await self.db_pool.runInteraction(
+                    "add_device_change_to_stream",
+                    add_device_changes_txn,
+                    batch_device_ids,
+                    stream_ids,
+                )
 
         return stream_ids[-1]
@@ -593,7 +593,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
             txn, self.count_e2e_one_time_keys, (user_id, device_id)
         )
 
-    @cached(max_entries=10000)
+    @cached(max_entries=10000, tree=True)
     async def count_e2e_one_time_keys(
         self, user_id: str, device_id: str
     ) -> Mapping[str, int]:
@@ -808,7 +808,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
             },
         )
 
-    @cached(max_entries=10000)
+    @cached(max_entries=10000, tree=True)
     async def get_e2e_unused_fallback_key_types(
         self, user_id: str, device_id: str
     ) -> Sequence[str]:
@@ -1632,46 +1632,6 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
         log_kv({"message": "Device keys stored."})
         return True
 
-    async def delete_e2e_keys_by_device(self, user_id: str, device_id: str) -> None:
-        def delete_e2e_keys_by_device_txn(txn: LoggingTransaction) -> None:
-            log_kv(
-                {
-                    "message": "Deleting keys for device",
-                    "device_id": device_id,
-                    "user_id": user_id,
-                }
-            )
-            self.db_pool.simple_delete_txn(
-                txn,
-                table="e2e_device_keys_json",
-                keyvalues={"user_id": user_id, "device_id": device_id},
-            )
-            self.db_pool.simple_delete_txn(
-                txn,
-                table="e2e_one_time_keys_json",
-                keyvalues={"user_id": user_id, "device_id": device_id},
-            )
-            self._invalidate_cache_and_stream(
-                txn, self.count_e2e_one_time_keys, (user_id, device_id)
-            )
-            self.db_pool.simple_delete_txn(
-                txn,
-                table="dehydrated_devices",
-                keyvalues={"user_id": user_id, "device_id": device_id},
-            )
-            self.db_pool.simple_delete_txn(
-                txn,
-                table="e2e_fallback_keys_json",
-                keyvalues={"user_id": user_id, "device_id": device_id},
-            )
-            self._invalidate_cache_and_stream(
-                txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id)
-            )
-
-        await self.db_pool.runInteraction(
-            "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn
-        )
-
     def _set_e2e_cross_signing_key_txn(
         self,
         txn: LoggingTransaction,
@@ -349,6 +349,19 @@ class EventsWorkerStore(SQLBaseStore):
             where_clause="type = 'm.room.member'",
         )
 
+        # Added to support efficient reverse lookups on the foreign key
+        # (user_id, device_id) when deleting devices.
+        # We already had a UNIQUE index on these 4 columns but out-of-order
+        # so replace that one.
+        self.db_pool.updates.register_background_index_update(
+            update_name="event_txn_id_device_id_txn_id2",
+            index_name="event_txn_id_device_id_txn_id2",
+            table="event_txn_id_device_id",
+            columns=("user_id", "device_id", "room_id", "txn_id"),
+            unique=True,
+            replaces_index="event_txn_id_device_id_txn_id",
+        )
+
     def get_un_partial_stated_events_token(self, instance_name: str) -> int:
         return (
             self._un_partial_stated_events_stream_id_gen.get_current_token_for_writer(
@@ -40,14 +40,16 @@ from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_in_list_sql_clause,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.databases.main.stats import StatsStore
 from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import IdGenerator
 from synapse.storage.util.sequence import build_sequence_generator
-from synapse.types import JsonDict, UserID, UserInfo
+from synapse.types import JsonDict, StrCollection, UserID, UserInfo
 from synapse.util.caches.descriptors import cached
+from synapse.util.iterutils import batch_iter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -2801,6 +2803,81 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
 
         return await self.db_pool.runInteraction("user_delete_access_tokens", f)
 
+    async def user_delete_access_tokens_for_devices(
+        self,
+        user_id: str,
+        device_ids: StrCollection,
+    ) -> List[Tuple[str, int, Optional[str]]]:
+        """
+        Invalidate access and refresh tokens belonging to a user
+
+        Args:
+            user_id: ID of user the tokens belong to
+            device_ids: The devices to delete tokens for.
+        Returns:
+            A tuple of (token, token id, device id) for each of the deleted tokens
+        """
+
+        def user_delete_access_tokens_for_devices_txn(
+            txn: LoggingTransaction, batch_device_ids: StrCollection
+        ) -> List[Tuple[str, int, Optional[str]]]:
+            self.db_pool.simple_delete_many_txn(
+                txn,
+                table="refresh_tokens",
+                keyvalues={"user_id": user_id},
+                column="device_id",
+                values=batch_device_ids,
+            )
+
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "device_id", batch_device_ids
+            )
+            args.append(user_id)
+
+            if self.database_engine.supports_returning:
+                sql = f"""
+                    DELETE FROM access_tokens
+                    WHERE {clause} AND user_id = ?
+                    RETURNING token, id, device_id
+                """
+                txn.execute(sql, args)
+                tokens_and_devices = txn.fetchall()
+            else:
+                tokens_and_devices = self.db_pool.simple_select_many_txn(
+                    txn,
+                    table="access_tokens",
+                    column="device_id",
+                    iterable=batch_device_ids,
+                    keyvalues={"user_id": user_id},
+                    retcols=("token", "id", "device_id"),
+                )
+
+                self.db_pool.simple_delete_many_txn(
+                    txn,
+                    table="access_tokens",
+                    keyvalues={"user_id": user_id},
+                    column="device_id",
+                    values=batch_device_ids,
+                )
+
+            self._invalidate_cache_and_stream_bulk(
+                txn,
+                self.get_user_by_access_token,
+                [(t[0],) for t in tokens_and_devices],
+            )
+            return tokens_and_devices
+
+        results = []
+        for batch_device_ids in batch_iter(device_ids, 1000):
+            tokens_and_devices = await self.db_pool.runInteraction(
+                "user_delete_access_tokens_for_devices",
+                user_delete_access_tokens_for_devices_txn,
+                batch_device_ids,
+            )
+            results.extend(tokens_and_devices)
+
+        return results
+
     async def delete_access_token(self, access_token: str) -> None:
         def f(txn: LoggingTransaction) -> None:
             self.db_pool.simple_delete_one_txn(
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9207, 'event_txn_id_device_id_txn_id2', '{}');
@@ -225,7 +225,7 @@ KNOWN_KEYS = {
         "depth",
         "event_id",
         "hashes",
-        "origin",
+        "origin",  # old events were created with an origin field.
         "origin_server_ts",
         "prev_events",
         "room_id",
@@ -48,7 +48,6 @@ class EventSigningTestCase(unittest.TestCase):
     def test_sign_minimal(self) -> None:
         event_dict = {
             "event_id": "$0:domain",
-            "origin": "domain",
             "origin_server_ts": 1000000,
             "signatures": {},
             "type": "X",
@@ -64,7 +63,7 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertTrue(hasattr(event, "hashes"))
         self.assertIn("sha256", event.hashes)
         self.assertEqual(
-            event.hashes["sha256"], "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI"
+            event.hashes["sha256"], "A6Nco6sqoy18PPfPDVdYvoowfc0PVBk9g9OiyT3ncRM"
         )
 
         self.assertTrue(hasattr(event, "signatures"))
@@ -72,15 +71,14 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertIn(KEY_NAME, event.signatures["domain"])
         self.assertEqual(
             event.signatures[HOSTNAME][KEY_NAME],
-            "2Wptgo4CwmLo/Y8B8qinxApKaCkBG2fjTWB7AbP5Uy+"
-            "aIbygsSdLOFzvdDjww8zUVKCmI02eP9xtyJxc/cLiBA",
+            "PBc48yDVszWB9TRaB/+CZC1B+pDAC10F8zll006j+NN"
+            "fe4PEMWcVuLaG63LFTK9e4rwJE8iLZMPtCKhDTXhpAQ",
         )
 
     def test_sign_message(self) -> None:
         event_dict = {
             "content": {"body": "Here is the message content"},
             "event_id": "$0:domain",
-            "origin": "domain",
             "origin_server_ts": 1000000,
             "type": "m.room.message",
             "room_id": "!r:domain",
@@ -98,7 +96,7 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertTrue(hasattr(event, "hashes"))
         self.assertIn("sha256", event.hashes)
         self.assertEqual(
-            event.hashes["sha256"], "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g"
+            event.hashes["sha256"], "rDCeYBepPlI891h/RkI2/Lkf9bt7u0TxFku4tMs7WKk"
        )
 
         self.assertTrue(hasattr(event, "signatures"))
@@ -106,6 +104,6 @@ class EventSigningTestCase(unittest.TestCase):
         self.assertIn(KEY_NAME, event.signatures["domain"])
         self.assertEqual(
             event.signatures[HOSTNAME][KEY_NAME],
-            "Wm+VzmOUOz08Ds+0NTWb1d4CZrVsJSikkeRxh6aCcUw"
-            "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA",
+            "Ay4aj2b5oJ1k8INYZ9n3KnszCflM0emwcmQQ7vxpbdc"
+            "Sv9bkJxIZdWX1IJllcZLq89+D3sSabE+vqPtZs9akDw",
         )
@@ -122,7 +122,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "prev_events": "prev_events",
                 "prev_state": "prev_state",
                 "auth_events": "auth_events",
-                "origin": "domain",
+                "origin": "domain",  # historical top-level field that still exists on old events
                 "origin_server_ts": 1234,
                 "membership": "join",
                 # Also include a key that should be removed.
@@ -139,7 +139,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
                 "prev_events": "prev_events",
                 "prev_state": "prev_state",
                 "auth_events": "auth_events",
-                "origin": "domain",
+                "origin": "domain",  # historical top-level field that still exists on old events
                 "origin_server_ts": 1234,
                 "membership": "join",
                 "content": {},
@@ -148,13 +148,12 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
             },
         )
 
-        # As of room versions we now redact the membership, prev_states, and origin keys.
+        # As of room versions we now redact the membership and prev_states keys.
         self.run_test(
             {
                 "type": "A",
                 "prev_state": "prev_state",
                 "membership": "join",
-                "origin": "example.com",
             },
             {"type": "A", "content": {}, "signatures": {}, "unsigned": {}},
             room_version=RoomVersions.V11,
@@ -238,7 +237,6 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
             {
                 "type": "m.room.create",
                 "content": {"not_a_real_key": True},
-                "origin": "some_homeserver",
                 "nonsense_field": "some_random_garbage",
             },
             {
@@ -535,7 +535,6 @@ class StripUnsignedFromEventsTestCase(unittest.TestCase):
             "depth": 1000,
             "origin_server_ts": 1,
             "type": "m.room.member",
-            "origin": "test.servx",
             "content": {"membership": "join"},
             "auth_events": [],
             "unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"},
@@ -552,7 +551,6 @@ class StripUnsignedFromEventsTestCase(unittest.TestCase):
             "depth": 1000,
             "origin_server_ts": 1,
             "type": "m.room.member",
-            "origin": "test.servx",
             "auth_events": [],
             "content": {"membership": "join"},
             "unsigned": {
@@ -579,7 +577,6 @@ class StripUnsignedFromEventsTestCase(unittest.TestCase):
             "depth": 1000,
             "origin_server_ts": 1,
             "type": "m.room.power_levels",
-            "origin": "test.servx",
             "content": {},
             "auth_events": [],
             "unsigned": {
@@ -1080,6 +1080,62 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
         self.assertEqual(federation_requests, 2)
         self._assert_hierarchy(result, expected)
 
+    def test_fed_remote_room_hosts(self) -> None:
+        """
+        Test if requested room is available over federation using via's.
+        """
+        fed_hostname = self.hs.hostname + "2"
+        fed_space = "#fed_space:" + fed_hostname
+        fed_subroom = "#fed_sub_room:" + fed_hostname
+
+        remote_room_hosts = tuple(fed_hostname)
+
+        requested_room_entry = _RoomEntry(
+            fed_space,
+            {
+                "room_id": fed_space,
+                "world_readable": True,
+                "join_rule": "public",
+                "room_type": RoomTypes.SPACE,
+            },
+            [
+                {
+                    "type": EventTypes.SpaceChild,
+                    "room_id": fed_space,
+                    "state_key": fed_subroom,
+                    "content": {"via": [fed_hostname]},
+                }
+            ],
+        )
+        child_room = {
+            "room_id": fed_subroom,
+            "world_readable": True,
+            "join_rule": "public",
+        }
+
+        async def summarize_remote_room_hierarchy(
+            _self: Any, room: Any, suggested_only: bool
+        ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]:
+            return requested_room_entry, {fed_subroom: child_room}, set()
+
+        expected = [
+            (fed_space, [fed_subroom]),
+            (fed_subroom, ()),
+        ]
+
+        with mock.patch(
+            "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy",
+            new=summarize_remote_room_hierarchy,
+        ):
+            result = self.get_success(
+                self.handler.get_room_hierarchy(
+                    create_requester(self.user),
+                    fed_space,
+                    remote_room_hosts=remote_room_hosts,
+                )
+            )
+        self._assert_hierarchy(result, expected)
+
 
 class RoomSummaryTestCase(unittest.HomeserverTestCase):
     servlets = [
@@ -264,3 +264,43 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase):
 
         self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertTrue(capabilities["m.get_login_token"]["enabled"])
+
+    @override_config(
+        {
+            "experimental_features": {"msc4267_enabled": True},
+            "forget_rooms_on_leave": True,
+        }
+    )
+    def test_get_forget_forced_upon_leave_with_auto_forget(self) -> None:
+        # Server auto-forgets on /leave, expect enabled client capability
+        access_token = self.get_success(
+            self.auth_handler.create_access_token_for_user_id(
+                self.user, device_id=None, valid_until_ms=None
+            )
+        )
+        channel = self.make_request("GET", self.url, access_token=access_token)
+        capabilities = channel.json_body["capabilities"]
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertTrue(
+            capabilities["org.matrix.msc4267.forget_forced_upon_leave"]["enabled"]
+        )
+
+    @override_config(
+        {
+            "experimental_features": {"msc4267_enabled": True},
+            "forget_rooms_on_leave": False,
+        }
+    )
+    def test_get_forget_forced_upon_leave_without_auto_forget(self) -> None:
+        # Server doesn't auto-forget on /leave, expect disabled client capability
+        access_token = self.get_success(
+            self.auth_handler.create_access_token_for_user_id(
+                self.user, device_id=None, valid_until_ms=None
+            )
+        )
+        channel = self.make_request("GET", self.url, access_token=access_token)
+        capabilities = channel.json_body["capabilities"]
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertFalse(
+            capabilities["org.matrix.msc4267.forget_forced_upon_leave"]["enabled"]
+        )
@@ -776,6 +776,34 @@ class ProfileTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 403, channel.result)
         self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN)
 
+    @unittest.override_config(
+        {
+            "experimental_features": {
+                "msc4133_enabled": True,
+                "msc4133_key_allowlist": ["allowed_field"],
+            }
+        }
+    )
+    def test_set_custom_field_not_allowlisted(self) -> None:
+        """Setting a field not in the allowlist should be rejected."""
+        channel = self.make_request(
+            "PUT",
+            f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/blocked",
+            content={"blocked": "test"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 403, channel.result)
+        self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN)
+
+        # Allowed field should succeed.
+        channel = self.make_request(
+            "PUT",
+            f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/allowed_field",
+            content={"allowed_field": "ok"},
+            access_token=self.owner_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
     def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None:
         """Stores metadata about files in the database.