mirror of
https://github.com/element-hq/synapse.git
synced 2025-12-13 00:00:57 -05:00
### Background As part of Element's plan to support a light form of vhosting (virtual hosting — multiple instances of Synapse in the same Python process), we're diving into the details and implications of running multiple Synapse instances in one process. "Per-tenant logging" is tracked internally by https://github.com/element-hq/synapse-small-hosts/issues/48 ### Prior art Previously, we exposed `server_name` by providing a static logging `MetadataFilter` that injected the values: 205d9e4fc4/synapse/config/logger.py (L216) While this can work fine for the normal case of one Synapse instance per Python process, it configures things globally and isn't compatible when we try to start multiple Synapse instances, because each subsequent tenant will overwrite the previous tenant. ### What does this PR do? We remove the `MetadataFilter` and replace it by tracking the `server_name` in the `LoggingContext`, exposing it with our existing [`LoggingContextFilter`](205d9e4fc4/synapse/logging/context.py (L584-L622)) that we already use to expose information about the `request`. This means that the `server_name` value follows wherever we log, as expected, even when we have multiple Synapse instances running in the same process. ### A note on logcontext Anywhere Synapse mistakenly uses the `sentinel` logcontext to log something, we won't know which server sent the log. We've been fixing up `sentinel` logcontext usage, as tracked by https://github.com/element-hq/synapse/issues/18905 Any further `sentinel` logcontext usage we find in the future can be fixed piecemeal as normal. d2a966f922/docs/log_contexts.md (L71-L81) ### Testing strategy 1. Adjust your logging config to include `%(server_name)s` in the format ```yaml formatters: precise: format: '%(asctime)s - %(server_name)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' ``` 1. Start Synapse: `poetry run synapse_homeserver --config-path homeserver.yaml` 1. 
Make some requests (`curl http://localhost:8008/_matrix/client/versions`, etc.) 1. Open the homeserver logs and notice the `server_name` in the logs as expected. `unknown_server_from_sentinel_context` is expected for the `sentinel` logcontext (things outside of Synapse).
200 lines
7.4 KiB
Python
200 lines
7.4 KiB
Python
#
|
|
# This file is licensed under the Affero General Public License (AGPL) version 3.
|
|
#
|
|
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
|
|
# Copyright (C) 2023 New Vector, Ltd
|
|
#
|
|
# This program is free software: you can redistribute it and/or modify
|
|
# it under the terms of the GNU Affero General Public License as
|
|
# published by the Free Software Foundation, either version 3 of the
|
|
# License, or (at your option) any later version.
|
|
#
|
|
# See the GNU Affero General Public License for more details:
|
|
# <https://www.gnu.org/licenses/agpl-3.0.html>.
|
|
#
|
|
# Originally licensed under the Apache License, Version 2.0:
|
|
# <http://www.apache.org/licenses/LICENSE-2.0>.
|
|
#
|
|
# [This file includes modifications made by New Vector Limited]
|
|
#
|
|
#
|
|
|
|
from http import HTTPStatus
|
|
from typing import Any, Generator, Tuple, cast
|
|
from unittest.mock import AsyncMock, Mock, call
|
|
|
|
from twisted.internet import defer, reactor as _reactor
|
|
|
|
from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context
|
|
from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache
|
|
from synapse.types import ISynapseReactor, JsonDict
|
|
from synapse.util.clock import Clock
|
|
|
|
from tests import unittest
|
|
from tests.utils import MockClock
|
|
|
|
# Re-type the global Twisted reactor as Synapse's reactor interface so the
# rest of this module can pass it to Synapse APIs (e.g. `Clock`) type-safely.
reactor = cast(ISynapseReactor, _reactor)
|
|
|
|
|
class HttpTransactionCacheTestCase(unittest.TestCase):
    """Tests for `HttpTransactionCache`.

    The cache deduplicates client-API requests that share the same transaction
    key, so that a retried request receives the original response instead of
    re-executing the handler.
    """

    def setUp(self) -> None:
        self.clock = MockClock()
        self.hs = Mock()
        self.hs.get_clock = Mock(return_value=self.clock)
        self.hs.get_auth = Mock()
        self.cache = HttpTransactionCache(self.hs)

        self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"})

        # Here we make sure that we're setting all the fields that HttpTransactionCache
        # uses to build the transaction key.
        self.mock_request = Mock()
        self.mock_request.path = b"/foo/bar"
        self.mock_requester = Mock()
        self.mock_requester.app_service = None
        self.mock_requester.is_guest = False
        self.mock_requester.access_token_id = 1234

    @defer.inlineCallbacks
    def test_executes_given_function(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """The callback is invoked with the given args and its result returned."""
        cb = AsyncMock(return_value=self.mock_http_response)
        res = yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg"
        )
        cb.assert_called_once_with("some_arg", keyword="arg")
        self.assertEqual(res, self.mock_http_response)

    @defer.inlineCallbacks
    def test_deduplicates_based_on_key(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """Repeated requests with the same transaction key reuse the cached
        response instead of invoking the callback again (even if other call
        args differ).
        """
        cb = AsyncMock(return_value=self.mock_http_response)
        for i in range(3):  # invoke multiple times
            res = yield self.cache.fetch_or_execute_request(
                self.mock_request,
                self.mock_requester,
                cb,
                "some_arg",
                keyword="arg",
                changing_args=i,
            )
            self.assertEqual(res, self.mock_http_response)
        # expect only a single call to do the work
        cb.assert_called_once_with("some_arg", keyword="arg", changing_args=0)

    @defer.inlineCallbacks
    def test_logcontexts_with_async_result(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """The caller's logcontext is correctly restored around an
        asynchronous callback, including when two calls run in parallel and
        are deduplicated onto the same deferred.
        """

        @defer.inlineCallbacks
        def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]:
            # Yield once so the result is genuinely asynchronous.
            yield defer.ensureDeferred(
                Clock(reactor, server_name="test_server").sleep(0)
            )
            return 1, {}

        @defer.inlineCallbacks
        def test() -> Generator["defer.Deferred[Any]", object, None]:
            with LoggingContext(name="c", server_name="test_server") as c1:
                res = yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
                self.assertIs(current_context(), c1)
                self.assertEqual(res, (1, {}))

        # run the test twice in parallel
        d = defer.gatherResults([test(), test()])
        self.assertIs(current_context(), SENTINEL_CONTEXT)
        yield d
        self.assertIs(current_context(), SENTINEL_CONTEXT)

    @defer.inlineCallbacks
    def test_does_not_cache_exceptions(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """Checks that, if the callback throws an exception, it is called again
        for the next request.
        """
        called = [False]

        def cb() -> "defer.Deferred[Tuple[int, JsonDict]]":
            if called[0]:
                # return a valid result the second time
                return defer.succeed(self.mock_http_response)

            called[0] = True
            raise Exception("boo")

        with LoggingContext(name="test", server_name="test_server") as test_context:
            # Use assertRaises rather than a bare try/except so the test fails
            # (instead of passing vacuously) if no exception is raised at all.
            with self.assertRaises(Exception) as cm:
                yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
            self.assertEqual(cm.exception.args[0], "boo")
            self.assertIs(current_context(), test_context)

            res = yield self.cache.fetch_or_execute_request(
                self.mock_request, self.mock_requester, cb
            )
            self.assertEqual(res, self.mock_http_response)
            self.assertIs(current_context(), test_context)

    @defer.inlineCallbacks
    def test_does_not_cache_failures(
        self,
    ) -> Generator["defer.Deferred[Any]", object, None]:
        """Checks that, if the callback returns a failure, it is called again
        for the next request.
        """
        called = [False]

        def cb() -> "defer.Deferred[Tuple[int, JsonDict]]":
            if called[0]:
                # return a valid result the second time
                return defer.succeed(self.mock_http_response)

            called[0] = True
            return defer.fail(Exception("boo"))

        with LoggingContext(name="test", server_name="test_server") as test_context:
            # Use assertRaises rather than a bare try/except so the test fails
            # (instead of passing vacuously) if the failure never materialises.
            with self.assertRaises(Exception) as cm:
                yield self.cache.fetch_or_execute_request(
                    self.mock_request, self.mock_requester, cb
                )
            self.assertEqual(cm.exception.args[0], "boo")
            self.assertIs(current_context(), test_context)

            res = yield self.cache.fetch_or_execute_request(
                self.mock_request, self.mock_requester, cb
            )
            self.assertEqual(res, self.mock_http_response)
            self.assertIs(current_context(), test_context)

    @defer.inlineCallbacks
    def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]:
        """Cached entries are evicted once `CLEANUP_PERIOD_MS` has elapsed."""
        cb = AsyncMock(return_value=self.mock_http_response)
        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # should NOT have cleaned up yet
        self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2)

        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # still using cache
        cb.assert_called_once_with("an arg")

        self.clock.advance_time_msec(CLEANUP_PERIOD_MS)

        yield self.cache.fetch_or_execute_request(
            self.mock_request, self.mock_requester, cb, "an arg"
        )
        # no longer using cache
        self.assertEqual(cb.call_count, 2)
        self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")])