From 0de7aa99533033151dd212e8409697f5a9fdbc6d Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Fri, 13 Jun 2025 09:44:18 +0200
Subject: [PATCH] Enable `flake8-logging` and `flake8-logging-format` rules in Ruff and fix related issues throughout the codebase (#18542)

This can be reviewed commit by commit.

This enables the `flake8-logging` and `flake8-logging-format` rules in Ruff, and also logs exception stack traces in a few places where it makes sense:

- https://docs.astral.sh/ruff/rules/#flake8-logging-log
- https://docs.astral.sh/ruff/rules/#flake8-logging-format-g

### Linting to avoid pre-formatting log messages

See [`adamchainz/flake8-logging` -> *LOG011 avoid pre-formatting log messages*](https://github.com/adamchainz/flake8-logging/blob/152db2f167355fb23e401bf68046c57cb128a2ae/README.rst#log011-avoid-pre-formatting-log-messages).

In practice, this means preferring placeholders (`%s`) over f-strings for logging. Placeholders are passed to the logger as arguments, so the logger can handle them specially. For example, Sentry records the args separately in its logging integration: https://github.com/getsentry/sentry-python/blob/c15b390dfe1ca5c01b30dd56b35d693bb50b413c/sentry_sdk/integrations/logging.py#L280-L284

One small theoretical performance benefit is that messages at disabled log levels are never formatted, so formatted strings are not created unnecessarily.
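To make the pattern concrete, here is a minimal sketch (the logger name, messages, and `user_id` value are invented for illustration, not taken from the codebase):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

user_id = "@alice:example.com"

# Discouraged (the pre-formatting pattern flagged by LOG011/G004): the
# f-string is rendered eagerly even when DEBUG is disabled, and the logger
# only ever sees the final string.
logger.debug(f"Fetching devices for {user_id}")

# Preferred: the message is formatted lazily by the logging framework, and
# user_id is passed as a separate argument that integrations (e.g. Sentry's)
# can record alongside the message template.
logger.debug("Fetching devices for %s", user_id)

# Where a stack trace is worth recording, logger.exception() logs at ERROR
# level and attaches the current exception's traceback automatically.
try:
    raise ValueError("example failure")
except ValueError:
    logger.exception("Failed to fetch devices for %s", user_id)
```

`logger.exception(...)` is equivalent to `logger.error(..., exc_info=True)`, which is why several call sites below are switched from the latter to the former.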
--- changelog.d/18542.misc | 1 + pyproject.toml | 4 ++++ scripts-dev/check_pydantic_models.py | 2 +- synapse/api/auth/msc3861_delegated.py | 2 +- synapse/app/_base.py | 4 ++-- synapse/app/phone_stats_home.py | 2 +- synapse/appservice/scheduler.py | 2 +- synapse/config/logger.py | 22 ++++++++++--------- synapse/config/room.py | 2 +- synapse/config/server.py | 2 +- synapse/events/auto_accept_invites.py | 13 ++++++----- synapse/federation/federation_client.py | 2 +- synapse/federation/federation_server.py | 3 ++- synapse/handlers/admin.py | 2 +- synapse/handlers/appservice.py | 8 ++----- synapse/handlers/auth.py | 4 ++-- synapse/handlers/device.py | 2 +- synapse/handlers/directory.py | 2 +- synapse/handlers/federation.py | 4 +++- synapse/handlers/identity.py | 4 ++-- synapse/handlers/message.py | 9 +++++--- synapse/handlers/oidc.py | 18 ++++++++------- synapse/handlers/register.py | 4 ++-- synapse/handlers/room.py | 4 ++-- synapse/handlers/room_member.py | 6 +++-- synapse/handlers/saml.py | 2 +- synapse/handlers/send_email.py | 2 +- synapse/handlers/sso.py | 12 ++++++---- synapse/handlers/sync.py | 6 +++-- synapse/handlers/user_directory.py | 3 +-- synapse/handlers/worker_lock.py | 12 ++++++---- synapse/http/client.py | 10 ++++----- synapse/http/matrixfederationclient.py | 2 +- synapse/media/media_repository.py | 12 +++++----- synapse/media/url_previewer.py | 4 ++-- synapse/metrics/__init__.py | 4 ++-- synapse/push/mailer.py | 2 +- synapse/rest/__init__.py | 2 +- synapse/rest/admin/media.py | 22 ++++++++++--------- synapse/storage/_base.py | 2 +- .../databases/main/monthly_active_users.py | 2 +- .../storage/databases/main/user_directory.py | 5 +++-- synapse/storage/invite_rule.py | 4 +++- .../storage/schema/main/delta/30/as_users.py | 7 ++++-- synapse/util/check_dependencies.py | 4 +++- synapse/util/daemonize.py | 2 +- synapse/util/task_scheduler.py | 9 +++++--- tests/federation/test_federation_server.py | 6 +++-- tests/handlers/test_worker_lock.py | 6 +++-- .../test_matrix_federation_agent.py | 2 +- tests/http/test_proxyagent.py | 2 +- tests/logging/test_remote_handler.py | 12 +++++----- 52 files changed, 164 insertions(+), 122 deletions(-) create mode 100644 changelog.d/18542.misc diff --git a/changelog.d/18542.misc b/changelog.d/18542.misc new file mode 100644 index 0000000000..8a2c2d5bf6 --- /dev/null +++ b/changelog.d/18542.misc @@ -0,0 +1 @@ +Enable [`flake8-logging`](https://docs.astral.sh/ruff/rules/#flake8-logging-log) and [`flake8-logging-format`](https://docs.astral.sh/ruff/rules/#flake8-logging-format-g) rules in Ruff and fix related issues throughout the codebase. diff --git a/pyproject.toml b/pyproject.toml index 2359c74032..e520e425d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,6 +74,10 @@ select = [ "PIE", # flake8-executable "EXE", + # flake8-logging + "LOG", + # flake8-logging-format + "G", ] [tool.ruff.lint.isort] diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py index 5eb1f0a9df..26a473a61b 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py @@ -243,7 +243,7 @@ def do_lint() -> Set[str]: importlib.import_module(module_info.name) except ModelCheckerException as e: logger.warning( - f"Bad annotation found when importing {module_info.name}" + "Bad annotation found when importing %s", module_info.name ) failures.add(format_model_checker_exception(e)) diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py index e500a06afe..ad5d59eef1 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py @@ -475,7 +475,7 @@ class MSC3861DelegatedAuth(BaseAuth): # XXX: This is a temporary solution so that the admin API can be called by # the OIDC provider. This will be removed once we have OIDC client # credentials grant support in matrix-authentication-service. - logging.info("Admin toked used") + logger.info("Admin token used") # XXX: that user doesn't exist and won't be provisioned. # This is mostly fine for admin calls, but we should also think about doing # requesters without a user_id. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 4cc260d551..49ab5d680d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -445,8 +445,8 @@ def listen_http( # getHost() returns a UNIXAddress which contains an instance variable of 'name' # encoded as a byte string. Decode as utf-8 so pretty. logger.info( - "Synapse now listening on Unix Socket at: " - f"{ports[0].getHost().name.decode('utf-8')}" + "Synapse now listening on Unix Socket at: %s", + ports[0].getHost().name.decode("utf-8"), ) return ports diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index bb450a235c..9cd2e1b71e 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -173,7 +173,7 @@ async def phone_stats_home( stats["log_level"] = logging.getLevelName(log_level) logger.info( - "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats) + "Reporting stats to %s: %s", hs.config.metrics.report_stats_endpoint, stats ) try: await hs.get_proxied_http_client().put_json( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index cba08dde85..ab3f4e15fe 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -461,7 +461,7 @@ class _TransactionController: recoverer = self.recoverers.get(service.id) if not recoverer: # No need to force a retry on a happy AS.
- logger.info(f"{service.id} is not in recovery, not forcing retry") + logger.info("%s is not in recovery, not forcing retry", service.id) return recoverer.force_retry() diff --git a/synapse/config/logger.py b/synapse/config/logger.py index e5aca36b75..3f86ec1169 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -51,6 +51,8 @@ if TYPE_CHECKING: from synapse.config.homeserver import HomeServerConfig from synapse.server import HomeServer +logger = logging.getLogger(__name__) + DEFAULT_LOG_CONFIG = Template( """\ # Log configuration for Synapse. @@ -291,7 +293,7 @@ def _load_logging_config(log_config_path: str) -> None: log_config = yaml.safe_load(f.read()) if not log_config: - logging.warning("Loaded a blank logging config?") + logger.warning("Loaded a blank logging config?") # If the old structured logging configuration is being used, raise an error. if "structured" in log_config and log_config.get("structured"): @@ -312,7 +314,7 @@ def _reload_logging_config(log_config_path: Optional[str]) -> None: return _load_logging_config(log_config_path) - logging.info("Reloaded log config from %s due to SIGHUP", log_config_path) + logger.info("Reloaded log config from %s due to SIGHUP", log_config_path) def setup_logging( @@ -349,17 +351,17 @@ def setup_logging( appbase.register_sighup(_reload_logging_config, log_config_path) # Log immediately so we can grep backwards. - logging.warning("***** STARTING SERVER *****") - logging.warning( + logger.warning("***** STARTING SERVER *****") + logger.warning( "Server %s version %s", sys.argv[0], SYNAPSE_VERSION, ) - logging.warning("Copyright (c) 2023 New Vector, Inc") - logging.warning( + logger.warning("Copyright (c) 2023 New Vector, Inc") + logger.warning( "Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse" ) - logging.info("Server hostname: %s", config.server.server_name) - logging.info("Public Base URL: %s", config.server.public_baseurl) - logging.info("Instance name: %s", hs.get_instance_name()) - logging.info("Twisted reactor: %s", type(reactor).__name__) + logger.info("Server hostname: %s", config.server.server_name) + logger.info("Public Base URL: %s", config.server.public_baseurl) + logger.info("Instance name: %s", hs.get_instance_name()) + logger.info("Twisted reactor: %s", type(reactor).__name__) diff --git a/synapse/config/room.py b/synapse/config/room.py index ec8cf5be36..bad387ecbf 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -27,7 +27,7 @@ from synapse.types import JsonDict from ._base import Config, ConfigError -logger = logging.Logger(__name__) +logger = logging.getLogger(__name__) class RoomDefaultEncryptionTypes: diff --git a/synapse/config/server.py b/synapse/config/server.py index 6b29983617..6893450989 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -41,7 +41,7 @@ from synapse.util.stringutils import parse_and_validate_server_name from ._base import Config, ConfigError from ._util import validate_config -logger = logging.Logger(__name__) +logger = logging.getLogger(__name__) DIRECT_TCP_ERROR = """ Using direct TCP replication for workers is no longer supported. diff --git a/synapse/events/auto_accept_invites.py b/synapse/events/auto_accept_invites.py index 4295107c47..9e5f76f33f 100644 --- a/synapse/events/auto_accept_invites.py +++ b/synapse/events/auto_accept_invites.py @@ -195,15 +195,18 @@ class InviteAutoAccepter: except SynapseError as e: if e.code == HTTPStatus.FORBIDDEN: logger.debug( - f"Update_room_membership was forbidden. 
This can sometimes be expected for remote invites. Exception: {e}" + "Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: %s", + e, ) else: - logger.warn( - f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}" + logger.warning( + "Update_room_membership raised the following unexpected (SynapseError) exception: %s", + e, ) except Exception as e: - logger.warn( - f"Update_room_membership raised the following unexpected exception: {e}" + logger.warning( + "Update_room_membership raised the following unexpected exception: %s", + e, ) sleep = 2**retries diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7c485aa7e0..2d1da70793 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1818,7 +1818,7 @@ class FederationClient(FederationBase): ) return timestamp_to_event_response except SynapseError as e: - logger.warn( + logger.warning( "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s", room_id, timestamp, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2f2c78babc..8718b70401 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -928,7 +928,8 @@ class FederationServer(FederationBase): # joins) or the full state (for full joins). # Return a 404 as we would if we weren't in the room at all. logger.info( - f"Rejecting /send_{membership_type} to %s because it's a partial state room", + "Rejecting /send_%s to %s because it's a partial state room", + membership_type, room_id, ) raise SynapseError( diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 971a74244f..262437f04f 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -495,7 +495,7 @@ class AdminHandler: ) except Exception as ex: logger.info( - f"Redaction of event {event.event_id} failed due to: {ex}" + "Redaction of event %s failed due to: %s", event.event_id, ex ) result["failed_redactions"][event.event_id] = str(ex) await self._task_scheduler.update_task(task.id, result=result) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index b7d1033351..f3bbdb5a05 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -465,9 +465,7 @@ class ApplicationServicesHandler: service, "read_receipt" ) if new_token is not None and new_token.stream <= from_key: - logger.debug( - "Rejecting token lower than or equal to stored: %s" % (new_token,) - ) + logger.debug("Rejecting token lower than or equal to stored: %s", new_token) return [] from_token = MultiWriterStreamToken(stream=from_key) @@ -509,9 +507,7 @@ class ApplicationServicesHandler: service, "presence" ) if new_token is not None and new_token <= from_key: - logger.debug( - "Rejecting token lower than or equal to stored: %s" % (new_token,) - ) + logger.debug("Rejecting token lower than or equal to stored: %s", new_token) return [] for user in users: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index e96922c08d..de5b38caca 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1895,7 +1895,7 @@ def load_single_legacy_password_auth_provider( try: provider = module(config=config, account_handler=api) except Exception as e: - logger.error("Error while initializing %r: %s", module, e) + logger.exception("Error while initializing %r: 
%s", module, e) raise # All methods that the module provides should be async, but this wasn't enforced @@ -2428,7 +2428,7 @@ class PasswordAuthProvider: except CancelledError: raise except Exception as e: - logger.error("Module raised an exception in is_3pid_allowed: %s", e) + logger.exception("Module raised an exception in is_3pid_allowed: %s", e) raise SynapseError(code=500, msg="Internal Server Error") return True diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index f8b547bbed..8f9bf92fda 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1600,7 +1600,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): if prev_stream_id is not None and cached_devices == { d["device_id"]: d for d in devices }: - logging.info( + logger.info( "Skipping device list resync for %s, as our cache matches already", user_id, ) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 48c7d411d5..74c697960f 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -282,7 +282,7 @@ class DirectoryHandler: except RequestSendFailed: raise SynapseError(502, "Failed to fetch alias") except CodeMessageException as e: - logging.warning( + logger.warning( "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg ) if e.code == 404: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a6de3e824d..729b676163 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1095,7 +1095,9 @@ class FederationHandler: rule = invite_config.get_invite_rule(event.sender) if rule == InviteRule.BLOCK: logger.info( - f"Automatically rejecting invite from {event.sender} due to the invite filtering rules of {event.state_key}" + "Automatically rejecting invite from %s due to the invite filtering rules of %s", + event.sender, + event.state_key, ) raise SynapseError( 403, diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 89191217d6..d96b585308 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -218,7 +218,7 @@ class IdentityHandler: return data except HttpResponseException as e: - logger.error("3PID bind failed with Matrix error: %r", e) + logger.exception("3PID bind failed with Matrix error: %r", e) raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") @@ -323,7 +323,7 @@ class IdentityHandler: # The remote server probably doesn't support unbinding (yet) logger.warning("Received %d response while unbinding threepid", e.code) else: - logger.error("Failed to unbind threepid on identity server: %s", e) + logger.exception("Failed to unbind threepid on identity server: %s", e) raise SynapseError(500, "Failed to contact identity server") except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index cb6de02309..5d6ee6996f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -460,7 +460,7 @@ class MessageHandler: # date from the database in the same database transaction. await self.store.expire_event(event_id) except Exception as e: - logger.error("Could not expire event %s: %r", event_id, e) + logger.exception("Could not expire event %s: %r", event_id, e) # Schedule the expiry of the next event to expire. 
await self._schedule_next_expiry() @@ -2061,7 +2061,8 @@ class EventCreationHandler: # dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY logger.info( "Failed to send dummy event into room %s. Will exclude it from " - "future attempts until cache expires" % (room_id,) + "future attempts until cache expires", + room_id, ) now = self.clock.time_msec() self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now @@ -2120,7 +2121,9 @@ class EventCreationHandler: except AuthError: logger.info( "Failed to send dummy event into room %s for user %s due to " - "lack of power. Will try another user" % (room_id, user_id) + "lack of power. Will try another user", + room_id, + user_id, ) return False diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index acf2d4bc8b..8f3e717fb4 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -563,12 +563,13 @@ class OidcProvider: raise ValueError("Unexpected subject") except Exception: logger.warning( - f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} " + "OIDC Back-Channel Logout is enabled for issuer %r " "but it looks like the configured `user_mapping_provider` " "does not use the `sub` claim as subject. If it is the case, " "and you want Synapse to ignore the `sub` claim in OIDC " "Back-Channel Logouts, set `backchannel_logout_ignore_sub` " - "to `true` in the issuer config." + "to `true` in the issuer config.", + self.issuer, ) @property @@ -826,10 +827,10 @@ class OidcProvider: if response.code < 400: logger.debug( "Invalid response from the authorization server: " - 'responded with a "{status}" ' - "but body has an error field: {error!r}".format( - status=status, error=resp["error"] - ) + 'responded with a "%s" ' + "but body has an error field: %r", + status, + resp["error"], ) description = resp.get("error_description", error) @@ -1385,7 +1386,8 @@ class OidcProvider: # support dynamic registration in Synapse at some point. if not self._config.backchannel_logout_enabled: logger.warning( - f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config" + "Received an OIDC Back-Channel Logout request from issuer %r but it is disabled in config", + self.issuer, ) # TODO: this responds with a 400 status code, which is what the OIDC @@ -1797,5 +1799,5 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): extras[key] = template.render(user=userinfo).strip() except Exception as e: # Log an error and skip this value (don't break login for this). - logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e)) + logger.exception("Failed to render OIDC extra attribute %s: %s", key, e) return extras diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index d577039a4c..970013ef20 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -506,7 +506,7 @@ class RegistrationHandler: ratelimit=False, ) except Exception as e: - logger.error("Failed to join new user to %r: %r", r, e) + logger.exception("Failed to join new user to %r: %r", r, e) async def _join_rooms(self, user_id: str) -> None: """ @@ -596,7 +596,7 @@ class RegistrationHandler: # moving away from bare excepts is a good thing to do. 
logger.error("Failed to join new user to %r: %r", r, e) except Exception as e: - logger.error("Failed to join new user to %r: %r", r, e, exc_info=True) + logger.exception("Failed to join new user to %r: %r", r, e) async def _auto_join_rooms(self, user_id: str) -> None: """Automatically joins users to auto join rooms - creating the room in the first place diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1ccb6f7171..0f004d02d3 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -698,7 +698,7 @@ class RoomCreationHandler: except SynapseError as e: # again I'm not really expecting this to fail, but if it does, I'd rather # we returned the new room to the client at this point. - logger.error("Unable to send updated alias events in old room: %s", e) + logger.exception("Unable to send updated alias events in old room: %s", e) try: await self.event_creation_handler.create_and_send_nonmember_event( @@ -715,7 +715,7 @@ class RoomCreationHandler: except SynapseError as e: # again I'm not really expecting this to fail, but if it does, I'd rather # we returned the new room to the client at this point. - logger.error("Unable to send updated alias events in new room: %s", e) + logger.exception("Unable to send updated alias events in new room: %s", e) async def create_room( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 24ee5e89a6..cf9db7b018 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -922,7 +922,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): rule = invite_config.get_invite_rule(requester.user.to_string()) if rule == InviteRule.BLOCK: logger.info( - f"Automatically rejecting invite from {target_id} due to the the invite filtering rules of {requester.user}" + "Automatically rejecting invite from %s due to the the invite filtering rules of %s", + target_id, + requester.user, ) raise SynapseError( 403, @@ -1570,7 +1572,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): require_consent=False, ) except Exception as e: - logger.exception("Error kicking guest user: %s" % (e,)) + logger.exception("Error kicking guest user: %s", e) async def lookup_room_alias( self, room_alias: RoomAlias diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 8ebd3d4ff9..81bec7499c 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -124,7 +124,7 @@ class SamlHandler: ) # Since SAML sessions timeout it is useful to log when they were created. 
- logger.info("Initiating a new SAML session: %s" % (reqid,)) + logger.info("Initiating a new SAML session: %s", reqid) now = self.clock.time_msec() self._outstanding_requests_dict[reqid] = Saml2SessionData( diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 8cf8d2bada..353991573f 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -238,7 +238,7 @@ class SendEmailHandler: multipart_msg.attach(text_part) multipart_msg.attach(html_part) - logger.info("Sending email to %s" % email_address) + logger.info("Sending email to %s", email_address) await self._sendmail( self._reactor, diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 07827cf95b..7c5cf91aba 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -1230,12 +1230,16 @@ class SsoHandler: if expected_user_id is not None and user_id != expected_user_id: logger.error( "Received a logout notification from SSO provider " - f"{auth_provider_id!r} for the user {expected_user_id!r}, but with " - f"a session ID ({auth_provider_session_id!r}) which belongs to " - f"{user_id!r}. This may happen when the SSO provider user mapper " + "%r for the user %r, but with " + "a session ID (%r) which belongs to " + "%r. This may happen when the SSO provider user mapper " "uses something else than the standard attribute as mapping ID. " "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` " - "in the provider config if that is the case." + "in the provider config if that is the case.", + auth_provider_id, + expected_user_id, + auth_provider_session_id, + user_id, ) continue diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c6f2c38d8d..a400e63fd5 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -3074,8 +3074,10 @@ class SyncHandler: if batch.limited and since_token: user_id = sync_result_builder.sync_config.user.to_string() logger.debug( - "Incremental gappy sync of %s for user %s with %d state events" - % (room_id, user_id, len(state)) + "Incremental gappy sync of %s for user %s with %d state events", + room_id, + user_id, + len(state), ) elif room_builder.rtype == "archived": archived_room_sync = ArchivedSyncResult( diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 33edef5f14..1f692c79a0 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -749,10 +749,9 @@ class UserDirectoryHandler(StateDeltasHandler): ) continue except Exception: - logger.error( + logger.exception( "Failed to refresh profile for %r due to unhandled exception", user_id, - exc_info=True, ) await self.store.set_remote_user_profile_in_user_dir_stale( user_id, diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index e58a416026..b36f7bd5fd 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -50,6 +50,8 @@ if TYPE_CHECKING: from synapse.server import HomeServer +logger = logging.getLogger(__name__) + # This lock is used to avoid creating an event while we are purging the room. # We take a read lock when creating an event, and a write one when purging a room. # This is because it is fine to create several events concurrently, since referenced events @@ -271,8 +273,9 @@ class WaitingLock: next = self._retry_interval self._retry_interval = max(5, next * 2) if self._retry_interval > 5 * 2 ^ 7: # ~10 minutes - logging.warning( - f"Lock timeout is getting excessive: {self._retry_interval}s. 
There may be a deadlock." + logger.warning( + "Lock timeout is getting excessive: %ss. There may be a deadlock.", + self._retry_interval, ) return next * random.uniform(0.9, 1.1) @@ -350,7 +353,8 @@ class WaitingMultiLock: next = self._retry_interval self._retry_interval = max(5, next * 2) if self._retry_interval > 5 * 2 ^ 7: # ~10 minutes - logging.warning( - f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock." + logger.warning( + "Lock timeout is getting excessive: %ss. There may be a deadlock.", + self._retry_interval, ) return next * random.uniform(0.9, 1.1) diff --git a/synapse/http/client.py b/synapse/http/client.py index 84a510fb42..928bfb228a 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -213,7 +213,7 @@ class _IPBlockingResolver: if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): logger.info( - "Blocked %s from DNS resolution to %s" % (ip_address, hostname) + "Blocked %s from DNS resolution to %s", ip_address, hostname ) has_bad_ip = True @@ -318,7 +318,7 @@ class BlocklistingAgentWrapper(Agent): pass else: if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist): - logger.info("Blocking access to %s" % (ip_address,)) + logger.info("Blocking access to %s", ip_address) e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked") return defer.fail(Failure(e)) @@ -723,7 +723,7 @@ class BaseHttpClient: resp_headers = dict(response.headers.getAllRawHeaders()) if response.code > 299: - logger.warning("Got %d when downloading %s" % (response.code, url)) + logger.warning("Got %d when downloading %s", response.code, url) raise SynapseError( HTTPStatus.BAD_GATEWAY, "Got error %d" % (response.code,), Codes.UNKNOWN ) @@ -1106,7 +1106,7 @@ class _MultipartParserProtocol(protocol.Protocol): self.stream.write(data[start:end]) except Exception as e: logger.warning( - f"Exception encountered writing file data to stream: {e}" + "Exception encountered writing file data to stream: %s", e ) self.deferred.errback() self.file_length += end - start @@ -1129,7 +1129,7 @@ class _MultipartParserProtocol(protocol.Protocol): try: self.parser.write(incoming_data) except Exception as e: - logger.warning(f"Exception writing to multipart parser: {e}") + logger.warning("Exception writing to multipart parser: %s", e) self.deferred.errback() return diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 88bf98045c..97a863a118 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -602,7 +602,7 @@ class MatrixFederationHttpClient: try: parse_and_validate_server_name(request.destination) except ValueError: - logger.exception(f"Invalid destination: {request.destination}.") + logger.exception("Invalid destination: %s.", request.destination) raise FederationDeniedError(request.destination) if timeout is not None: diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 18c5a8ecec..1d05c536af 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -313,7 +313,7 @@ class MediaRepository: logger.info("Stored local media in file %r", fname) if should_quarantine: - logger.warn( + logger.warning( "Media has been automatically quarantined as it matched existing quarantined media" ) @@ -366,7 +366,7 @@ class MediaRepository: logger.info("Stored local media in file %r", fname) if should_quarantine: - logger.warn( + logger.warning( "Media has been automatically quarantined as it matched 
existing quarantined media" ) @@ -1393,8 +1393,8 @@ class MediaRepository: ) logger.info( - "Purging remote media last accessed before" - f" {remote_media_threshold_timestamp_ms}" + "Purging remote media last accessed before %s", + remote_media_threshold_timestamp_ms, ) await self.delete_old_remote_media( @@ -1409,8 +1409,8 @@ class MediaRepository: ) logger.info( - "Purging local media last accessed before" - f" {local_media_threshold_timestamp_ms}" + "Purging local media last accessed before %s", + local_media_threshold_timestamp_ms, ) await self.delete_old_local_media( diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index 8ef2b3f0c0..0c665e1942 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -287,7 +287,7 @@ class UrlPreviewer: og["og:image:width"] = dims["width"] og["og:image:height"] = dims["height"] else: - logger.warning("Couldn't get dims for %s" % url) + logger.warning("Couldn't get dims for %s", url) # define our OG response for this media elif _is_html(media_info.media_type): @@ -609,7 +609,7 @@ class UrlPreviewer: should_quarantine = await self.store.get_is_hash_quarantined(sha256) if should_quarantine: - logger.warn( + logger.warning( "Media has been automatically quarantined as it matched existing quarantined media" ) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 3051b623d0..86ac2c2395 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -118,7 +118,7 @@ class LaterGauge(Collector): def _register(self) -> None: if self.name in all_gauges.keys(): - logger.warning("%s already registered, reregistering" % (self.name,)) + logger.warning("%s already registered, reregistering", self.name) REGISTRY.unregister(all_gauges.pop(self.name)) REGISTRY.register(self) @@ -244,7 +244,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector): def _register_with_collector(self) -> None: if self.name in all_gauges.keys(): - logger.warning("%s already registered, reregistering" % (self.name,)) + logger.warning("%s already registered, reregistering", self.name) REGISTRY.unregister(all_gauges.pop(self.name)) REGISTRY.register(self) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index cf611bd90b..fadba480dd 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -135,7 +135,7 @@ class Mailer: self.app_name = app_name self.email_subjects: EmailSubjectConfig = hs.config.email.email_subjects - logger.info("Created Mailer for app_name %s" % app_name) + logger.info("Created Mailer for app_name %s", app_name) emails_sent_counter.labels("password_reset") diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 00f108de08..aeb4267bb7 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -165,7 +165,7 @@ class ClientRestResource(JsonResource): # Fail on unknown servlet groups. 
if servlet_group not in SERVLET_GROUPS: if servlet_group == "media": - logger.warn( + logger.warning( "media.can_load_media_repo needs to be configured for the media servlet to be available" ) raise RuntimeError( diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index ee6a681285..195f22a4c2 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -71,7 +71,7 @@ class QuarantineMediaInRoom(RestServlet): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) - logging.info("Quarantining room: %s", room_id) + logger.info("Quarantining room: %s", room_id) # Quarantine all media in this room num_quarantined = await self.store.quarantine_media_ids_in_room( @@ -98,7 +98,7 @@ class QuarantineMediaByUser(RestServlet): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) - logging.info("Quarantining media by user: %s", user_id) + logger.info("Quarantining media by user: %s", user_id) # Quarantine all media this user has uploaded num_quarantined = await self.store.quarantine_media_ids_by_user( @@ -127,7 +127,7 @@ class QuarantineMediaByID(RestServlet): requester = await self.auth.get_user_by_req(request) await assert_user_is_admin(self.auth, requester) - logging.info("Quarantining media by ID: %s/%s", server_name, media_id) + logger.info("Quarantining media by ID: %s/%s", server_name, media_id) # Quarantine this media id await self.store.quarantine_media_by_id( @@ -155,7 +155,7 @@ class UnquarantineMediaByID(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - logging.info("Remove from quarantine media by ID: %s/%s", server_name, media_id) + logger.info("Remove from quarantine media by ID: %s/%s", server_name, media_id) # Remove from quarantine this media id await self.store.quarantine_media_by_id(server_name, media_id, None) @@ -177,7 +177,7 @@ class ProtectMediaByID(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - logging.info("Protecting local media by ID: %s", media_id) + logger.info("Protecting local media by ID: %s", media_id) # Protect this media id await self.store.mark_local_media_as_safe(media_id, safe=True) @@ -199,7 +199,7 @@ class UnprotectMediaByID(RestServlet): ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - logging.info("Unprotecting local media by ID: %s", media_id) + logger.info("Unprotecting local media by ID: %s", media_id) # Unprotect this media id await self.store.mark_local_media_as_safe(media_id, safe=False) @@ -280,7 +280,7 @@ class DeleteMediaByID(RestServlet): if await self.store.get_local_media(media_id) is None: raise NotFoundError("Unknown media") - logging.info("Deleting local media by ID: %s", media_id) + logger.info("Deleting local media by ID: %s", media_id) deleted_media, total = await self.media_repository.delete_local_media_ids( [media_id] @@ -327,9 +327,11 @@ class DeleteMediaByDateSize(RestServlet): if server_name is not None and self.server_name != server_name: raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media") - logging.info( - "Deleting local media by timestamp: %s, size larger than: %s, keep profile media: %s" - % (before_ts, size_gt, keep_profiles) + logger.info( + "Deleting local media by timestamp: %s, size larger than: %s, keep profile media: %s", + before_ts, + size_gt, + keep_profiles, ) deleted_media, total = await self.media_repository.delete_old_local_media( diff --git 
a/synapse/storage/_base.py b/synapse/storage/_base.py index b5fe7dd858..2309b1648e 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -240,5 +240,5 @@ def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: try: return json_decoder.decode(db_content) except Exception: - logging.warning("Tried to decode '%r' as JSON and failed", db_content) + logger.warning("Tried to decode '%r' as JSON and failed", db_content) raise diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 659ee13d71..f5a6b98be7 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -331,7 +331,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): values={"timestamp": int(self._clock.time_msec())}, ) else: - logger.warning("mau limit reserved threepid %s not found in db" % tp) + logger.warning("mau limit reserved threepid %s not found in db", tp) async def upsert_monthly_active_user(self, user_id: str) -> None: """Updates or inserts the user into the monthly active user table, which diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 31a8ce6666..09671abcad 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -253,8 +253,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): return 1 logger.debug( - "Processing the next %d rooms of %d remaining" - % (len(rooms_to_work_on), progress["remaining"]) + "Processing the next %d rooms of %d remaining", + len(rooms_to_work_on), + progress["remaining"], ) processed_event_count = 0 diff --git a/synapse/storage/invite_rule.py b/synapse/storage/invite_rule.py index b9d9d1eb62..f63390871e 100644 --- a/synapse/storage/invite_rule.py +++ b/synapse/storage/invite_rule.py @@ -50,7 +50,9 @@ class InviteRulesConfig: except Exception as e: # If for whatever reason we can't process this, just ignore it. 
logger.debug( - f"Could not process '{value}' field of invite rule config, ignoring: {e}" + "Could not process '%s' field of invite rule config, ignoring: %s", + value, + e, ) if account_data: diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py index 2a3023cd07..060217575b 100644 --- a/synapse/storage/schema/main/delta/30/as_users.py +++ b/synapse/storage/schema/main/delta/30/as_users.py @@ -63,8 +63,11 @@ def run_upgrade( if user_id in owned.keys(): logger.error( "user_id %s was owned by more than one application" - " service (IDs %s and %s); assigning arbitrarily to %s" - % (user_id, owned[user_id], appservice.id, owned[user_id]) + " service (IDs %s and %s); assigning arbitrarily to %s", + user_id, + owned[user_id], + appservice.id, + owned[user_id], ) owned.setdefault(appservice.id, []).append(user_id) diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 68336814c0..c790c9667f 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -37,6 +37,8 @@ DISTRIBUTION_NAME = "matrix-synapse" __all__ = ["check_requirements"] +logger = logging.getLogger(__name__) + class DependencyException(Exception): @property @@ -211,6 +213,6 @@ def check_requirements(extra: Optional[str] = None) -> None: if deps_unfulfilled: for err in errors: - logging.error(err) + logger.error(err) raise DependencyException(deps_unfulfilled) diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index 52549f53c4..9fdefc5a76 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -133,7 +133,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - # write a log line on SIGTERM. def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn: - logger.warning("Caught signal %s. Stopping daemon." % signum) + logger.warning("Caught signal %s. 
Stopping daemon.", signum) sys.exit(0) signal.signal(signal.SIGTERM, sigterm) diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 4683d09cd7..5169656c73 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -440,7 +440,8 @@ class TaskScheduler: except Exception: f = Failure() logger.error( - f"scheduled task {task.id} failed", + "scheduled task %s failed", + task.id, exc_info=(f.type, f.value, f.getTracebackObject()), ) status = TaskStatus.FAILED @@ -473,8 +474,10 @@ class TaskScheduler: self._clock.time_msec() > task.timestamp + TaskScheduler.LAST_UPDATE_BEFORE_WARNING_MS ): - logger.warn( - f"Task {task.id} (action {task.action}) has seen no update for more than 24h and may be stuck" + logger.warning( + "Task %s (action %s) has seen no update for more than 24h and may be stuck", + task.id, + task.action, ) if task.id in self._running_tasks: diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 42dc844734..a7b702fbd1 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -45,6 +45,8 @@ from synapse.util import Clock from tests import unittest from tests.unittest import override_config +logger = logging.getLogger(__name__) + class FederationServerTests(unittest.FederatingHomeserverTestCase): servlets = [ @@ -252,7 +254,7 @@ class MessageAcceptTests(unittest.FederatingHomeserverTestCase): class ServerACLsTestCase(unittest.TestCase): def test_blocked_server(self) -> None: e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) - logging.info("ACL event: %s", e.content) + logger.info("ACL event: %s", e.content) server_acl_evalutor = server_acl_evaluator_from_event(e) @@ -266,7 +268,7 @@ class ServerACLsTestCase(unittest.TestCase): def test_block_ip_literals(self) -> None: e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]}) - logging.info("ACL event: %s", e.content) + logger.info("ACL event: %s", e.content) server_acl_evalutor = server_acl_evaluator_from_event(e) diff --git a/tests/handlers/test_worker_lock.py b/tests/handlers/test_worker_lock.py index 0691d3f99c..968d119a50 100644 --- a/tests/handlers/test_worker_lock.py +++ b/tests/handlers/test_worker_lock.py @@ -65,8 +65,10 @@ class WorkerLockTestCase(unittest.HomeserverTestCase): timeout_seconds = 15 # Increased timeout for RISC-V # add a print or log statement here for visibility in CI logs logger.info( # use logger.info - f"Detected RISC-V architecture ({current_machine}). " - f"Adjusting test_lock_contention: timeout={timeout_seconds}s" + "Detected RISC-V architecture (%s). 
" + "Adjusting test_lock_contention: timeout=%ss", + current_machine, + timeout_seconds, ) else: # Settings for other architectures diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 0fbb4db2f7..6b25e53c28 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -1822,7 +1822,7 @@ def _get_test_protocol_factory() -> IProtocolFactory: def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" - logger.info(f"Completed request {request}") + logger.info("Completed request %s", request) @implementer(IPolicyForHTTPS) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 80b0856a56..2ef8a95c45 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -893,4 +893,4 @@ def _get_test_protocol_factory() -> IProtocolFactory: def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" - logger.info(f"Completed request {request}") + logger.info("Completed request %s", request) diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index f5412ac6e2..4178a8d831 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -86,11 +86,11 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): # Send some debug messages for i in range(3): - logger.debug("debug %s" % (i,)) + logger.debug("debug %s", i) # Send a bunch of useful messages for i in range(7): - logger.info("info %s" % (i,)) + logger.info("info %s", i) # The last debug message pushes it past the maximum buffer logger.debug("too much debug") @@ -116,15 +116,15 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): # Send some debug messages for i in range(3): - logger.debug("debug %s" % (i,)) + logger.debug("debug %s", i) # Send a bunch of useful messages for i in range(10): - logger.warning("warn %s" % (i,)) + logger.warning("warn %s", i) # Send a bunch of info messages for i in range(3): - logger.info("info %s" % (i,)) + logger.info("info %s", i) # The last debug message pushes it past the maximum buffer logger.debug("too much debug") @@ -152,7 +152,7 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): # Send a bunch of useful messages for i in range(20): - logger.warning("warn %s" % (i,)) + logger.warning("warn %s", i) # Allow the reconnection client, server = connect_logging_client(self.reactor, 0)