mirror of https://github.com/element-hq/synapse.git (synced 2025-11-04 00:01:22 -05:00)

Enable flake8-logging and flake8-logging-format rules in Ruff and fix related issues throughout the codebase (#18542)

This can be reviewed commit by commit. It enables the `flake8-logging` and `flake8-logging-format` rules in Ruff, and switches to logging exception stack traces in a few places where it makes sense.

- https://docs.astral.sh/ruff/rules/#flake8-logging-log
- https://docs.astral.sh/ruff/rules/#flake8-logging-format-g

### Linting to avoid pre-formatting log messages

See [`adamchainz/flake8-logging` -> *LOG011 avoid pre-formatting log messages*](152db2f167/README.rst (log011-avoid-pre-formatting-log-messages)).

Practically, this means preferring placeholders (`%s`) over f-strings for logging. The placeholder values are passed as arguments to the logger, so the logger can handle them specially. For example, Sentry records the args separately in its logging integration: c15b390dfe/sentry_sdk/integrations/logging.py (L280-L284). One small theoretical performance benefit is that messages for log levels that aren't enabled are never formatted, so formatted strings aren't created unnecessarily.

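As a minimal illustration of the pattern these rules enforce (example code only, not taken from the Synapse tree; the function and message are made up), the f-string call below is what the pre-formatting checks flag, and the placeholder form is what the fixes in this PR switch to:

```python
import logging

logger = logging.getLogger(__name__)


def greet(user_id: str) -> None:
    # Flagged: the f-string is rendered eagerly, even when DEBUG is disabled,
    # and the logger only ever sees the already-formatted string.
    logger.debug(f"Saying hello to {user_id}")

    # Preferred: the template and argument are passed separately, so formatting
    # is deferred until a handler actually emits the record, and integrations
    # such as Sentry can capture the argument on its own.
    logger.debug("Saying hello to %s", user_id)
```
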
This commit is contained in:

parent e4ca593eb6
commit 0de7aa9953

changelog.d/18542.misc (new file)

@@ -0,0 +1 @@
+Enable [`flake8-logging`](https://docs.astral.sh/ruff/rules/#flake8-logging-log) and [`flake8-logging-format`](https://docs.astral.sh/ruff/rules/#flake8-logging-format-g) rules in Ruff and fix related issues throughout the codebase.

@@ -74,6 +74,10 @@ select = [
     "PIE",
     # flake8-executable
     "EXE",
+    # flake8-logging
+    "LOG",
+    # flake8-logging-format
+    "G",
 ]

 [tool.ruff.lint.isort]

@@ -243,7 +243,7 @@ def do_lint() -> Set[str]:
                 importlib.import_module(module_info.name)
             except ModelCheckerException as e:
                 logger.warning(
-                    f"Bad annotation found when importing {module_info.name}"
+                    "Bad annotation found when importing %s", module_info.name
                 )
                 failures.add(format_model_checker_exception(e))

@@ -475,7 +475,7 @@ class MSC3861DelegatedAuth(BaseAuth):
             # XXX: This is a temporary solution so that the admin API can be called by
             # the OIDC provider. This will be removed once we have OIDC client
             # credentials grant support in matrix-authentication-service.
-            logging.info("Admin toked used")
+            logger.info("Admin toked used")
             # XXX: that user doesn't exist and won't be provisioned.
             # This is mostly fine for admin calls, but we should also think about doing
             # requesters without a user_id.

@@ -445,8 +445,8 @@ def listen_http(
         # getHost() returns a UNIXAddress which contains an instance variable of 'name'
         # encoded as a byte string. Decode as utf-8 so pretty.
         logger.info(
-            "Synapse now listening on Unix Socket at: "
-            f"{ports[0].getHost().name.decode('utf-8')}"
+            "Synapse now listening on Unix Socket at: %s",
+            ports[0].getHost().name.decode("utf-8"),
         )

     return ports

@@ -173,7 +173,7 @@ async def phone_stats_home(
     stats["log_level"] = logging.getLevelName(log_level)

     logger.info(
-        "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
+        "Reporting stats to %s: %s", hs.config.metrics.report_stats_endpoint, stats
     )
     try:
         await hs.get_proxied_http_client().put_json(

@@ -461,7 +461,7 @@ class _TransactionController:
         recoverer = self.recoverers.get(service.id)
         if not recoverer:
             # No need to force a retry on a happy AS.
-            logger.info(f"{service.id} is not in recovery, not forcing retry")
+            logger.info("%s is not in recovery, not forcing retry", service.id)
             return

         recoverer.force_retry()

@@ -51,6 +51,8 @@ if TYPE_CHECKING:
     from synapse.config.homeserver import HomeServerConfig
     from synapse.server import HomeServer

+logger = logging.getLogger(__name__)
+
 DEFAULT_LOG_CONFIG = Template(
     """\
 # Log configuration for Synapse.

@@ -291,7 +293,7 @@ def _load_logging_config(log_config_path: str) -> None:
         log_config = yaml.safe_load(f.read())

     if not log_config:
-        logging.warning("Loaded a blank logging config?")
+        logger.warning("Loaded a blank logging config?")

     # If the old structured logging configuration is being used, raise an error.
     if "structured" in log_config and log_config.get("structured"):

@@ -312,7 +314,7 @@ def _reload_logging_config(log_config_path: Optional[str]) -> None:
         return

     _load_logging_config(log_config_path)
-    logging.info("Reloaded log config from %s due to SIGHUP", log_config_path)
+    logger.info("Reloaded log config from %s due to SIGHUP", log_config_path)


 def setup_logging(

@@ -349,17 +351,17 @@ def setup_logging(
     appbase.register_sighup(_reload_logging_config, log_config_path)

     # Log immediately so we can grep backwards.
-    logging.warning("***** STARTING SERVER *****")
-    logging.warning(
+    logger.warning("***** STARTING SERVER *****")
+    logger.warning(
         "Server %s version %s",
         sys.argv[0],
         SYNAPSE_VERSION,
     )
-    logging.warning("Copyright (c) 2023 New Vector, Inc")
-    logging.warning(
+    logger.warning("Copyright (c) 2023 New Vector, Inc")
+    logger.warning(
         "Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse"
     )
-    logging.info("Server hostname: %s", config.server.server_name)
-    logging.info("Public Base URL: %s", config.server.public_baseurl)
-    logging.info("Instance name: %s", hs.get_instance_name())
-    logging.info("Twisted reactor: %s", type(reactor).__name__)
+    logger.info("Server hostname: %s", config.server.server_name)
+    logger.info("Public Base URL: %s", config.server.public_baseurl)
+    logger.info("Instance name: %s", hs.get_instance_name())
+    logger.info("Twisted reactor: %s", type(reactor).__name__)

@@ -27,7 +27,7 @@ from synapse.types import JsonDict

 from ._base import Config, ConfigError

-logger = logging.Logger(__name__)
+logger = logging.getLogger(__name__)


 class RoomDefaultEncryptionTypes:

@@ -41,7 +41,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
 from ._base import Config, ConfigError
 from ._util import validate_config

-logger = logging.Logger(__name__)
+logger = logging.getLogger(__name__)

 DIRECT_TCP_ERROR = """
 Using direct TCP replication for workers is no longer supported.

@@ -195,15 +195,18 @@ class InviteAutoAccepter:
             except SynapseError as e:
                 if e.code == HTTPStatus.FORBIDDEN:
                     logger.debug(
-                        f"Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: {e}"
+                        "Update_room_membership was forbidden. This can sometimes be expected for remote invites. Exception: %s",
+                        e,
                     )
                 else:
-                    logger.warn(
-                        f"Update_room_membership raised the following unexpected (SynapseError) exception: {e}"
+                    logger.warning(
+                        "Update_room_membership raised the following unexpected (SynapseError) exception: %s",
+                        e,
                     )
             except Exception as e:
-                logger.warn(
-                    f"Update_room_membership raised the following unexpected exception: {e}"
+                logger.warning(
+                    "Update_room_membership raised the following unexpected exception: %s",
+                    e,
                 )

             sleep = 2**retries

@@ -1818,7 +1818,7 @@ class FederationClient(FederationBase):
             )
             return timestamp_to_event_response
         except SynapseError as e:
-            logger.warn(
+            logger.warning(
                 "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
                 room_id,
                 timestamp,

@@ -928,7 +928,8 @@ class FederationServer(FederationBase):
             # joins) or the full state (for full joins).
             # Return a 404 as we would if we weren't in the room at all.
             logger.info(
-                f"Rejecting /send_{membership_type} to %s because it's a partial state room",
+                "Rejecting /send_%s to %s because it's a partial state room",
+                membership_type,
                 room_id,
             )
             raise SynapseError(

@@ -495,7 +495,7 @@ class AdminHandler:
                     )
                 except Exception as ex:
                     logger.info(
-                        f"Redaction of event {event.event_id} failed due to: {ex}"
+                        "Redaction of event %s failed due to: %s", event.event_id, ex
                     )
                     result["failed_redactions"][event.event_id] = str(ex)
                     await self._task_scheduler.update_task(task.id, result=result)

@@ -465,9 +465,7 @@ class ApplicationServicesHandler:
             service, "read_receipt"
         )
         if new_token is not None and new_token.stream <= from_key:
-            logger.debug(
-                "Rejecting token lower than or equal to stored: %s" % (new_token,)
-            )
+            logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
             return []

         from_token = MultiWriterStreamToken(stream=from_key)

@@ -509,9 +507,7 @@ class ApplicationServicesHandler:
             service, "presence"
         )
         if new_token is not None and new_token <= from_key:
-            logger.debug(
-                "Rejecting token lower than or equal to stored: %s" % (new_token,)
-            )
+            logger.debug("Rejecting token lower than or equal to stored: %s", new_token)
             return []

         for user in users:

@@ -1895,7 +1895,7 @@ def load_single_legacy_password_auth_provider(
     try:
         provider = module(config=config, account_handler=api)
     except Exception as e:
-        logger.error("Error while initializing %r: %s", module, e)
+        logger.exception("Error while initializing %r: %s", module, e)
         raise

     # All methods that the module provides should be async, but this wasn't enforced

@@ -2428,7 +2428,7 @@ class PasswordAuthProvider:
             except CancelledError:
                 raise
             except Exception as e:
-                logger.error("Module raised an exception in is_3pid_allowed: %s", e)
+                logger.exception("Module raised an exception in is_3pid_allowed: %s", e)
                 raise SynapseError(code=500, msg="Internal Server Error")

         return True

@@ -1600,7 +1600,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
             if prev_stream_id is not None and cached_devices == {
                 d["device_id"]: d for d in devices
             }:
-                logging.info(
+                logger.info(
                     "Skipping device list resync for %s, as our cache matches already",
                     user_id,
                 )

@@ -282,7 +282,7 @@ class DirectoryHandler:
             except RequestSendFailed:
                 raise SynapseError(502, "Failed to fetch alias")
             except CodeMessageException as e:
-                logging.warning(
+                logger.warning(
                     "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
                 )
                 if e.code == 404:

@@ -1095,7 +1095,9 @@ class FederationHandler:
         rule = invite_config.get_invite_rule(event.sender)
         if rule == InviteRule.BLOCK:
             logger.info(
-                f"Automatically rejecting invite from {event.sender} due to the invite filtering rules of {event.state_key}"
+                "Automatically rejecting invite from %s due to the invite filtering rules of %s",
+                event.sender,
+                event.state_key,
             )
             raise SynapseError(
                 403,

@@ -218,7 +218,7 @@ class IdentityHandler:

             return data
         except HttpResponseException as e:
-            logger.error("3PID bind failed with Matrix error: %r", e)
+            logger.exception("3PID bind failed with Matrix error: %r", e)
             raise e.to_synapse_error()
         except RequestTimedOutError:
             raise SynapseError(500, "Timed out contacting identity server")

@@ -323,7 +323,7 @@ class IdentityHandler:
                 # The remote server probably doesn't support unbinding (yet)
                 logger.warning("Received %d response while unbinding threepid", e.code)
             else:
-                logger.error("Failed to unbind threepid on identity server: %s", e)
+                logger.exception("Failed to unbind threepid on identity server: %s", e)
                 raise SynapseError(500, "Failed to contact identity server")
         except RequestTimedOutError:
             raise SynapseError(500, "Timed out contacting identity server")

@@ -460,7 +460,7 @@ class MessageHandler:
             # date from the database in the same database transaction.
             await self.store.expire_event(event_id)
         except Exception as e:
-            logger.error("Could not expire event %s: %r", event_id, e)
+            logger.exception("Could not expire event %s: %r", event_id, e)

         # Schedule the expiry of the next event to expire.
         await self._schedule_next_expiry()

@@ -2061,7 +2061,8 @@ class EventCreationHandler:
                 # dependent on _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
                 logger.info(
                     "Failed to send dummy event into room %s. Will exclude it from "
-                    "future attempts until cache expires" % (room_id,)
+                    "future attempts until cache expires",
+                    room_id,
                 )
                 now = self.clock.time_msec()
                 self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now

@@ -2120,7 +2121,9 @@ class EventCreationHandler:
             except AuthError:
                 logger.info(
                     "Failed to send dummy event into room %s for user %s due to "
-                    "lack of power. Will try another user" % (room_id, user_id)
+                    "lack of power. Will try another user",
+                    room_id,
+                    user_id,
                 )
         return False

@@ -563,12 +563,13 @@ class OidcProvider:
                         raise ValueError("Unexpected subject")
                 except Exception:
                     logger.warning(
-                        f"OIDC Back-Channel Logout is enabled for issuer {self.issuer!r} "
+                        "OIDC Back-Channel Logout is enabled for issuer %r "
                         "but it looks like the configured `user_mapping_provider` "
                         "does not use the `sub` claim as subject. If it is the case, "
                         "and you want Synapse to ignore the `sub` claim in OIDC "
                         "Back-Channel Logouts, set `backchannel_logout_ignore_sub` "
-                        "to `true` in the issuer config."
+                        "to `true` in the issuer config.",
+                        self.issuer,
                     )

     @property

@@ -826,10 +827,10 @@ class OidcProvider:
             if response.code < 400:
                 logger.debug(
                     "Invalid response from the authorization server: "
-                    'responded with a "{status}" '
-                    "but body has an error field: {error!r}".format(
-                        status=status, error=resp["error"]
-                    )
+                    'responded with a "%s" '
+                    "but body has an error field: %r",
+                    status,
+                    resp["error"],
                 )

             description = resp.get("error_description", error)

@@ -1385,7 +1386,8 @@ class OidcProvider:
         # support dynamic registration in Synapse at some point.
         if not self._config.backchannel_logout_enabled:
             logger.warning(
-                f"Received an OIDC Back-Channel Logout request from issuer {self.issuer!r} but it is disabled in config"
+                "Received an OIDC Back-Channel Logout request from issuer %r but it is disabled in config",
+                self.issuer,
             )

             # TODO: this responds with a 400 status code, which is what the OIDC

@@ -1797,5 +1799,5 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]):
                 extras[key] = template.render(user=userinfo).strip()
             except Exception as e:
                 # Log an error and skip this value (don't break login for this).
-                logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e))
+                logger.exception("Failed to render OIDC extra attribute %s: %s", key, e)

         return extras

@@ -506,7 +506,7 @@ class RegistrationHandler:
                             ratelimit=False,
                         )
             except Exception as e:
-                logger.error("Failed to join new user to %r: %r", r, e)
+                logger.exception("Failed to join new user to %r: %r", r, e)

     async def _join_rooms(self, user_id: str) -> None:
         """

@@ -596,7 +596,7 @@ class RegistrationHandler:
                 # moving away from bare excepts is a good thing to do.
                 logger.error("Failed to join new user to %r: %r", r, e)
             except Exception as e:
-                logger.error("Failed to join new user to %r: %r", r, e, exc_info=True)
+                logger.exception("Failed to join new user to %r: %r", r, e)

     async def _auto_join_rooms(self, user_id: str) -> None:
         """Automatically joins users to auto join rooms - creating the room in the first place

@@ -698,7 +698,7 @@ class RoomCreationHandler:
         except SynapseError as e:
             # again I'm not really expecting this to fail, but if it does, I'd rather
             # we returned the new room to the client at this point.
-            logger.error("Unable to send updated alias events in old room: %s", e)
+            logger.exception("Unable to send updated alias events in old room: %s", e)

         try:
             await self.event_creation_handler.create_and_send_nonmember_event(

@@ -715,7 +715,7 @@ class RoomCreationHandler:
         except SynapseError as e:
             # again I'm not really expecting this to fail, but if it does, I'd rather
             # we returned the new room to the client at this point.
-            logger.error("Unable to send updated alias events in new room: %s", e)
+            logger.exception("Unable to send updated alias events in new room: %s", e)

     async def create_room(
         self,

@@ -922,7 +922,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                 rule = invite_config.get_invite_rule(requester.user.to_string())
                 if rule == InviteRule.BLOCK:
                     logger.info(
-                        f"Automatically rejecting invite from {target_id} due to the the invite filtering rules of {requester.user}"
+                        "Automatically rejecting invite from %s due to the the invite filtering rules of %s",
+                        target_id,
+                        requester.user,
                     )
                     raise SynapseError(
                         403,

@@ -1570,7 +1572,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                     require_consent=False,
                 )
             except Exception as e:
-                logger.exception("Error kicking guest user: %s" % (e,))
+                logger.exception("Error kicking guest user: %s", e)

     async def lookup_room_alias(
         self, room_alias: RoomAlias

@@ -124,7 +124,7 @@ class SamlHandler:
         )

         # Since SAML sessions timeout it is useful to log when they were created.
-        logger.info("Initiating a new SAML session: %s" % (reqid,))
+        logger.info("Initiating a new SAML session: %s", reqid)

         now = self.clock.time_msec()
         self._outstanding_requests_dict[reqid] = Saml2SessionData(

@@ -238,7 +238,7 @@ class SendEmailHandler:
         multipart_msg.attach(text_part)
         multipart_msg.attach(html_part)

-        logger.info("Sending email to %s" % email_address)
+        logger.info("Sending email to %s", email_address)

         await self._sendmail(
             self._reactor,

@@ -1230,12 +1230,16 @@ class SsoHandler:
             if expected_user_id is not None and user_id != expected_user_id:
                 logger.error(
                     "Received a logout notification from SSO provider "
-                    f"{auth_provider_id!r} for the user {expected_user_id!r}, but with "
-                    f"a session ID ({auth_provider_session_id!r}) which belongs to "
-                    f"{user_id!r}. This may happen when the SSO provider user mapper "
+                    "%r for the user %r, but with "
+                    "a session ID (%r) which belongs to "
+                    "%r. This may happen when the SSO provider user mapper "
                     "uses something else than the standard attribute as mapping ID. "
                     "For OIDC providers, set `backchannel_logout_ignore_sub` to `true` "
-                    "in the provider config if that is the case."
+                    "in the provider config if that is the case.",
+                    auth_provider_id,
+                    expected_user_id,
+                    auth_provider_session_id,
+                    user_id,
                 )
                 continue

@@ -3074,8 +3074,10 @@ class SyncHandler:
                 if batch.limited and since_token:
                     user_id = sync_result_builder.sync_config.user.to_string()
                     logger.debug(
-                        "Incremental gappy sync of %s for user %s with %d state events"
-                        % (room_id, user_id, len(state))
+                        "Incremental gappy sync of %s for user %s with %d state events",
+                        room_id,
+                        user_id,
+                        len(state),
                     )
             elif room_builder.rtype == "archived":
                 archived_room_sync = ArchivedSyncResult(

@@ -749,10 +749,9 @@ class UserDirectoryHandler(StateDeltasHandler):
                     )
                     continue
                 except Exception:
-                    logger.error(
+                    logger.exception(
                         "Failed to refresh profile for %r due to unhandled exception",
                         user_id,
-                        exc_info=True,
                     )
                     await self.store.set_remote_user_profile_in_user_dir_stale(
                         user_id,

@@ -50,6 +50,8 @@ if TYPE_CHECKING:
     from synapse.server import HomeServer


+logger = logging.getLogger(__name__)
+
 # This lock is used to avoid creating an event while we are purging the room.
 # We take a read lock when creating an event, and a write one when purging a room.
 # This is because it is fine to create several events concurrently, since referenced events

@@ -271,8 +273,9 @@ class WaitingLock:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
         if self._retry_interval > 5 * 2 ^ 7:  # ~10 minutes
-            logging.warning(
-                f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
+            logger.warning(
+                "Lock timeout is getting excessive: %ss. There may be a deadlock.",
+                self._retry_interval,
             )
         return next * random.uniform(0.9, 1.1)

@@ -350,7 +353,8 @@ class WaitingMultiLock:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
         if self._retry_interval > 5 * 2 ^ 7:  # ~10 minutes
-            logging.warning(
-                f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
+            logger.warning(
+                "Lock timeout is getting excessive: %ss. There may be a deadlock.",
+                self._retry_interval,
             )
         return next * random.uniform(0.9, 1.1)

@@ -213,7 +213,7 @@ class _IPBlockingResolver:

                 if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
                     logger.info(
-                        "Blocked %s from DNS resolution to %s" % (ip_address, hostname)
+                        "Blocked %s from DNS resolution to %s", ip_address, hostname
                     )
                     has_bad_ip = True

@@ -318,7 +318,7 @@ class BlocklistingAgentWrapper(Agent):
             pass
         else:
             if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
-                logger.info("Blocking access to %s" % (ip_address,))
+                logger.info("Blocking access to %s", ip_address)
                 e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked")
                 return defer.fail(Failure(e))

@@ -723,7 +723,7 @@ class BaseHttpClient:
         resp_headers = dict(response.headers.getAllRawHeaders())

         if response.code > 299:
-            logger.warning("Got %d when downloading %s" % (response.code, url))
+            logger.warning("Got %d when downloading %s", response.code, url)
             raise SynapseError(
                 HTTPStatus.BAD_GATEWAY, "Got error %d" % (response.code,), Codes.UNKNOWN
             )

@@ -1106,7 +1106,7 @@ class _MultipartParserProtocol(protocol.Protocol):
                         self.stream.write(data[start:end])
                     except Exception as e:
                         logger.warning(
-                            f"Exception encountered writing file data to stream: {e}"
+                            "Exception encountered writing file data to stream: %s", e
                         )
                         self.deferred.errback()
                     self.file_length += end - start

@@ -1129,7 +1129,7 @@ class _MultipartParserProtocol(protocol.Protocol):
         try:
             self.parser.write(incoming_data)
         except Exception as e:
-            logger.warning(f"Exception writing to multipart parser: {e}")
+            logger.warning("Exception writing to multipart parser: %s", e)
             self.deferred.errback()
             return

@@ -602,7 +602,7 @@ class MatrixFederationHttpClient:
         try:
             parse_and_validate_server_name(request.destination)
         except ValueError:
-            logger.exception(f"Invalid destination: {request.destination}.")
+            logger.exception("Invalid destination: %s.", request.destination)
             raise FederationDeniedError(request.destination)

         if timeout is not None:

@@ -313,7 +313,7 @@ class MediaRepository:
         logger.info("Stored local media in file %r", fname)

         if should_quarantine:
-            logger.warn(
+            logger.warning(
                 "Media has been automatically quarantined as it matched existing quarantined media"
             )

@@ -366,7 +366,7 @@ class MediaRepository:
         logger.info("Stored local media in file %r", fname)

         if should_quarantine:
-            logger.warn(
+            logger.warning(
                 "Media has been automatically quarantined as it matched existing quarantined media"
             )

@@ -1393,8 +1393,8 @@ class MediaRepository:
             )

             logger.info(
-                "Purging remote media last accessed before"
-                f" {remote_media_threshold_timestamp_ms}"
+                "Purging remote media last accessed before %s",
+                remote_media_threshold_timestamp_ms,
             )

             await self.delete_old_remote_media(

@@ -1409,8 +1409,8 @@ class MediaRepository:
             )

             logger.info(
-                "Purging local media last accessed before"
-                f" {local_media_threshold_timestamp_ms}"
+                "Purging local media last accessed before %s",
+                local_media_threshold_timestamp_ms,
             )

             await self.delete_old_local_media(

@@ -287,7 +287,7 @@ class UrlPreviewer:
                 og["og:image:width"] = dims["width"]
                 og["og:image:height"] = dims["height"]
             else:
-                logger.warning("Couldn't get dims for %s" % url)
+                logger.warning("Couldn't get dims for %s", url)

             # define our OG response for this media
         elif _is_html(media_info.media_type):

@@ -609,7 +609,7 @@ class UrlPreviewer:
             should_quarantine = await self.store.get_is_hash_quarantined(sha256)

             if should_quarantine:
-                logger.warn(
+                logger.warning(
                     "Media has been automatically quarantined as it matched existing quarantined media"
                 )

@@ -118,7 +118,7 @@ class LaterGauge(Collector):

     def _register(self) -> None:
         if self.name in all_gauges.keys():
-            logger.warning("%s already registered, reregistering" % (self.name,))
+            logger.warning("%s already registered, reregistering", self.name)
             REGISTRY.unregister(all_gauges.pop(self.name))

         REGISTRY.register(self)

@@ -244,7 +244,7 @@ class InFlightGauge(Generic[MetricsEntry], Collector):

     def _register_with_collector(self) -> None:
         if self.name in all_gauges.keys():
-            logger.warning("%s already registered, reregistering" % (self.name,))
+            logger.warning("%s already registered, reregistering", self.name)
             REGISTRY.unregister(all_gauges.pop(self.name))

         REGISTRY.register(self)

@@ -135,7 +135,7 @@ class Mailer:
         self.app_name = app_name
         self.email_subjects: EmailSubjectConfig = hs.config.email.email_subjects

-        logger.info("Created Mailer for app_name %s" % app_name)
+        logger.info("Created Mailer for app_name %s", app_name)

     emails_sent_counter.labels("password_reset")

@@ -165,7 +165,7 @@ class ClientRestResource(JsonResource):
             # Fail on unknown servlet groups.
             if servlet_group not in SERVLET_GROUPS:
                 if servlet_group == "media":
-                    logger.warn(
+                    logger.warning(
                         "media.can_load_media_repo needs to be configured for the media servlet to be available"
                     )
                 raise RuntimeError(

@@ -71,7 +71,7 @@ class QuarantineMediaInRoom(RestServlet):
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester)

-        logging.info("Quarantining room: %s", room_id)
+        logger.info("Quarantining room: %s", room_id)

         # Quarantine all media in this room
         num_quarantined = await self.store.quarantine_media_ids_in_room(

@@ -98,7 +98,7 @@ class QuarantineMediaByUser(RestServlet):
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester)

-        logging.info("Quarantining media by user: %s", user_id)
+        logger.info("Quarantining media by user: %s", user_id)

         # Quarantine all media this user has uploaded
         num_quarantined = await self.store.quarantine_media_ids_by_user(

@@ -127,7 +127,7 @@ class QuarantineMediaByID(RestServlet):
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester)

-        logging.info("Quarantining media by ID: %s/%s", server_name, media_id)
+        logger.info("Quarantining media by ID: %s/%s", server_name, media_id)

         # Quarantine this media id
         await self.store.quarantine_media_by_id(

@@ -155,7 +155,7 @@ class UnquarantineMediaByID(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)

-        logging.info("Remove from quarantine media by ID: %s/%s", server_name, media_id)
+        logger.info("Remove from quarantine media by ID: %s/%s", server_name, media_id)

         # Remove from quarantine this media id
         await self.store.quarantine_media_by_id(server_name, media_id, None)

@@ -177,7 +177,7 @@ class ProtectMediaByID(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)

-        logging.info("Protecting local media by ID: %s", media_id)
+        logger.info("Protecting local media by ID: %s", media_id)

         # Protect this media id
         await self.store.mark_local_media_as_safe(media_id, safe=True)

@@ -199,7 +199,7 @@ class UnprotectMediaByID(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)

-        logging.info("Unprotecting local media by ID: %s", media_id)
+        logger.info("Unprotecting local media by ID: %s", media_id)

         # Unprotect this media id
         await self.store.mark_local_media_as_safe(media_id, safe=False)

@@ -280,7 +280,7 @@ class DeleteMediaByID(RestServlet):
         if await self.store.get_local_media(media_id) is None:
             raise NotFoundError("Unknown media")

-        logging.info("Deleting local media by ID: %s", media_id)
+        logger.info("Deleting local media by ID: %s", media_id)

         deleted_media, total = await self.media_repository.delete_local_media_ids(
             [media_id]

@@ -327,9 +327,11 @@ class DeleteMediaByDateSize(RestServlet):
         if server_name is not None and self.server_name != server_name:
             raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media")

-        logging.info(
-            "Deleting local media by timestamp: %s, size larger than: %s, keep profile media: %s"
-            % (before_ts, size_gt, keep_profiles)
+        logger.info(
+            "Deleting local media by timestamp: %s, size larger than: %s, keep profile media: %s",
+            before_ts,
+            size_gt,
+            keep_profiles,
         )

         deleted_media, total = await self.media_repository.delete_old_local_media(

@@ -240,5 +240,5 @@ def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any:
     try:
         return json_decoder.decode(db_content)
     except Exception:
-        logging.warning("Tried to decode '%r' as JSON and failed", db_content)
+        logger.warning("Tried to decode '%r' as JSON and failed", db_content)
         raise

@@ -331,7 +331,7 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore):
                         values={"timestamp": int(self._clock.time_msec())},
                     )
             else:
-                logger.warning("mau limit reserved threepid %s not found in db" % tp)
+                logger.warning("mau limit reserved threepid %s not found in db", tp)

     async def upsert_monthly_active_user(self, user_id: str) -> None:
         """Updates or inserts the user into the monthly active user table, which

@@ -253,8 +253,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             return 1

         logger.debug(
-            "Processing the next %d rooms of %d remaining"
-            % (len(rooms_to_work_on), progress["remaining"])
+            "Processing the next %d rooms of %d remaining",
+            len(rooms_to_work_on),
+            progress["remaining"],
         )

         processed_event_count = 0

@@ -50,7 +50,9 @@ class InviteRulesConfig:
                         except Exception as e:
                             # If for whatever reason we can't process this, just ignore it.
                             logger.debug(
-                                f"Could not process '{value}' field of invite rule config, ignoring: {e}"
+                                "Could not process '%s' field of invite rule config, ignoring: %s",
+                                value,
+                                e,
                             )

         if account_data:

@@ -63,8 +63,11 @@ def run_upgrade(
                 if user_id in owned.keys():
                     logger.error(
                         "user_id %s was owned by more than one application"
-                        " service (IDs %s and %s); assigning arbitrarily to %s"
-                        % (user_id, owned[user_id], appservice.id, owned[user_id])
+                        " service (IDs %s and %s); assigning arbitrarily to %s",
+                        user_id,
+                        owned[user_id],
+                        appservice.id,
+                        owned[user_id],
                     )
                 owned.setdefault(appservice.id, []).append(user_id)

@@ -37,6 +37,8 @@ DISTRIBUTION_NAME = "matrix-synapse"

 __all__ = ["check_requirements"]

+logger = logging.getLogger(__name__)
+

 class DependencyException(Exception):
     @property

@@ -211,6 +213,6 @@ def check_requirements(extra: Optional[str] = None) -> None:

     if deps_unfulfilled:
         for err in errors:
-            logging.error(err)
+            logger.error(err)

         raise DependencyException(deps_unfulfilled)

@@ -133,7 +133,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -

     # write a log line on SIGTERM.
     def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn:
-        logger.warning("Caught signal %s. Stopping daemon." % signum)
+        logger.warning("Caught signal %s. Stopping daemon.", signum)
         sys.exit(0)

     signal.signal(signal.SIGTERM, sigterm)

@@ -440,7 +440,8 @@ class TaskScheduler:
                 except Exception:
                     f = Failure()
                     logger.error(
-                        f"scheduled task {task.id} failed",
+                        "scheduled task %s failed",
+                        task.id,
                         exc_info=(f.type, f.value, f.getTracebackObject()),
                     )
                     status = TaskStatus.FAILED

@@ -473,8 +474,10 @@ class TaskScheduler:
             self._clock.time_msec()
             > task.timestamp + TaskScheduler.LAST_UPDATE_BEFORE_WARNING_MS
         ):
-            logger.warn(
-                f"Task {task.id} (action {task.action}) has seen no update for more than 24h and may be stuck"
+            logger.warning(
+                "Task %s (action %s) has seen no update for more than 24h and may be stuck",
+                task.id,
+                task.action,
             )

         if task.id in self._running_tasks:

@@ -45,6 +45,8 @@ from synapse.util import Clock
 from tests import unittest
 from tests.unittest import override_config

+logger = logging.getLogger(__name__)
+

 class FederationServerTests(unittest.FederatingHomeserverTestCase):
     servlets = [

@@ -252,7 +254,7 @@ class MessageAcceptTests(unittest.FederatingHomeserverTestCase):
 class ServerACLsTestCase(unittest.TestCase):
     def test_blocked_server(self) -> None:
         e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
-        logging.info("ACL event: %s", e.content)
+        logger.info("ACL event: %s", e.content)

         server_acl_evalutor = server_acl_evaluator_from_event(e)

@@ -266,7 +268,7 @@ class ServerACLsTestCase(unittest.TestCase):

     def test_block_ip_literals(self) -> None:
         e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]})
-        logging.info("ACL event: %s", e.content)
+        logger.info("ACL event: %s", e.content)

         server_acl_evalutor = server_acl_evaluator_from_event(e)

@@ -65,8 +65,10 @@ class WorkerLockTestCase(unittest.HomeserverTestCase):
             timeout_seconds = 15  # Increased timeout for RISC-V
             # add a print or log statement here for visibility in CI logs
             logger.info(  # use logger.info
-                f"Detected RISC-V architecture ({current_machine}). "
-                f"Adjusting test_lock_contention: timeout={timeout_seconds}s"
+                "Detected RISC-V architecture (%s). "
+                "Adjusting test_lock_contention: timeout=%ss",
+                current_machine,
+                timeout_seconds,
             )
         else:
             # Settings for other architectures

@@ -1822,7 +1822,7 @@ def _get_test_protocol_factory() -> IProtocolFactory:

 def _log_request(request: str) -> None:
     """Implements Factory.log, which is expected by Request.finish"""
-    logger.info(f"Completed request {request}")
+    logger.info("Completed request %s", request)


 @implementer(IPolicyForHTTPS)

@@ -893,4 +893,4 @@ def _get_test_protocol_factory() -> IProtocolFactory:

 def _log_request(request: str) -> None:
     """Implements Factory.log, which is expected by Request.finish"""
-    logger.info(f"Completed request {request}")
+    logger.info("Completed request %s", request)

@@ -86,11 +86,11 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):

         # Send some debug messages
         for i in range(3):
-            logger.debug("debug %s" % (i,))
+            logger.debug("debug %s", i)

         # Send a bunch of useful messages
         for i in range(7):
-            logger.info("info %s" % (i,))
+            logger.info("info %s", i)

         # The last debug message pushes it past the maximum buffer
         logger.debug("too much debug")

@@ -116,15 +116,15 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):

         # Send some debug messages
         for i in range(3):
-            logger.debug("debug %s" % (i,))
+            logger.debug("debug %s", i)

         # Send a bunch of useful messages
         for i in range(10):
-            logger.warning("warn %s" % (i,))
+            logger.warning("warn %s", i)

         # Send a bunch of info messages
         for i in range(3):
-            logger.info("info %s" % (i,))
+            logger.info("info %s", i)

         # The last debug message pushes it past the maximum buffer
         logger.debug("too much debug")

@@ -152,7 +152,7 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):

         # Send a bunch of useful messages
         for i in range(20):
-            logger.warning("warn %s" % (i,))
+            logger.warning("warn %s", i)

         # Allow the reconnection
         client, server = connect_logging_client(self.reactor, 0)