From 4508d24847f510b460696342afb650eb8f290a70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Zimmermann?= <101292599+ekneg54@users.noreply.github.com>
Date: Mon, 13 May 2024 15:13:01 +0200
Subject: [PATCH] advanced logging configuration (#584)

* implement dictconfig

* change logging format and logger names

* shorten logger names

* remove queuehandler and own listener process

---------

Co-authored-by: dtrai2 <95028228+dtrai2@users.noreply.github.com>
---
 .gitignore                                    |   1 +
 .pylintrc                                     |   1 +
 CHANGELOG.md                                  |   9 +-
 logprep/connector/elasticsearch/output.py     |   4 -
 logprep/connector/http/input.py               |   3 +-
 logprep/connector/opensearch/output.py        |   1 -
 logprep/factory.py                            |   2 +-
 logprep/framework/pipeline.py                 |  16 +-
 logprep/framework/pipeline_manager.py         |  42 +----
 logprep/metrics/exporter.py                   |   2 +-
 logprep/run_logprep.py                        |  39 ++---
 logprep/runner.py                             |  12 +-
 .../auto_rule_corpus_tester.py                |  24 +--
 logprep/util/configuration.py                 | 143 ++++++++++++++++--
 logprep/util/credentials.py                   |   4 +-
 logprep/util/defaults.py                      |  38 ++++-
 logprep/util/http.py                          |  35 ++---
 logprep/util/logging.py                       |  33 ++++
 logprep/util/rule_dry_runner.py               |   5 +-
 .../exampledata/config/http_pipeline.yml      |   8 +
 quickstart/exampledata/config/pipeline.yml    |   7 +-
 tests/acceptance/test_file_input.py           |   4 +-
 tests/testdata/config/config.yml              |   6 +-
 tests/unit/framework/test_pipeline.py         |   9 +-
 tests/unit/framework/test_pipeline_manager.py |  11 --
 tests/unit/metrics/test_exporter.py           |  12 +-
 tests/unit/test_run_logprep.py                |   2 +-
 tests/unit/test_runner.py                     |   7 +-
 .../unit/util/test_auto_rule_corpus_tester.py |   6 +
 tests/unit/util/test_configuration.py         |  55 ++++++-
 tests/unit/util/test_logging.py               |  66 ++++++++
 tests/unit/util/test_rule_dry_runner.py       |   5 -
 32 files changed, 421 insertions(+), 191 deletions(-)
 create mode 100644 logprep/util/logging.py
 create mode 100644 tests/unit/util/test_logging.py

diff --git a/.gitignore b/.gitignore
index d576116e7..5834c3d3f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@ dist/
 error_file
 experiments
 **/_static/*.xlsx
+logprep.log
diff --git a/.pylintrc b/.pylintrc
index 341235a17..4a73798c3 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -13,6 +13,7 @@ disable=too-few-public-methods
 [DESIGN]
 min-public-methods=1
 max-public-methods=40
+max-attributes=12
 
 [CLASSES]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 85cf456b0..83b0e026e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,16 +6,21 @@
 
 ### Features
 
-* add fine-grained logger configuration for OpenSearch/ElasticSearch libraries
 * add gzip handling to `http_input` connector
+* add advanced logging configuration
+  * add configurable log format
+  * add configurable datetime format in logs
+  * make `hostname` available in custom log formats
+  * add fine-grained log level configuration for every logger instance
 
 ### Improvements
 
* rename `logprep.event_generator` module to `logprep.generator`
+* shorten logger instance names
 
 ### Bugfix
 
-* fixes exposing OpenSearch/ElasticSearch stacktraces in log when errors happen
+* fixes exposing OpenSearch/ElasticSearch stacktraces in the log when errors happen by making the log level configurable for the loggers `opensearch` and `elasticsearch`
 
 ## 11.2.1
diff --git a/logprep/connector/elasticsearch/output.py b/logprep/connector/elasticsearch/output.py
index d46e23712..1bcaf652d 100644
--- a/logprep/connector/elasticsearch/output.py
+++ b/logprep/connector/elasticsearch/output.py
@@ -104,9 +104,6 @@ class Config(Output.Config):
     flush_timeout: Optional[int] = field(validator=validators.instance_of(int), default=60)
     """(Optional) Timeout after :code:`message_backlog` is flushed if
     :code:`message_backlog_size` is not reached."""
-    loglevel: Optional[str] = field(validator=validators.instance_of(str), default="INFO")
-    """(Optional) Log level for the underlying library. Enables fine-grained control over the
-    logging, e.g. stacktraces can be activated or deactivated. Defaults to :code:`INFO`."""
 
     __slots__ = ["_message_backlog", "_size_error_pattern"]
 
@@ -172,7 +169,6 @@ def _search_context(self) -> search.Elasticsearch:
         elasticsearch.Elasticsearch
             the elasticsearch context
         """
-        logging.getLogger("elasticsearch").setLevel(self._config.loglevel)
         return search.Elasticsearch(
             self._config.hosts,
             scheme=self.schema,
diff --git a/logprep/connector/http/input.py b/logprep/connector/http/input.py
index 9eaf24a3e..9e9a45f2b 100644
--- a/logprep/connector/http/input.py
+++ b/logprep/connector/http/input.py
@@ -433,7 +433,6 @@ def setup(self):
             raise FatalInputError(
                 self, "Necessary instance attribute `pipeline_index` could not be found."
             )
-
         self._logger.debug(
             f"HttpInput Connector started on target {self.target} and "
             f"queue {id(self.messages)} "
@@ -462,7 +461,7 @@ def setup(self):
         app = self._get_asgi_app(endpoints_config)
 
         self.http_server = http.ThreadingHTTPServer(
-            self._config.uvicorn_config, app, daemon=False, logger_name="Logprep HTTPServer"
+            self._config.uvicorn_config, app, daemon=False, logger_name="HTTPServer"
         )
         self.http_server.start()
diff --git a/logprep/connector/opensearch/output.py b/logprep/connector/opensearch/output.py
index 91e52e784..76f26c67a 100644
--- a/logprep/connector/opensearch/output.py
+++ b/logprep/connector/opensearch/output.py
@@ -100,7 +100,6 @@ class Config(ElasticsearchOutput.Config):
 
     @cached_property
     def _search_context(self):
-        logging.getLogger("opensearch").setLevel(self._config.loglevel)
         return search.OpenSearch(
             self._config.hosts,
             scheme=self.schema,
diff --git a/logprep/factory.py b/logprep/factory.py
index 270233882..84d388b99 100644
--- a/logprep/factory.py
+++ b/logprep/factory.py
@@ -19,7 +19,7 @@
 class Factory:
     """Create components for logprep."""
 
-    _logger: "Logger" = logging.getLogger(__name__)
+    _logger: "Logger" = logging.getLogger("Factory")
 
     @classmethod
     def create(cls, configuration: dict, logger: "Logger") -> "Component":
processor") return processor - def run(self) -> None: + def run(self) -> None: # pylint: disable=method-hidden """Start processing processors in the Pipeline.""" with self._continue_iterating.get_lock(): self._continue_iterating.value = True diff --git a/logprep/framework/pipeline_manager.py b/logprep/framework/pipeline_manager.py index bbce5fb60..c672c4630 100644 --- a/logprep/framework/pipeline_manager.py +++ b/logprep/framework/pipeline_manager.py @@ -17,16 +17,6 @@ from logprep.util.configuration import Configuration -def logger_process(queue: multiprocessing.queues.Queue, logger: logging.Logger): - """Process log messages from a queue.""" - - while True: - message = queue.get() - if message is None: - break - logger.handle(message) - - class PipelineManager: """Manage pipelines via multi-processing.""" @@ -60,9 +50,8 @@ class Metrics(Component.Metrics): def __init__(self, configuration: Configuration): self.metrics = self.Metrics(labels={"component": "manager"}) - self._logger = logging.getLogger("Logprep PipelineManager") + self._logger = logging.getLogger("Manager") if multiprocessing.current_process().name == "MainProcess": - self._start_multiprocess_logger() self._set_http_input_queue(configuration) self._pipelines: list[multiprocessing.Process] = [] self._configuration = configuration @@ -86,25 +75,6 @@ def _set_http_input_queue(self, configuration): message_backlog_size = input_config.get("message_backlog_size", 15000) HttpConnector.messages = multiprocessing.Queue(maxsize=message_backlog_size) - def _start_multiprocess_logger(self): - self.log_queue = multiprocessing.Queue(-1) - self._log_process = multiprocessing.Process( - target=logger_process, args=(self.log_queue, self._logger), daemon=True - ) - self._log_process.start() - - def get_count(self) -> int: - """Get the pipeline count. - - Parameters - ---------- - count : int - The pipeline count will be incrementally changed until it reaches this value. - - """ - self._logger.debug(f"Getting pipeline count: {len(self._pipelines)}") - return len(self._pipelines) - def set_count(self, count: int): """Set the pipeline count. 
diff --git a/logprep/framework/pipeline_manager.py b/logprep/framework/pipeline_manager.py
index bbce5fb60..c672c4630 100644
--- a/logprep/framework/pipeline_manager.py
+++ b/logprep/framework/pipeline_manager.py
@@ -17,16 +17,6 @@
 from logprep.util.configuration import Configuration
 
 
-def logger_process(queue: multiprocessing.queues.Queue, logger: logging.Logger):
-    """Process log messages from a queue."""
-
-    while True:
-        message = queue.get()
-        if message is None:
-            break
-        logger.handle(message)
-
-
 class PipelineManager:
     """Manage pipelines via multi-processing."""
 
@@ -60,9 +50,8 @@ class Metrics(Component.Metrics):
 
     def __init__(self, configuration: Configuration):
         self.metrics = self.Metrics(labels={"component": "manager"})
-        self._logger = logging.getLogger("Logprep PipelineManager")
+        self._logger = logging.getLogger("Manager")
         if multiprocessing.current_process().name == "MainProcess":
-            self._start_multiprocess_logger()
             self._set_http_input_queue(configuration)
         self._pipelines: list[multiprocessing.Process] = []
         self._configuration = configuration
@@ -86,25 +75,6 @@ def _set_http_input_queue(self, configuration):
         message_backlog_size = input_config.get("message_backlog_size", 15000)
         HttpConnector.messages = multiprocessing.Queue(maxsize=message_backlog_size)
 
-    def _start_multiprocess_logger(self):
-        self.log_queue = multiprocessing.Queue(-1)
-        self._log_process = multiprocessing.Process(
-            target=logger_process, args=(self.log_queue, self._logger), daemon=True
-        )
-        self._log_process.start()
-
-    def get_count(self) -> int:
-        """Get the pipeline count.
-
-        Parameters
-        ----------
-        count : int
-            The pipeline count will be incrementally changed until it reaches this value.
-
-        """
-        self._logger.debug(f"Getting pipeline count: {len(self._pipelines)}")
-        return len(self._pipelines)
-
     def set_count(self, count: int):
         """Set the pipeline count.
 
@@ -161,9 +131,6 @@ def stop(self):
             self._decrease_to_count(0)
         if self.prometheus_exporter:
             self.prometheus_exporter.cleanup_prometheus_multiprocess_dir()
-        self.log_queue.put(None)  # signal the logger process to stop
-        self._log_process.join()
-        self.log_queue.close()
 
     def restart(self):
         """Restarts all pipelines"""
@@ -175,12 +142,7 @@ def restart(self):
             self.prometheus_exporter.run()
 
     def _create_pipeline(self, index) -> multiprocessing.Process:
-        pipeline = Pipeline(
-            pipeline_index=index,
-            config=self._configuration,
-            log_queue=self.log_queue,
-            lock=self._lock,
-        )
+        pipeline = Pipeline(pipeline_index=index, config=self._configuration, lock=self._lock)
         self._logger.info("Created new pipeline")
         process = multiprocessing.Process(target=pipeline.run, daemon=True)
         process.stop = pipeline.stop
diff --git a/logprep/metrics/exporter.py b/logprep/metrics/exporter.py
index d85b6d3f0..69fa8a08f 100644
--- a/logprep/metrics/exporter.py
+++ b/logprep/metrics/exporter.py
@@ -15,7 +15,7 @@ class PrometheusExporter:
 
     def __init__(self, configuration: MetricsConfig):
         self.is_running = False
-        logger_name = "Prometheus Exporter"
+        logger_name = "Exporter"
         self._logger = getLogger(logger_name)
         self._logger.debug("Initializing Prometheus Exporter")
         self.configuration = configuration
diff --git a/logprep/run_logprep.py b/logprep/run_logprep.py
index d67afdbe2..a1ef92c31 100644
--- a/logprep/run_logprep.py
+++ b/logprep/run_logprep.py
@@ -1,6 +1,7 @@
 # pylint: disable=logging-fstring-interpolation
 """This module can be used to start the logprep."""
 import logging
+import logging.config
 import os
 import signal
 import sys
@@ -12,21 +13,17 @@
 from logprep.generator.http.controller import Controller
 from logprep.generator.kafka.run_load_tester import LoadTester
 from logprep.runner import Runner
-from logprep.util import defaults
 from logprep.util.auto_rule_tester.auto_rule_corpus_tester import RuleCorpusTester
 from logprep.util.auto_rule_tester.auto_rule_tester import AutoRuleTester
 from logprep.util.configuration import Configuration, InvalidConfigurationError
+from logprep.util.defaults import DEFAULT_LOG_CONFIG
 from logprep.util.helper import get_versions_string, print_fcolor
 from logprep.util.rule_dry_runner import DryRunner
 
 warnings.simplefilter("always", DeprecationWarning)
 logging.captureWarnings(True)
-
-logging.getLogger("filelock").setLevel(logging.ERROR)
-logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR)
-logging.getLogger("elasticsearch").setLevel(logging.ERROR)
-
-
+logging.config.dictConfig(DEFAULT_LOG_CONFIG)
+logger = logging.getLogger("logprep")
 EPILOG_STR = "Check out our docs at https://logprep.readthedocs.io/en/latest/"
 
@@ -35,17 +32,13 @@ def _print_version(config: "Configuration") -> None:
     sys.exit(0)
 
-def _get_logger(logger_config: dict) -> logging.Logger:
-    log_level = logger_config.get("level", "INFO")
-    logging.basicConfig(level=log_level, format=defaults.DEFAULT_LOG_FORMAT)
-    logger = logging.getLogger("Logprep")
-    logger.setLevel(log_level)
-    return logger
-
-
 def _get_configuration(config_paths: list[str]) -> Configuration:
     try:
-        return Configuration.from_sources(config_paths)
+        config = Configuration.from_sources(config_paths)
+        config.logger.setup_logging()
+        logger = logging.getLogger("root")  # pylint: disable=redefined-outer-name
+        logger.info(f"Log level set to '{logging.getLevelName(logger.level)}'")
+        return config
     except InvalidConfigurationError as error:
         print(f"InvalidConfigurationError: {error}", file=sys.stderr)
         sys.exit(1)
@@ -80,8 +73,6 @@ def run(configs: tuple[str], version=None) -> None:
     configuration = _get_configuration(configs)
     if version:
         _print_version(configuration)
-    logger = _get_logger(configuration.logger)
-    logger.info(f"Log level set to '{logging.getLevelName(logger.level)}'")
     for version in get_versions_string(configuration).split("\n"):
         logger.info(version)
     logger.debug(f"Metric export enabled: {configuration.metrics.enabled}")
@@ -150,7 +141,7 @@ def dry_run(configs: tuple[str], events: str, input_type: str, full_output: bool
     """
     config = _get_configuration(configs)
     json_input = input_type == "json"
-    dry_runner = DryRunner(events, config, full_output, json_input, logging.getLogger("DryRunner"))
+    dry_runner = DryRunner(events, config, full_output, json_input)
     dry_runner.run()
 
@@ -270,7 +261,7 @@ def generate_kafka(config, file):
 @click.option(
     "--loglevel",
     help="Sets the log level for the logger.",
-    type=click.Choice(logging._levelToName.values()),
+    type=click.Choice(logging._levelToName.values()),  # pylint: disable=protected-access
     required=False,
     default="INFO",
 )
@@ -286,12 +277,8 @@ def generate_http(**kwargs):
     Generates events based on templated sample files stored inside a dataset directory.
     The events will be sent to a http endpoint.
     """
-    log_level = kwargs.get("loglevel")
-    logging.basicConfig(
-        level=log_level, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    )
-    logger = logging.getLogger("generator")
-    logger.info(f"Log level set to '{log_level}'")
+    generator_logger = logging.getLogger("Generator")
+    generator_logger.info(f"Log level set to '{logging.getLevelName(generator_logger.level)}'")
     generator = Controller(**kwargs)
     generator.run()
diff --git a/logprep/runner.py b/logprep/runner.py
index d28366cb1..aa09dc42c 100644
--- a/logprep/runner.py
+++ b/logprep/runner.py
@@ -130,7 +130,7 @@ def get_runner(configuration: Configuration) -> "Runner":
     def __init__(self, configuration: Configuration) -> None:
         self._configuration = configuration
         self.metrics = self.Metrics(labels={"logprep": "unset", "config": "unset"})
-        self._logger = logging.getLogger("Logprep Runner")
+        self._logger = logging.getLogger("Runner")
         self._manager = PipelineManager(configuration)
         self.scheduler = Scheduler()
 
@@ -147,15 +147,17 @@ def start(self):
         self._manager.restart()
         self._logger.info("Startup complete")
         self._logger.debug("Runner iterating")
+        self._iterate()
+        self._logger.info("Shutting down")
+        self._manager.stop()
+        self._logger.info("Shutdown complete")
+
+    def _iterate(self):
         for _ in self._keep_iterating():
             if self._exit_received:
                 break
             self.scheduler.run_pending()
             self._manager.restart_failed_pipeline()
-        self._logger.info("Shutting down")
-        self._logger.info("Initiated shutdown")
-        self._manager.stop()
-        self._logger.info("Shutdown complete")
 
     def reload_configuration(self):
         """Reloads the configuration"""
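Logging is now set up in two stages: :code:`run_logprep` applies :code:`DEFAULT_LOG_CONFIG` at import time so that even early messages are formatted consistently, and :code:`_get_configuration` later re-applies the user-supplied settings via :code:`config.logger.setup_logging()`. A condensed sketch of that order (the configuration path is a hypothetical example):

.. code-block:: python

   import logging
   import logging.config

   from logprep.util.configuration import Configuration
   from logprep.util.defaults import DEFAULT_LOG_CONFIG

   logging.config.dictConfig(DEFAULT_LOG_CONFIG)  # stage 1: defaults at import time
   logging.getLogger("logprep").info("before the configuration is loaded")

   config = Configuration.from_sources(["/etc/logprep/pipeline.yml"])  # hypothetical path
   config.logger.setup_logging()  # stage 2: user overrides from the loaded configuration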
diff --git a/logprep/util/auto_rule_tester/auto_rule_corpus_tester.py b/logprep/util/auto_rule_tester/auto_rule_corpus_tester.py
index fdc0b02dd..b7d8be6a2 100644
--- a/logprep/util/auto_rule_tester/auto_rule_corpus_tester.py
+++ b/logprep/util/auto_rule_tester/auto_rule_corpus_tester.py
@@ -91,7 +91,7 @@
 import tempfile
 from functools import cached_property
 from json import JSONDecodeError
-from logging import getLogger
+from logging.config import dictConfig
 from pathlib import Path
 from pprint import pprint
 from typing import List
@@ -152,7 +152,9 @@ class TestCase:
     def __init__(self, config_paths: tuple[str], input_test_data_path: str):
         self._original_config_paths = config_paths
         self._input_test_data_path = input_test_data_path
-        self.log_capture_string = None
+        self.log_capture_string = io.StringIO()
+        self.logger = logging.getLogger("corpustester")
+        self.logger.handlers[0].setStream(self.log_capture_string)
 
     @cached_property
     def _tmp_dir(self):
@@ -173,17 +175,6 @@ def _test_cases(self):
             raise ValueError(f"The following TestCases have no input documents: {no_input_files}")
         return dict(sorted(test_cases.items()))
 
-    @cached_property
-    def _logprep_logger(self):
-        logprep_logger = getLogger("logprep-rule-corpus-tester")
-        logprep_logger.propagate = False
-        logprep_logger.setLevel(logging.WARNING)
-        self.log_capture_string = io.StringIO()
-        self.stream_handler = logging.StreamHandler(self.log_capture_string)
-        self.stream_handler.setLevel(logging.WARNING)
-        logprep_logger.addHandler(self.stream_handler)
-        return logprep_logger
-
     @cached_property
     def _pipeline(self):
         merged_input_file_path = Path(self._tmp_dir) / "input.json"
@@ -202,7 +193,7 @@ def _pipeline(self):
         }
         patched_config.pipeline = config.pipeline
         pipeline = Pipeline(config=patched_config)
-        pipeline.logger = self._logprep_logger
+        pipeline.logger = self.logger
         return pipeline
 
     def run(self):
@@ -241,10 +232,7 @@ def _retrieve_log_capture(self):
         log_capture = self.log_capture_string.getvalue()
         # set new log_capture to clear previous entries
         self.log_capture_string = io.StringIO()
-        self.stream_handler = logging.StreamHandler(self.log_capture_string)
-        self.stream_handler.setLevel(logging.WARNING)
-        self._logprep_logger.handlers.clear()
-        self._logprep_logger.addHandler(self.stream_handler)
+        self.logger.handlers[0].setStream(self.log_capture_string)
         return log_capture
 
     def _compare_logprep_outputs(self, test_case_id, logprep_output):
diff --git a/logprep/util/configuration.py b/logprep/util/configuration.py
index 3ccb8fa4f..de407438c 100644
--- a/logprep/util/configuration.py
+++ b/logprep/util/configuration.py
@@ -200,10 +200,12 @@
 """
 
 import json
+import logging
 import os
 from copy import deepcopy
 from itertools import chain
 from logging import getLogger
+from logging.config import dictConfig
 from pathlib import Path
 from typing import Any, Iterable, List, Optional
 
@@ -222,6 +224,7 @@
 from logprep.util.credentials import CredentialsEnvNotFoundError, CredentialsFactory
 from logprep.util.defaults import (
     DEFAULT_CONFIG_LOCATION,
+    DEFAULT_LOG_CONFIG,
     ENV_NAME_LOGPREP_CREDENTIALS_FILE,
 )
 from logprep.util.getter import GetterFactory, GetterNotFoundError
@@ -319,6 +322,127 @@ class MetricsConfig:
     )
 
 
+@define(kw_only=True)
+class LoggerConfig:
+    """The logger config class used in Configuration.
+
+    The schema for this class is derived from the Python logging module:
+    https://docs.python.org/3/library/logging.config.html#dictionary-schema-details
+    """
+
+    _LOG_LEVELS = (
+        logging.NOTSET,  # 0
+        logging.DEBUG,  # 10
+        logging.INFO,  # 20
+        logging.WARNING,  # 30
+        logging.ERROR,  # 40
+        logging.CRITICAL,  # 50
+    )
+
+    version: int = field(validator=validators.instance_of(int), default=1)
+    formatters: dict = field(validator=validators.instance_of(dict), factory=dict)
+    filters: dict = field(validator=validators.instance_of(dict), factory=dict)
+    handlers: dict = field(validator=validators.instance_of(dict), factory=dict)
+    disable_existing_loggers: bool = field(validator=validators.instance_of(bool), default=False)
+    level: str = field(
+        default="INFO",
+        validator=[
+            validators.instance_of(str),
+            validators.in_([logging.getLevelName(level) for level in _LOG_LEVELS]),
+        ],
+        eq=False,
+    )
+    """The log level of the root logger. Defaults to :code:`INFO`.
+
+    .. security-best-practice::
+       :title: Logprep Log-Level
+       :location: config.logger.level
+       :suggested-value: INFO
+
+       The log level of the root logger should be set to :code:`INFO` or higher in production
+       environments to avoid exposing sensitive information in the logs.
+    """
+    format: str = field(default="", validator=[validators.instance_of(str)], eq=False)
+    """The format of the log message as supported by the :code:`LogprepFormatter`.
+    Defaults to :code:`"%(asctime)-15s %(name)-10s %(levelname)-8s: %(message)s"`.
+
+    .. autoclass:: logprep.util.logging.LogprepFormatter
+       :no-index:
+
+    """
+    datefmt: str = field(default="", validator=[validators.instance_of(str)], eq=False)
+    """The date format of the log message. Defaults to :code:`"%Y-%m-%d %H:%M:%S"`."""
+    loggers: dict = field(validator=validators.instance_of(dict), factory=dict)
+    """The log level configuration for individual loggers. Defaults to:
+
+    .. csv-table::
+
+       "root", "INFO"
+       "filelock", "ERROR"
+       "urllib3.connectionpool", "ERROR"
+       "elasticsearch", "ERROR"
+       "opensearch", "ERROR"
+       "uvicorn", "INFO"
+       "uvicorn.access", "INFO"
+       "uvicorn.error", "INFO"
+
+    You can alter the log level of a logger by adding it to the loggers mapping as in the
+    example. Logprep opts out of hierarchical loggers, so it is possible to set the log level
+    for all loggers to :code:`INFO` via the :code:`root` logger and then set the log level for
+    specific loggers like :code:`Runner` to :code:`DEBUG` to get DEBUG messages only from the
+    Runner instance.
+
+    If you want to silence other loggers like :code:`py.warnings`, you can set their log level
+    to :code:`ERROR` here.
+
+    .. code-block:: yaml
+       :caption: Example of a custom logger configuration
+
+       logger:
+         level: ERROR
+         format: "%(asctime)-15s %(hostname)-5s %(name)-10s %(levelname)-8s: %(message)s"
+         datefmt: "%Y-%m-%d %H:%M:%S"
+         loggers:
+           "py.warnings": {"level": "ERROR"}
+           "Runner": {"level": "DEBUG"}
+
+    """
+
+    def __attrs_post_init__(self) -> None:
+        """Create a LoggerConfig from a logprep logger configuration."""
+        self._set_defaults()
+        if not self.level:
+            self.level = DEFAULT_LOG_CONFIG.get("loggers", {}).get("root", {}).get("level", "INFO")
+        if self.loggers:
+            self._set_loggers_levels()
+        self.loggers = {**DEFAULT_LOG_CONFIG["loggers"] | self.loggers}
+        self.loggers.get("root", {}).update({"level": self.level})
+
+    def setup_logging(self) -> None:
+        """Set up the logging configuration.
+
+        This is called in the :code:`logprep.run_logprep` module. We have to write the
+        configuration to the environment variable :code:`LOGPREP_LOG_CONFIG` to make it
+        available for the uvicorn server in :code:`logprep.util.http`.
+        """
+        log_config = asdict(self)
+        os.environ["LOGPREP_LOG_CONFIG"] = json.dumps(log_config)
+        dictConfig(log_config)
+
+    def _set_loggers_levels(self):
+        """Set the loggers' levels to the default or to the given level."""
+        for logger_name, logger_config in self.loggers.items():
+            default_logger_config = deepcopy(DEFAULT_LOG_CONFIG.get(logger_name, {}))
+            if "level" in logger_config:
+                default_logger_config.update({"level": logger_config["level"]})
+            self.loggers[logger_name].update(default_logger_config)
+
+    def _set_defaults(self):
+        """Reset all keys to the defined defaults, except :code:`loggers`."""
+        for key, value in DEFAULT_LOG_CONFIG.items():
+            if key == "loggers":
+                continue
+            setattr(self, key, value)
+
+
 @define(kw_only=True)
 class Configuration:
     """the configuration class"""
 
@@ -374,18 +498,19 @@ class Configuration:
     processing power. This can be useful for testing and debugging.
     Larger values (like 5.0) slow the reaction time down, but this requires less
     processing power, which makes it preferable for continuous operation. Defaults to :code:`5.0`."""
-    logger: dict = field(
-        validator=validators.instance_of(dict), default={"level": "INFO"}, eq=False
+    logger: LoggerConfig = field(
+        validator=validators.instance_of(LoggerConfig),
+        default=LoggerConfig(**DEFAULT_LOG_CONFIG),
+        eq=False,
+        converter=lambda x: LoggerConfig(**x) if isinstance(x, dict) else x,
     )
-    """Logger configuration. Defaults to :code:`{"level": "INFO"}`.
+    """Logger configuration.
 
-    .. security-best-practice::
-       :title: Logprep Log-Level
-       :location: config.logger.level
-       :suggested-value: INFO
+    .. autoclass:: logprep.util.configuration.LoggerConfig
+       :no-index:
+       :no-undoc-members:
+       :members: level, format, datefmt, loggers
 
-    The loglevel of logprep should be set to :code:`"INFO"` in production environments, as the
-    :code:`"DEBUG"` level could expose sensitive events into the log.
     """
     input: dict = field(validator=validators.instance_of(dict), factory=dict, eq=False)
     """
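How the defaults and the user-supplied values merge can be seen in a small example; a sketch based on the :code:`__attrs_post_init__` logic above and the unit tests further down in this patch:

.. code-block:: python

   from logprep.util.configuration import LoggerConfig

   config = LoggerConfig(level="DEBUG", loggers={"Runner": {"level": "DEBUG"}})
   assert config.loggers["root"]["level"] == "DEBUG"  # the root logger follows the global level
   assert config.loggers["Runner"]["level"] == "DEBUG"  # explicit per-logger override
   assert config.loggers["opensearch"]["level"] == "ERROR"  # library defaults stay in place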
""" input: dict = field(validator=validators.instance_of(dict), factory=dict, eq=False) """ diff --git a/logprep/util/credentials.py b/logprep/util/credentials.py index 099361dcd..54ce955b7 100644 --- a/logprep/util/credentials.py +++ b/logprep/util/credentials.py @@ -129,7 +129,7 @@ class CredentialsEnvNotFoundError(Exception): class CredentialsFactory: """Factory class to create credentials for a given target URL.""" - _logger = logging.getLogger(__name__) + _logger = logging.getLogger("Credentials") @classmethod def from_target(cls, target_url: str) -> "Credentials": @@ -402,7 +402,7 @@ def is_expired(self) -> bool: class Credentials: """Abstract Base Class for Credentials""" - _logger = logging.getLogger(__name__) + _logger = logging.getLogger("Credentials") _session: Session = field(validator=validators.instance_of((Session, type(None))), default=None) diff --git a/logprep/util/defaults.py b/logprep/util/defaults.py index 6dd2558a5..ab260bf26 100644 --- a/logprep/util/defaults.py +++ b/logprep/util/defaults.py @@ -1,5 +1,41 @@ """Default values for logprep.""" DEFAULT_CONFIG_LOCATION = "file:///etc/logprep/pipeline.yml" -DEFAULT_LOG_FORMAT = "%(asctime)-15s %(name)-5s %(levelname)-8s: %(message)s" +DEFAULT_LOG_FORMAT = "%(asctime)-15s %(process)-6s %(name)-10s %(levelname)-8s: %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +# dictconfig as described in +# https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema +DEFAULT_LOG_CONFIG = { + "version": 1, + "formatters": { + "logprep": { + "class": "logprep.util.logging.LogprepFormatter", + "format": DEFAULT_LOG_FORMAT, + "datefmt": DEFAULT_LOG_DATE_FORMAT, + } + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": "logprep", + "stream": "ext://sys.stdout", + }, + "string": {"class": "logging.StreamHandler", "level": "WARNING"}, + }, + "loggers": { + "root": {"level": "INFO", "handlers": ["console"]}, + "filelock": {"level": "ERROR"}, + "urllib3.connectionpool": {"level": "ERROR"}, + "elasticsearch": {"level": "ERROR"}, + "opensearch": {"level": "ERROR"}, + "corpustester": { + "level": "WARNING", + "handlers": ["string"], + "propagate": "1", + }, + }, + "filters": {}, + "disable_existing_loggers": False, +} ENV_NAME_LOGPREP_CREDENTIALS_FILE = "LOGPREP_CREDENTIALS_FILE" diff --git a/logprep/util/http.py b/logprep/util/http.py index 28b059fd9..8c75450b8 100644 --- a/logprep/util/http.py +++ b/logprep/util/http.py @@ -1,12 +1,14 @@ """logprep http utils""" import inspect +import json import logging +import os import threading import uvicorn -from logprep.util import defaults +from logprep.util.defaults import DEFAULT_LOG_CONFIG uvicorn_parameter_keys = inspect.signature(uvicorn.Config).parameters.keys() UVICORN_CONFIG_KEYS = [ @@ -24,15 +26,6 @@ class ThreadingHTTPServer: # pylint: disable=too-many-instance-attributes _instance = None _lock = threading.Lock() - @property - def _log_config(self) -> dict: - """Use for Uvicorn same log formatter like for Logprep""" - log_config = uvicorn.config.LOGGING_CONFIG - log_config["formatters"]["default"]["fmt"] = defaults.DEFAULT_LOG_FORMAT - log_config["formatters"]["access"]["fmt"] = defaults.DEFAULT_LOG_FORMAT - log_config["handlers"]["default"]["stream"] = "ext://sys.stdout" - return log_config - def __new__(cls, *args, **kwargs): with cls._lock: if not cls._instance: @@ -72,9 +65,13 @@ def __init__( uvicorn_config = {**internal_uvicorn_config, **uvicorn_config} self._logger_name = logger_name self._logger = 
diff --git a/logprep/util/http.py b/logprep/util/http.py
index 28b059fd9..8c75450b8 100644
--- a/logprep/util/http.py
+++ b/logprep/util/http.py
@@ -1,12 +1,14 @@
 """logprep http utils"""
 
 import inspect
+import json
 import logging
+import os
 import threading
 
 import uvicorn
 
-from logprep.util import defaults
+from logprep.util.defaults import DEFAULT_LOG_CONFIG
 
 uvicorn_parameter_keys = inspect.signature(uvicorn.Config).parameters.keys()
 UVICORN_CONFIG_KEYS = [
@@ -24,15 +26,6 @@ class ThreadingHTTPServer:  # pylint: disable=too-many-instance-attributes
     _instance = None
     _lock = threading.Lock()
 
-    @property
-    def _log_config(self) -> dict:
-        """Use for Uvicorn same log formatter like for Logprep"""
-        log_config = uvicorn.config.LOGGING_CONFIG
-        log_config["formatters"]["default"]["fmt"] = defaults.DEFAULT_LOG_FORMAT
-        log_config["formatters"]["access"]["fmt"] = defaults.DEFAULT_LOG_FORMAT
-        log_config["handlers"]["default"]["stream"] = "ext://sys.stdout"
-        return log_config
-
     def __new__(cls, *args, **kwargs):
         with cls._lock:
             if not cls._instance:
@@ -72,9 +65,13 @@ def __init__(
             uvicorn_config = {**internal_uvicorn_config, **uvicorn_config}
         self._logger_name = logger_name
         self._logger = logging.getLogger(self._logger_name)
-        uvicorn_config = uvicorn.Config(**uvicorn_config, app=app, log_config=self._log_config)
+        logprep_log_config = json.loads(
+            os.environ.get("LOGPREP_LOG_CONFIG", json.dumps(DEFAULT_LOG_CONFIG))
+        )
+        uvicorn_config = uvicorn.Config(**uvicorn_config, app=app, log_config=logprep_log_config)
+        logging.getLogger("uvicorn.access").name = self._logger_name
+        logging.getLogger("uvicorn.error").name = self._logger_name
         self.server = uvicorn.Server(uvicorn_config)
-        self._override_runtime_logging()
         self.thread = threading.Thread(daemon=daemon, target=self.server.run)
 
     def start(self):
@@ -96,17 +93,3 @@ def shut_down(self):
                 self._logger.debug("Wait for server to exit gracefully...")
                 continue
         self.thread.join()
-
-    def _override_runtime_logging(self):
-        """Uvicorn doesn't provide API to change name and handler beforehand
-        needs to be done during runtime"""
-        for logger_name in ["uvicorn", "uvicorn.access"]:
-            registered_handlers = logging.getLogger(logger_name).handlers
-            if not registered_handlers:
-                continue
-            logging.getLogger(logger_name).removeHandler(registered_handlers[0])
-            logging.getLogger(logger_name).addHandler(
-                logging.getLogger("Logprep").parent.handlers[0]
-            )
-        logging.getLogger("uvicorn.access").name = self._logger_name
-        logging.getLogger("uvicorn.error").name = self._logger_name
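The server process reads the serialized configuration back from the environment, so the parent process and the uvicorn server agree on log format and levels. The round trip, reduced to its core (the configuration dict is a toy value):

.. code-block:: python

   import json
   import os
   from logging.config import dictConfig

   # writer side, see LoggerConfig.setup_logging in logprep/util/configuration.py
   log_config = {"version": 1, "loggers": {"root": {"level": "INFO"}}}
   os.environ["LOGPREP_LOG_CONFIG"] = json.dumps(log_config)

   # reader side, see ThreadingHTTPServer.__init__ in logprep/util/http.py
   dictConfig(json.loads(os.environ["LOGPREP_LOG_CONFIG"]))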
diff --git a/logprep/util/logging.py b/logprep/util/logging.py
new file mode 100644
index 000000000..2559c6109
--- /dev/null
+++ b/logprep/util/logging.py
@@ -0,0 +1,33 @@
+"""helper classes for logprep logging"""
+
+import logging
+import logging.handlers
+from socket import gethostname
+
+
+class LogprepFormatter(logging.Formatter):
+    """
+    A custom formatter for logprep logging with additional attributes.
+
+    The Formatter can be initialized with a format string which makes use of
+    knowledge of the LogRecord attributes - e.g. the default value mentioned
+    above makes use of the fact that the user's message and arguments are
+    pre-formatted into a LogRecord's message attribute. The available attributes
+    are listed in the
+    `python documentation <https://docs.python.org/3/library/logging.html#logrecord-attributes>`_ .
+    Additionally, the formatter provides the following logprep specific attributes:
+
+    .. table::
+
+       +-----------------------+--------------------------------------------------+
+       | attribute             | description                                      |
+       +=======================+==================================================+
+       | %(hostname)           | (Logprep specific) The hostname of the machine   |
+       |                       | where the log was emitted                        |
+       +-----------------------+--------------------------------------------------+
+
+    """
+
+    def format(self, record):
+        record.hostname = gethostname()
+        return super().format(record)
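Because :code:`hostname` is injected into every record, any format string may reference it. A minimal, self-contained check, mirroring the unit tests added below:

.. code-block:: python

   import logging
   from socket import gethostname

   from logprep.util.logging import LogprepFormatter

   formatter = LogprepFormatter(
       fmt="%(asctime)-15s %(hostname)-5s %(name)-10s %(levelname)-8s: %(message)s"
   )
   record = logging.LogRecord("test", logging.INFO, "test.py", 1, "test message", None, None)
   assert gethostname() in formatter.format(record)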
diff --git a/logprep/util/rule_dry_runner.py b/logprep/util/rule_dry_runner.py
index 0a1375960..bec18f3a9 100644
--- a/logprep/util/rule_dry_runner.py
+++ b/logprep/util/rule_dry_runner.py
@@ -40,6 +40,7 @@
 """
 
 import json
+import logging
 import shutil
 import tempfile
 from copy import deepcopy
@@ -95,13 +96,13 @@ def _input_documents(self):
         return document_getter.get_jsonl()
 
     def __init__(
-        self, input_file_path: str, config: Configuration, full_output: bool, use_json: bool, logger
+        self, input_file_path: str, config: Configuration, full_output: bool, use_json: bool
     ):
         self._input_file_path = input_file_path
         self._config = config
         self._full_output = full_output
         self._use_json = use_json
-        self._logger = logger
+        self._logger = logging.getLogger("DryRunner")
 
     def run(self):
         """Run the dry runner."""
diff --git a/quickstart/exampledata/config/http_pipeline.yml b/quickstart/exampledata/config/http_pipeline.yml
index dfc41fd10..68236582d 100644
--- a/quickstart/exampledata/config/http_pipeline.yml
+++ b/quickstart/exampledata/config/http_pipeline.yml
@@ -1,8 +1,16 @@
 version: 1
 process_count: 2
+config_refresh_interval: 10
 logger:
   level: INFO
+  loggers:
+    uvicorn:
+      level: INFO
+    uvicorn.access:
+      level: INFO
+    uvicorn.error:
+      level: INFO
 
 metrics:
   enabled: true
diff --git a/quickstart/exampledata/config/pipeline.yml b/quickstart/exampledata/config/pipeline.yml
index 93e8be56f..f28b8c6f5 100644
--- a/quickstart/exampledata/config/pipeline.yml
+++ b/quickstart/exampledata/config/pipeline.yml
@@ -3,7 +3,11 @@ process_count: 2
 timeout: 0.1
 logger:
   level: INFO
-
+  format: "%(asctime)-15s %(hostname)-5s %(name)-10s %(levelname)-8s: %(message)s"
+  datefmt: "%Y-%m-%d %H:%M:%S"
+  loggers:
+    "py.warnings": {"level": "ERROR"}
+    "Runner": {"level": "DEBUG"}
 metrics:
   enabled: true
   port: 8001
@@ -118,7 +122,6 @@ output:
     parallel_bulk: false
     user: admin
     secret: admin
-    loglevel: ERROR
   kafka:
     type: confluentkafka_output
     default: false
diff --git a/tests/acceptance/test_file_input.py b/tests/acceptance/test_file_input.py
index 3e3ab59a9..b1128239e 100644
--- a/tests/acceptance/test_file_input.py
+++ b/tests/acceptance/test_file_input.py
@@ -83,7 +83,7 @@ def test_file_input_accepts_message_for_single_pipeline(tmp_path, config: Config
     config_path.write_text(config.as_yaml())
     write_file(str(input_path), test_initial_log_data)
     proc = start_logprep(config_path)
-    wait_for_output(proc, "Logprep INFO : Log level set to 'INFO'")
+    wait_for_output(proc, "Runner INFO : Startup complete")
     wait_for_interval(4 * CHECK_INTERVAL)
     assert test_initial_log_data[0] in output_path.read_text()
@@ -98,6 +98,6 @@ def test_file_input_accepts_message_for_two_pipelines(tmp_path, config: Configur
     config_path.write_text(config.as_yaml())
     write_file(str(input_path), test_initial_log_data)
     proc = start_logprep(config_path)
-    wait_for_output(proc, "Logprep INFO : Log level set to 'INFO'")
+    wait_for_output(proc, "Runner INFO : Startup complete")
     wait_for_interval(4 * CHECK_INTERVAL)
     assert test_initial_log_data[0] in output_path.read_text()
diff --git a/tests/testdata/config/config.yml b/tests/testdata/config/config.yml
index 556f724d7..846c20aaf 100644
--- a/tests/testdata/config/config.yml
+++ b/tests/testdata/config/config.yml
@@ -44,9 +44,9 @@ input:
     kafka_config:
       bootstrap.servers: "127.0.0.1:9092"
       group.id: "cgroup"
-      auto.commit: "true"
-      session.timeout: "6000"
-      offset.reset.policy: "smallest"
+      enable.auto.commit: "true"
+      session.timeout.ms: "6000"
+      auto.offset.reset: "smallest"
       enable.auto.offset.store: "true"
 output:
   kafka_output:
diff --git a/tests/unit/framework/test_pipeline.py b/tests/unit/framework/test_pipeline.py
index 1f6351c45..19cda2ce6 100644
--- a/tests/unit/framework/test_pipeline.py
+++ b/tests/unit/framework/test_pipeline.py
@@ -1,6 +1,8 @@
 # pylint: disable=missing-docstring
 # pylint: disable=protected-access
 # pylint: disable=attribute-defined-outside-init
+import logging
+import multiprocessing
 from copy import deepcopy
 from logging import DEBUG, getLogger
 from multiprocessing import Lock
@@ -56,7 +58,6 @@ def setup_method(self):
         self.pipeline = Pipeline(
             pipeline_index=1,
             config=self.logprep_config,
-            log_queue=mock.MagicMock(),
             lock=self.lock,
         )
 
@@ -112,7 +113,8 @@ def test_empty_documents_are_not_forwarded_to_other_processors(self, _):
         )
         deleter_processor._specific_tree.add_rule(deleter_rule)
         self.pipeline._pipeline = [mock.MagicMock(), deleter_processor, mock.MagicMock()]
-        self.pipeline.logger.setLevel(DEBUG)
+        logger = logging.getLogger("Pipeline")
+        logger.setLevel(DEBUG)
         while self.pipeline._input._documents:
             self.pipeline.process_pipeline()
         assert len(self.pipeline._input._documents) == 0, "all events were processed"
@@ -478,8 +480,9 @@ def test_shut_down_drains_input_queues(self, _):
                 "endpoints": {"/json": "json", "/jsonl": "jsonl", "/plaintext": "plaintext"},
             }
         }
-        self.pipeline._input = original_create(input_config, self.pipeline.logger)
+        self.pipeline._input = original_create(input_config, mock.MagicMock())
         self.pipeline._input.pipeline_index = 1
+        self.pipeline._input.messages = multiprocessing.Queue(-1)
         self.pipeline._input.setup()
         self.pipeline._input.messages.put({"message": "test message"})
         assert self.pipeline._input.messages.qsize() == 1
diff --git a/tests/unit/framework/test_pipeline_manager.py b/tests/unit/framework/test_pipeline_manager.py
index 141f97568..584eb5d8a 100644
--- a/tests/unit/framework/test_pipeline_manager.py
+++ b/tests/unit/framework/test_pipeline_manager.py
@@ -24,12 +24,6 @@ def setup_class(self):
     def teardown_method(self):
         self.manager._pipelines = []
 
-    def test_get_count_returns_count_of_pipelines(self):
-        for count in range(5):
-            self.manager.set_count(count)
-
-            assert self.manager.get_count() == count
-
     def test_decrease_to_count_removes_required_number_of_pipelines(self):
         self.manager._increase_to_count(3)
 
@@ -150,11 +144,6 @@ def test_prometheus_exporter_is_instanciated_if_metrics_enabled(self):
         manager = PipelineManager(config)
         assert isinstance(manager.prometheus_exporter, PrometheusExporter)
 
-    def test_stop_closes_log_queue(self):
-        with mock.patch.object(self.manager, "log_queue") as log_queue_mock:
-            self.manager.stop()
-            log_queue_mock.close.assert_called()
-
     def test_set_count_increases_number_of_pipeline_starts_metric(self):
         self.manager.metrics.number_of_pipeline_starts = 0
         self.manager.set_count(2)
diff --git a/tests/unit/metrics/test_exporter.py b/tests/unit/metrics/test_exporter.py
index 96cff9026..44097a4a8 100644
--- a/tests/unit/metrics/test_exporter.py
+++ b/tests/unit/metrics/test_exporter.py
@@ -2,14 +2,15 @@
 # pylint: disable=protected-access
 # pylint: disable=attribute-defined-outside-init
 # pylint: disable=line-too-long
-import logging
 import os.path
+from logging.config import dictConfig
 from unittest import mock
 
 from prometheus_client import REGISTRY
 
 from logprep.metrics.exporter import PrometheusExporter
 from logprep.util.configuration import MetricsConfig
+from logprep.util.defaults import DEFAULT_LOG_CONFIG
 
 @mock.patch(
@@ -31,13 +32,10 @@ def test_default_port_if_missing_in_config(self):
         assert exporter._port == 8000
 
     @mock.patch("logprep.util.http.ThreadingHTTPServer.start")
-    def test_run_starts_http_server(self, mock_http_server_start, caplog):
-        with caplog.at_level(logging.INFO):
-            exporter = PrometheusExporter(self.metrics_config)
-            exporter.run()
-
+    def test_run_starts_http_server(self, mock_http_server_start):
+        exporter = PrometheusExporter(self.metrics_config)
+        exporter.run()
         mock_http_server_start.assert_called()
-        assert f"Prometheus Exporter started on port {exporter._port}" in caplog.text
 
     def test_cleanup_prometheus_multiprocess_dir_deletes_temp_dir_contents_but_not_the_dir_itself(
         self, tmp_path
diff --git a/tests/unit/test_run_logprep.py b/tests/unit/test_run_logprep.py
index 94c73d5ef..dbfffad5a 100644
--- a/tests/unit/test_run_logprep.py
+++ b/tests/unit/test_run_logprep.py
@@ -281,7 +281,7 @@ def test_test_ruleset_starts_rule_corpus_tester(self, mock_tester):
     @mock.patch("logging.Logger.info")
     def test_run_logprep_logs_log_level(self, mock_info):
         config = Configuration.from_sources(("tests/testdata/config/config.yml",))
-        assert config.logger.get("level") == "INFO"
+        assert config.logger.level == "INFO"
         with mock.patch("logprep.run_logprep.Runner"):
             with pytest.raises(SystemExit):
                 run_logprep.run(("tests/testdata/config/config.yml",))
diff --git a/tests/unit/test_runner.py b/tests/unit/test_runner.py
index 1a491ef97..0de3541b5 100644
--- a/tests/unit/test_runner.py
+++ b/tests/unit/test_runner.py
@@ -115,7 +115,7 @@ def test_reload_invokes_manager_restart_on_config_change(self, runner: Runner):
     def test_set_config_refresh_interval(self, new_value, expected_value, runner):
         with mock.patch.object(runner, "_manager"):
             runner._config_refresh_interval = new_value
-            runner._keep_iterating = partial(mock_keep_iterating, 1)
+            runner._exit_received = True
             runner.start()
         if expected_value is None:
             assert len(runner.scheduler.jobs) == 0
@@ -262,11 +262,12 @@ def test_stop_method(self, runner: Runner):
         runner.stop()
         assert runner._exit_received
 
-    @mock.patch("logprep.runner.Runner._keep_iterating", new=partial(mock_keep_iterating, 1))
     def test_start_sets_version_metric(self, runner: Runner):
         runner._configuration.version = "very custom version"
+        runner._exit_received = True
         with mock.patch("logprep.metrics.metrics.GaugeMetric.add_with_labels") as mock_add:
-            runner.start()
+            with mock.patch.object(runner, "_manager"):
+                runner.start()
         mock_add.assert_called()
         mock_add.assert_has_calls(
             (
diff --git a/tests/unit/util/test_auto_rule_corpus_tester.py b/tests/unit/util/test_auto_rule_corpus_tester.py
index 062ac80e1..1cd4ef102 100644
--- a/tests/unit/util/test_auto_rule_corpus_tester.py
+++ b/tests/unit/util/test_auto_rule_corpus_tester.py
@@ -5,11 +5,13 @@
 import os
 import re
 from json import JSONDecodeError
+from logging.config import dictConfig
 from unittest import mock
 
 import pytest
 
 from logprep.util.auto_rule_tester.auto_rule_corpus_tester import RuleCorpusTester
+from logprep.util.defaults import DEFAULT_LOG_CONFIG
 from logprep.util.getter import GetterFactory
 
@@ -39,6 +41,10 @@ def prepare_corpus_tester(corpus_tester, tmp_path, test_data):
 
 class TestAutoRuleTester:
+
+    def setup_method(self):
+        dictConfig(DEFAULT_LOG_CONFIG)
+
     @pytest.mark.parametrize(
         "test_case, test_data, mock_output, expected_prints, exit_code",
         [
diff --git a/tests/unit/util/test_configuration.py b/tests/unit/util/test_configuration.py
index 4b5d5cd75..5a1a59e57 100644
--- a/tests/unit/util/test_configuration.py
+++ b/tests/unit/util/test_configuration.py
@@ -17,6 +17,7 @@
     Configuration,
     InvalidConfigurationError,
     InvalidConfigurationErrors,
+    LoggerConfig,
     MetricsConfig,
 )
 from logprep.util.defaults import ENV_NAME_LOGPREP_CREDENTIALS_FILE
@@ -46,7 +47,7 @@ class TestConfiguration:
             ("config_refresh_interval", type(None), None),
             ("process_count", int, 1),
             ("timeout", float, 5.0),
-            ("logger", dict, {"level": "INFO"}),
+            ("logger", LoggerConfig, LoggerConfig(**{"level": "INFO"})),
             ("pipeline", list, []),
             ("input", dict, {}),
             ("output", dict, {}),
@@ -78,7 +79,6 @@ def test_create_from_sources_adds_configs(self):
             ("config_refresh_interval", 0, 900),
             ("process_count", 1, 2),
             ("timeout", 1.0, 2.0),
-            ("logger", {"level": "INFO"}, {"level": "DEBUG"}),
             (
                 "metrics",
                 {"enabled": False, "port": 8000, "uvicorn_config": {"access_log": True}},
@@ -126,6 +126,40 @@ def test_get_last_value(self, tmp_path, attribute, first_value, second_value):
         else:
             assert attribute_from_test == second_value
 
+    def test_get_last_value_logger_config(self, tmp_path):
+        attribute = "logger"
+        first_value = {"level": "INFO"}
+        second_value = {"level": "DEBUG"}
+        first_config = tmp_path / "pipeline.yml"
+        first_config.write_text(
+            f"""
+input:
+    dummy:
+        type: dummy_input
+        documents: []
+output:
+    dummy:
+        type: dummy_output
+{attribute}: {first_value}
+"""
+        )
+        second_config = tmp_path / "pipeline2.yml"
+        second_config.write_text(
+            f"""
+input:
+    dummy:
+        type: dummy_input
+        documents: []
+output:
+    dummy:
+        type: dummy_output
+{attribute}: {second_value}
+"""
+        )
+        config = Configuration.from_sources([str(first_config), str(second_config)])
+        assert config.logger.level == "DEBUG"
+        assert config.logger.loggers.get("root").get("level") == "DEBUG"
+
     @pytest.mark.parametrize(
         "attribute, value, expected_error, expected_message",
         [
@@ -1241,3 +1275,20 @@ def test_invalid_configuration_error_only_append_unique_errors(
         error = InvalidConfigurationErrors(error_list)
         assert len(error.errors) == len(expected_error_list)
         assert error.errors == expected_error_list
+
+
+class TestLoggerConfig:
+
+    @pytest.mark.parametrize("kwargs", [{"level": "DEBUG"}, {"level": "INFO"}])
+    def test_logger_config_sets_global_level(self, kwargs):
+        config = LoggerConfig(**kwargs)
+        assert config.loggers.get("root").get("level") == kwargs.get("level")
+        assert config.loggers.get("opensearch").get("level") == "ERROR"
+
+    @pytest.mark.parametrize("kwargs", [{"loggers": {"logprep": {"level": "DEBUG"}}}])
+    def test_loggers_config_only_sets_level(self, kwargs):
+        config = LoggerConfig(**kwargs)
+        assert config.loggers.get("logprep").get("level") == "DEBUG", "should be set"
+        assert config.loggers.get("root").get("level") == "INFO", "should be default"
+        assert config.loggers.get("root").get("handlers") == ["console"], "should be default"
+        assert config.loggers.get("opensearch").get("level") == "ERROR", "should be default"
diff --git a/tests/unit/util/test_logging.py b/tests/unit/util/test_logging.py
new file mode 100644
index 000000000..3da497adc
--- /dev/null
+++ b/tests/unit/util/test_logging.py
@@ -0,0 +1,66 @@
+import logging
+import logging.config
+from socket import gethostname
+
+import pytest
+
+from logprep.util.defaults import DEFAULT_LOG_CONFIG
+from logprep.util.logging import LogprepFormatter
+
+
+class TestLogprepFormatter:
+
+    def test_default_log_config(self):
+        logging.config.dictConfig(DEFAULT_LOG_CONFIG)
+        logger = logging.getLogger("test")
+        assert isinstance(logger.root.handlers[0].formatter, LogprepFormatter)
+
+    def test_formatter_init_with_default(self):
+        default_formatter_config = DEFAULT_LOG_CONFIG["formatters"]["logprep"]
+        formatter = LogprepFormatter(
+            fmt=default_formatter_config["format"], datefmt=default_formatter_config["datefmt"]
+        )
+        assert isinstance(formatter, LogprepFormatter)
+
+    def test_format_returns_str(self):
+        default_formatter_config = DEFAULT_LOG_CONFIG["formatters"]["logprep"]
+        formatter = LogprepFormatter(
+            fmt=default_formatter_config["format"], datefmt=default_formatter_config["datefmt"]
+        )
+        record = logging.LogRecord(
+            name="test",
+            level=logging.INFO,
+            pathname="test.py",
+            lineno=1,
+            msg="test message",
+            args=None,
+            exc_info=None,
+        )
+        formatted_record = formatter.format(record)
+        assert isinstance(formatted_record, str)
+        assert "test message" in formatted_record
+        assert "INFO" in formatted_record
+
+    @pytest.mark.parametrize(
+        "custom_format, expected",
+        [
+            ("%(asctime)-15s %(hostname)-5s %(name)-5s %(levelname)-8s: %(message)s", gethostname),
+        ],
+    )
+    def test_format_custom_format(self, custom_format, expected):
+        default_formatter_config = DEFAULT_LOG_CONFIG["formatters"]["logprep"]
+        formatter = LogprepFormatter(fmt=custom_format, datefmt=default_formatter_config["datefmt"])
+        record = logging.LogRecord(
+            name="test",
+            level=logging.INFO,
+            pathname="test.py",
+            lineno=1,
+            msg="test message",
+            args=None,
+            exc_info=None,
+        )
+        formatted_record = formatter.format(record)
+        assert isinstance(formatted_record, str)
+        assert "test message" in formatted_record
+        assert "INFO" in formatted_record
+        assert expected() in formatted_record
diff --git a/tests/unit/util/test_rule_dry_runner.py b/tests/unit/util/test_rule_dry_runner.py
index b3bafeeb8..53b6ea896 100644
--- a/tests/unit/util/test_rule_dry_runner.py
+++ b/tests/unit/util/test_rule_dry_runner.py
@@ -86,7 +86,6 @@ def test_dry_run_accepts_json_as_input(self, tmp_path, capsys):
             config=self.config,
             full_output=True,
             use_json=True,
-            logger=logging.getLogger("test-logger"),
         )
         dry_runner.run()
 
@@ -106,7 +105,6 @@ def test_dry_run_accepts_json_in_list_as_input(self, tmp_path, capsys):
             config=self.config,
             full_output=True,
             use_json=True,
-            logger=logging.getLogger("test-logger"),
         )
         dry_runner.run()
 
@@ -126,7 +124,6 @@ def test_dry_run_accepts_jsonl_as_input(self, tmp_path, capsys):
             config=self.config,
             full_output=True,
             use_json=False,
-            logger=logging.getLogger("test-logger"),
         )
         dry_runner.run()
 
@@ -155,7 +152,6 @@ def test_dry_run_print_custom_output(self, tmp_path, capsys):
             config=self.config,
             full_output=True,
             use_json=True,
-            logger=logging.getLogger("test-logger"),
         )
         dry_runner.run()
 
@@ -181,7 +177,6 @@ def test_dry_run_prints_predetection(self, tmp_path, capsys):
             config=self.config,
             full_output=True,
             use_json=True,
-            logger=logging.getLogger("test-logger"),
         )
         dry_runner.run()
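The stream swap the corpus tester relies on can be reproduced in a few lines; a sketch using the :code:`corpustester` logger and its :code:`string` handler from :code:`DEFAULT_LOG_CONFIG`:

.. code-block:: python

   import io
   import logging
   import logging.config

   from logprep.util.defaults import DEFAULT_LOG_CONFIG

   logging.config.dictConfig(DEFAULT_LOG_CONFIG)

   capture = io.StringIO()
   logger = logging.getLogger("corpustester")
   logger.handlers[0].setStream(capture)  # redirect the "string" handler into the buffer

   logger.warning("something to capture")
   assert "something to capture" in capture.getvalue()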