diff --git a/CHANGELOG.md b/CHANGELOG.md index c4c6d9bae..653bbf2f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,8 @@ ## next release ### Breaking +* reimplement the logprep CLI, see `logprep --help` for more information. + ### Features diff --git a/logprep/run_logprep.py b/logprep/run_logprep.py index 8953af687..875df7442 100644 --- a/logprep/run_logprep.py +++ b/logprep/run_logprep.py @@ -1,27 +1,22 @@ -#!/usr/bin/python3 -"""This module can be used to start the logprep.""" # pylint: disable=logging-fstring-interpolation -import inspect +"""This module can be used to start the logprep.""" import logging import os import sys import warnings -from argparse import ArgumentParser -from os.path import basename -from pathlib import Path +import click import requests from colorama import Fore from logprep._version import get_versions -from logprep.processor.base.rule import Rule from logprep.runner import Runner from logprep.util.auto_rule_tester.auto_rule_corpus_tester import RuleCorpusTester from logprep.util.auto_rule_tester.auto_rule_tester import AutoRuleTester from logprep.util.configuration import Configuration, InvalidConfigurationError +from logprep.util.getter import GetterNotFoundError from logprep.util.helper import print_fcolor from logprep.util.rule_dry_runner import DryRunner -from logprep.util.schema_and_rule_checker import SchemaAndRuleChecker warnings.simplefilter("always", DeprecationWarning) logging.captureWarnings(True) @@ -32,112 +27,32 @@ logging.getLogger("elasticsearch").setLevel(logging.ERROR) -def _parse_arguments(): - argument_parser = ArgumentParser() - argument_parser.add_argument( - "--version", - help="print the current version and exit", - action="store_true", - ) - argument_parser.add_argument( - "config", - nargs="?", - help=f"Path to configuration file, if not given then " - f"the default path '{DEFAULT_LOCATION_CONFIG}' is used", - default=DEFAULT_LOCATION_CONFIG, - ) - argument_parser.add_argument("--disable-logging", help="Disable logging", action="store_true") - argument_parser.add_argument( - "--validate-rules", - help="Validate Labeler Rules (if well-formed" " and valid against given schema)", - action="store_true", - ) - argument_parser.add_argument( - "--verify-config", - help="Verify the configuration file", - action="store_true", - ) - argument_parser.add_argument( - "--dry-run", - help="Dry run pipeline with events in given " "path and print results", - metavar="PATH_TO_JSON_LINE_FILE_WITH_EVENTS", - ) - argument_parser.add_argument( - "--dry-run-input-type", - choices=["json", "jsonl"], - default="json", - help="Specify input type for dry-run", - ) - argument_parser.add_argument( - "--dry-run-full-output", - help="Print full dry-run output, including " "all extra output", - action="store_true", - ) - argument_parser.add_argument("--auto-test", help="Run rule-tests", action="store_true") - argument_parser.add_argument( - "--auto-corpus-test", help="Run rule-corpus-test", action="store_true" - ) - argument_parser.add_argument( - "--corpus-testdata", help="Directory to the test data for the rule-corpus-test" - ) - arguments = argument_parser.parse_args() - - requires_dry_run = arguments.dry_run_full_output or arguments.dry_run_input_type == "jsonl" - if requires_dry_run and not arguments.dry_run: - argument_parser.error("--dry-run-input-type and --dry-run-full-output require --dry-run") - - return arguments - - -def _run_logprep(arguments, logger: logging.Logger): - runner = None - try: - runner = Runner.get_runner() - 
runner.load_configuration(arguments.config)
-        logger.debug("Configuration loaded")
-        runner.start()
-    # pylint: disable=broad-except
-    except BaseException as error:
-        if os.environ.get("DEBUG", False):
-            logger.exception(f"A critical error occurred: {error}")  # pragma: no cover
-        else:
-            logger.critical(f"A critical error occurred: {error}")
-        if runner:
-            runner.stop()
-        sys.exit(1)
-    # pylint: enable=broad-except
-
-
-def get_processor_type_and_rule_class() -> dict:  # pylint: disable=missing-docstring
-    return {
-        basename(Path(inspect.getfile(rule_class)).parent): rule_class
-        for rule_class in Rule.__subclasses__()
-    }
+EPILOG_STR = "Check out our docs at https://logprep.readthedocs.io/en/latest/"


-def get_versions_string(args) -> str:
+def get_versions_string(config=None) -> str:
     """
     Prints the version and exits.
     If a configuration was found, its version is printed as well
     """
     versions = get_versions()
     padding = 25
-    version_string = f"{'python version:'.ljust(padding)}{sys.version.split()[0]}"
-    version_string += f"\n{'logprep version:'.ljust(padding)}{versions['version']}"
-    try:
-        config = Configuration().create_from_yaml(args.config)
-    except FileNotFoundError:
-        config = Configuration()
-        config.path = args.config
+    version_string = f"{'logprep version:'.ljust(padding)}{versions['version']}"
+    version_string += f"\n{'python version:'.ljust(padding)}{sys.version.split()[0]}"
     if config:
         config_version = f"{config.get('version', 'unset')}, {config.path}"
     else:
-        config_version = f"no configuration found in '{config.path}'"
+        config_version = "no configuration found"
     version_string += f"\n{'configuration version:'.ljust(padding)}{config_version}"
     return version_string


-def _setup_logger(args, config: Configuration):
+def print_version_and_exit(config):
+    """Print the version string and exit."""
+    print(get_versions_string(config))
+    sys.exit(0)
+
+
+def _setup_logger(config: Configuration):
     try:
         log_config = config.get("logger", {})
         log_level = log_config.get("level", "INFO")
@@ -146,7 +61,7 @@ def _setup_logger(args, config: Configuration):
         )
         logger = logging.getLogger("Logprep")
         logger.info(f"Log level set to '{log_level}'")
-        for version in get_versions_string(args).split("\n"):
+        for version in get_versions_string(config).split("\n"):
             logger.info(version)
     except BaseException as error:  # pylint: disable=broad-except
         logging.getLogger("Logprep").exception(error)
@@ -154,88 +69,181 @@ def _setup_logger(args, config: Configuration):
     return logger


-def _load_configuration(args):
+def _load_configuration(config):
     try:
-        config = Configuration().create_from_yaml(args.config)
+        config = Configuration().create_from_yaml(config)
     except FileNotFoundError:
-        print(f"The given config file does not exist: {args.config}", file=sys.stderr)
+        print(f"The given config file does not exist: {config}", file=sys.stderr)
         print(
             "Create the configuration or change the path. Use '--help' for more information.",
             file=sys.stderr,
         )
         sys.exit(1)
+    except GetterNotFoundError as error:
+        print(f"{error}", file=sys.stderr)
+        sys.exit(1)
     except requests.RequestException as error:
         print(f"{error}", file=sys.stderr)
         sys.exit(1)
     return config


-def _validate_rules(args, config: Configuration, logger: logging.Logger):
+@click.group(name="logprep")
+@click.version_option(version=get_versions_string(), message="%(version)s")
+def cli():
+    """
+    Logprep allows you to collect, process and forward log messages from various data sources.
+    Log messages are read and written by so-called connectors.
+ """ + + +@cli.command(short_help="Run logprep to process log messages", epilog=EPILOG_STR) +@click.argument("config") +@click.option( + "--version", + is_flag=True, + default=False, + help="Print version and exit (includes also congfig version)", +) +def run(config: str, version=None): + """ + Run Logprep with the given configuration. + + CONFIG is a path to configuration file (filepath or URL). + """ + config_obj = _load_configuration(config) + if version: + print_version_and_exit(config_obj) + logger = _setup_logger(config_obj) + logger.debug(f'Metric export enabled: {config_obj.get("metrics", {}).get("enabled", False)}') + logger.debug(f"Config path: {config}") + runner = None try: - config.verify_pipeline_only(logger) - except InvalidConfigurationError as error: - logger.critical(error) - sys.exit(1) - type_rule_map = get_processor_type_and_rule_class() - rules_valid = [] - for processor_type, rule_class in type_rule_map.items(): - rules_valid.append( - SchemaAndRuleChecker().validate_rules(args.config, processor_type, rule_class, logger) - ) - if not all(rules_valid): + runner = Runner.get_runner() + runner.load_configuration(config) + logger.debug("Configuration loaded") + runner.start() + # pylint: disable=broad-except + except BaseException as error: + if os.environ.get("DEBUG", False): + logger.exception(f"A critical error occurred: {error}") # pragma: no cover + else: + logger.critical(f"A critical error occurred: {error}") + if runner: + runner.stop() sys.exit(1) - if not args.auto_test: - sys.exit(0) + # pylint: enable=broad-except -def main(): - """Start the logprep runner.""" - args = _parse_arguments() +@cli.group(name="test", short_help="Execute tests against a given configuration") +def test(): + """ + Execute certain tests like unit and integration tests. Can also verify the configuration. + """ + + +@test.command(name="config") +@click.argument("config") +def test_config(config): + """ + Verify the configuration file - if args.version: - print(get_versions_string(args)) - sys.exit(0) + CONFIG is a path to configuration file (filepath or URL). + """ + config = _load_configuration(config) + logger = _setup_logger(config) try: - config = _load_configuration(args) - except BaseException as error: # pylint: disable=broad-except - logging.getLogger("Logprep").error(error) + config.verify(logger=logger) + except InvalidConfigurationError as error: + logger.critical(error) sys.exit(1) + print_fcolor(Fore.GREEN, "The verification of the configuration was successful") + + +@test.command(short_help="Execute a dry run against a configuration and selected events") +@click.argument("config") +@click.argument("events") +@click.option( + "--input-type", + help="Specifies the input type.", + type=click.Choice(["json", "jsonl"]), + default="jsonl", + show_default=True, +) +@click.option( + "--full-output", + help="Print full dry-run output, including all extra output", + default=True, + type=click.BOOL, + show_default=True, +) +def dry_run(config, events, input_type, full_output): + """ + Execute a logprep dry run with the given configuration against a set of events. The results of + the processing will be printed in the terminal. - logger = _setup_logger(args, config) + \b + CONFIG is a path to configuration file (filepath or URL). + EVENTS is a path to a 'json' or 'jsonl' file. 
+ """ + json_input = input_type == "json" + dry_runner = DryRunner(events, config, full_output, json_input, logging.getLogger("DryRunner")) + dry_runner.run() - if args.validate_rules or args.auto_test: - _validate_rules(args, config, logger) - logger.debug(f'Metric export enabled: {config.get("metrics", {}).get("enabled", False)}') - logger.debug(f"Config path: {args.config}") - if args.auto_test: - auto_rule_tester = AutoRuleTester(args.config) - auto_rule_tester.run() - elif args.dry_run: - json_input = args.dry_run_input_type == "json" - dry_runner = DryRunner( - args.dry_run, args.config, args.dry_run_full_output, json_input, logger - ) - dry_runner.run() - elif args.verify_config: - try: - config.verify(logger) - except InvalidConfigurationError as error: - logger.critical(error) - sys.exit(1) - print_fcolor(Fore.GREEN, "The verification of the configuration was successful") - elif args.auto_corpus_test: - if args.corpus_testdata is None: - logger.error( - "In order to start the auto-rule-corpus-tester you have to configure the " - "directory to the test data with '--corpus-testdata'. See '--help' for " - "more information." - ) - sys.exit(1) - RuleCorpusTester(args.config, args.corpus_testdata).run() - else: - _run_logprep(args, logger) +@test.command(short_help="Run the rule tests of the given configuration", name="unit") +@click.argument("config") +def test_rules(config): + """ + Test rules against their respective test files + + CONFIG is a path to configuration file (filepath or URL). + """ + tester = AutoRuleTester(config) + tester.run() + + +@test.command( + short_help="Run the rule corpus tester against a given configuration", name="integration" +) +@click.argument("config") +@click.argument("testdata") +def test_ruleset(config, testdata): + """Test the given ruleset against specified test data + + \b + CONFIG is a path to configuration file (filepath or URL). + TESTDATA is a path to a set of test files. + """ + tester = RuleCorpusTester(config, testdata) + tester.run() + + +@cli.command(short_help="Generate load for a running logprep instance [Not Yet Implemented]") +def generate(): + """ + Generate load offers two different options to create sample events for a running + logprep instance. + """ + raise NotImplementedError + + +@cli.command(short_help="Print a complete configuration file [Not Yet Implemented]", name="print") +@click.argument("config") +@click.option( + "--output", + type=click.Choice(["json", "yaml"]), + default="yaml", + help="What output format to use", +) +def print_config(config, output): + """ + Prints the given configuration as a combined yaml or json file, with all rules and options + included. + + CONFIG is a path to configuration file (filepath or URL). 
+ """ + raise NotImplementedError if __name__ == "__main__": - main() + cli() diff --git a/tests/acceptance/util.py b/tests/acceptance/util.py index cbfa6e418..7ba5fc87c 100644 --- a/tests/acceptance/util.py +++ b/tests/acceptance/util.py @@ -247,7 +247,7 @@ def start_logprep(config_path: str, env: dict = None) -> subprocess.Popen: env = {} env.update({"PYTHONPATH": "."}) return subprocess.Popen( # nosemgrep - f"{sys.executable} logprep/run_logprep.py {config_path}", + f"{sys.executable} logprep/run_logprep.py run {config_path}", shell=True, env=env, stdin=subprocess.PIPE, diff --git a/tests/unit/test_quickstart.py b/tests/unit/test_quickstart.py index 37a34b580..f143ff4e0 100644 --- a/tests/unit/test_quickstart.py +++ b/tests/unit/test_quickstart.py @@ -20,11 +20,11 @@ def test_quickstart_rules_are_valid(self): "sys.argv", [ "logprep", - "--disable-logging", - "--validate-rules", + "test", + "config", self.QUICKSTART_CONFIG_PATH, ], ): with pytest.raises(SystemExit) as e_info: - run_logprep.main() + run_logprep.cli() assert e_info.value.code == 0 diff --git a/tests/unit/test_run_logprep.py b/tests/unit/test_run_logprep.py index 7e9bbd088..1c0f2a955 100644 --- a/tests/unit/test_run_logprep.py +++ b/tests/unit/test_run_logprep.py @@ -1,148 +1,122 @@ # pylint: disable=missing-docstring # pylint: disable=protected-access -import os +# pylint: disable=attribute-defined-outside-init +import logging import sys from pathlib import Path from unittest import mock -import pytest import requests import responses -from yaml import safe_load +from click.testing import CliRunner -from logprep import run_logprep from logprep._version import get_versions -from logprep.run_logprep import DEFAULT_LOCATION_CONFIG +from logprep.run_logprep import cli from logprep.util.configuration import InvalidConfigurationError -class TestRunLogprep: - @mock.patch("logprep.run_logprep._run_logprep") - def test_main_calls_run_logprep_with_test_config(self, mock_run_logprep): - with mock.patch( - "sys.argv", - [ - "logprep", - "--disable-logging", - "tests/testdata/config/config.yml", - ], - ): - run_logprep.main() - mock_run_logprep.assert_called() +class TestRunLogprepCli: + def setup_method(self): + self.cli_runner = CliRunner() + + @mock.patch("logprep.run_logprep.Runner") + def test_cli_run_starts_runner_with_config(self, mock_runner): + runner_instance = mock.MagicMock() + mock_runner.get_runner.return_value = runner_instance + args = ["run", "tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + runner_instance.start.assert_called() + config_file_path = "tests/testdata/config/config.yml" + runner_instance.load_configuration.assert_called_with(config_file_path) + + @mock.patch("logprep.run_logprep.Runner") + def test_cli_run_uses_getter_to_get_config(self, mock_runner): + runner_instance = mock.MagicMock() + mock_runner.get_runner.return_value = runner_instance + args = ["run", "file://tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + runner_instance.start.assert_called() + config_file_path = "file://tests/testdata/config/config.yml" + runner_instance.load_configuration.assert_called_with(config_file_path) - @mock.patch("logprep.util.schema_and_rule_checker.SchemaAndRuleChecker.validate_rules") - def test_main_calls_validates_rules(self, mock_validate_rules): - with mock.patch( - "sys.argv", - [ - "logprep", - "--disable-logging", - "--validate-rules", - "tests/testdata/config/config.yml", - ], - ): 
- with pytest.raises(SystemExit): - run_logprep.main() - mock_validate_rules.assert_called() + def test_exits_after_getter_error_for_not_existing_protocol(self): + args = ["run", "almighty_protocol://tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 1 + assert "No getter for protocol 'almighty_protocol'" in result.output - def test_uses_getter_to_get_config(self): - with mock.patch( - "sys.argv", - [ - "logprep", - "--disable-logging", - "--validate-rules", - "file://tests/testdata/config/config.yml", - ], - ): - with pytest.raises(SystemExit, match="0"): - run_logprep.main() + @mock.patch("logprep.util.configuration.Configuration.verify") + def test_test_config_verifies_configuration_successfully(self, mock_verify): + args = ["test", "config", "tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + mock_verify.assert_called() + assert "The verification of the configuration was successful" in result.stdout - def test_exits_after_getter_error_for_not_existing_protocol(self): - with mock.patch( - "sys.argv", - [ - "logprep", - "--disable-logging", - "--validate-rules", - "almighty_protocol://tests/testdata/config/config.yml", - ], - ): - with pytest.raises(SystemExit, match="1"): - run_logprep.main() + @mock.patch("logprep.util.configuration.Configuration.verify") + def test_test_config_verifies_configuration_unsuccessfully(self, mock_verify): + mock_verify.side_effect = InvalidConfigurationError + args = ["test", "config", "tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 1 + mock_verify.assert_called() + assert "The verification of the configuration was successful" not in result.stdout @responses.activate def test_gets_config_from_https(self): pipeline_config = Path("tests/testdata/config/config.yml").read_text(encoding="utf8") responses.add(responses.GET, "https://does.not.exits/pipline.yml", pipeline_config) - with mock.patch( - "sys.argv", - [ - "logprep", - "--disable-logging", - "--validate-rules", - "https://does.not.exits/pipline.yml", - ], - ): - with pytest.raises(SystemExit, match="0"): - run_logprep.main() - - def test_version_arg_prints_logprep_version_without_config_argument(self, capsys): - with mock.patch("sys.argv", ["logprep", "--version"]): - with pytest.raises(SystemExit): - run_logprep.main() - captured = capsys.readouterr() - python_line, logprep_line, config_line = captured.out.strip().split("\n") - assert python_line == f"python version: {sys.version.split()[0]}" - assert logprep_line == f"logprep version: {get_versions()['version']}" + args = ["test", "config", "https://does.not.exits/pipline.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + + def test_version_arg_prints_logprep_version(self): + result = self.cli_runner.invoke(cli, ["--version"]) + assert result.exit_code == 0 + assert f"python version: {sys.version.split()[0]}" in result.output + assert f"logprep version: {get_versions()['version']}" in result.output + assert f"configuration version: no configuration found" in result.output + + def test_run_version_arg_prints_logprep_version_with_config_version(self): + args = ["run", "--version", "tests/testdata/config/config.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + assert f"python version: {sys.version.split()[0]}" in result.output + assert f"logprep version: {get_versions()['version']}" in result.output assert ( 
- config_line - == f"configuration version: no configuration found in '{DEFAULT_LOCATION_CONFIG}'" + "configuration version: 1, file://tests/testdata/config/config.yml" in result.output ) - def test_version_arg_prints_also_config_version_if_version_key_is_found(self, capsys): - config_path = "tests/testdata/config/config.yml" - with mock.patch("sys.argv", ["logprep", "--version", config_path]): - with pytest.raises(SystemExit): - run_logprep.main() - captured = capsys.readouterr() - lines = captured.out.strip() - with open(config_path, "r", encoding="utf-8") as file: - configuration = safe_load(file) - expected_lines = ( - f"python version: {sys.version.split()[0]}\n" - f"logprep version: {get_versions()['version']}\n" - f"configuration version: {configuration['version']}, file://{config_path}" + def test_run_version_arg_prints_logprep_version_without_config_value(self): + args = ["run", "--version", "tests/testdata/config/config2.yml"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + assert f"python version: {sys.version.split()[0]}" in result.output + assert f"logprep version: {get_versions()['version']}" in result.output + assert ( + "configuration version: unset, file://tests/testdata/config/config2.yml" + in result.output ) - assert lines == expected_lines @responses.activate - def test_version_arg_prints_with_http_config(self, capsys): + def test_run_version_arg_prints_with_http_config(self): config_path = "tests/testdata/config/config.yml" responses.add( responses.GET, - "http://localhost:32000/tests/testdata/config/config.yml", + f"http://localhost:32000/{config_path}", Path(config_path).read_text(encoding="utf8"), ) - with mock.patch( - "sys.argv", ["logprep", "--version", f"http://localhost:32000/{config_path}"] - ): - with pytest.raises(SystemExit): - run_logprep.main() - captured = capsys.readouterr() - lines = captured.out.strip() - with open(config_path, "r", encoding="utf-8") as file: - configuration = safe_load(file) - expected_lines = ( - f"python version: {sys.version.split()[0]}\n" - f"logprep version: {get_versions()['version']}\n" - f"configuration version: {configuration['version']}," - f" http://localhost:32000/{config_path}" - ) - assert lines == expected_lines + args = ["run", "--version", f"http://localhost:32000/{config_path}"] + result = self.cli_runner.invoke(cli, args) + assert result.exit_code == 0 + assert f"python version: {sys.version.split()[0]}" in result.output + assert f"logprep version: {get_versions()['version']}" in result.output + assert f"configuration version: 1, http://localhost:32000/{config_path}" in result.output @responses.activate - def test_version_arg_prints_with_http_config_without_exposing_secret_data(self, capsys): + def test_run_version_arg_prints_with_http_config_without_exposing_secret_data(self): config_path = "tests/testdata/config/config.yml" mock_env = { "LOGPREP_CONFIG_ATUH_USERNAME": "username", @@ -150,123 +124,94 @@ def test_version_arg_prints_with_http_config_without_exposing_secret_data(self, } responses.add( responses.GET, - "http://localhost:32000/tests/testdata/config/config.yml", + f"http://localhost:32000/{config_path}", Path(config_path).read_text(encoding="utf8"), ) + args = ["run", "--version", f"http://localhost:32000/{config_path}"] with mock.patch("os.environ", mock_env): - with mock.patch( - "sys.argv", - [ - "logprep", - "--version", - f"http://localhost:32000/{config_path}", - ], - ): - with pytest.raises(SystemExit): - run_logprep.main() - captured = capsys.readouterr() - lines = 
captured.out.strip()
-            with open(config_path, "r", encoding="utf-8") as file:
-                configuration = safe_load(file)
-            expected_lines = (
-                f"python version: {sys.version.split()[0]}\n"
-                f"logprep version: {get_versions()['version']}\n"
-                f"configuration version: {configuration['version']},"
-                f" http://localhost:32000/{config_path}"
-            )
-            assert lines == expected_lines
-
-    def test_no_config_error_is_printed_if_no_config_was_arg_was_given(self, capsys):
-        with mock.patch("sys.argv", ["logprep"]):
-            with pytest.raises(SystemExit):
-                run_logprep.main()
-            captured = capsys.readouterr()
-            error_lines = captured.err.strip()
-            expected_lines = (
-                f"The given config file does not exist: {DEFAULT_LOCATION_CONFIG}\nCreate the "
-                f"configuration or change the path. Use '--help' for more information."
-            )
-            assert error_lines == expected_lines
-
-    def test_no_config_error_is_printed_if_given_config_file_does_not_exist(self, capsys):
+            result = self.cli_runner.invoke(cli, args)
+        assert result.exit_code == 0
+        assert f"python version: {sys.version.split()[0]}" in result.output
+        assert f"logprep version: {get_versions()['version']}" in result.output
+        assert f"configuration version: 1, http://localhost:32000/{config_path}" in result.output
+        assert "username" not in result.output
+        assert "password" not in result.output
+
+    def test_run_no_config_error_is_printed_if_no_config_arg_was_given(self):
+        result = self.cli_runner.invoke(cli, ["run"])
+        assert result.exit_code == 2
+        assert (
+            "Usage: logprep run [OPTIONS] CONFIG\nTry 'logprep run --help' for help.\n\n"
+            "Error: Missing argument 'CONFIG'." in result.output
+        )
+
+    def test_run_no_config_error_is_printed_if_given_config_file_does_not_exist(self):
         non_existing_config_file = "/tmp/does/not/exist.yml"
-        with mock.patch("sys.argv", ["logprep", non_existing_config_file]):
-            with pytest.raises(SystemExit):
-                run_logprep.main()
-            captured = capsys.readouterr()
-            error_lines = captured.err.strip()
+        result = self.cli_runner.invoke(cli, ["run", non_existing_config_file])
+        assert result.exit_code == 1
         expected_lines = (
             f"The given config file does not exist: {non_existing_config_file}\nCreate the "
            f"configuration or change the path. Use '--help' for more information."
) - assert error_lines == expected_lines - - @mock.patch("logprep.runner.Runner.load_configuration") - @mock.patch("logprep.runner.Runner.start") - def test_main_loads_configuration_and_starts_runner(self, mock_start, mock_load): - config_path = "tests/testdata/config/config.yml" - with mock.patch("sys.argv", ["logprep", config_path]): - run_logprep.main() - mock_load.assert_called_with(config_path) - mock_start.assert_called() + assert expected_lines in result.output @mock.patch("logprep.runner.Runner.start") @mock.patch("logprep.runner.Runner.stop") def test_main_calls_runner_stop_on_any_exception(self, mock_stop, mock_start): mock_start.side_effect = Exception config_path = "tests/testdata/config/config.yml" - with pytest.raises(SystemExit): - with mock.patch("sys.argv", ["logprep", config_path]): - run_logprep.main() + result = self.cli_runner.invoke(cli, ["run", config_path]) + assert result.exit_code == 1 mock_stop.assert_called() def test_logprep_exits_if_logger_can_not_be_created(self): with mock.patch("logprep.run_logprep.Configuration.get") as mock_create: mock_create.side_effect = BaseException config_path = "tests/testdata/config/config.yml" - with mock.patch("sys.argv", ["logprep", config_path]): - with pytest.raises(SystemExit): - run_logprep.main() + result = self.cli_runner.invoke(cli, ["run", config_path]) + assert result.exit_code == 1 def test_logprep_exits_on_invalid_configuration(self): with mock.patch("logprep.util.configuration.Configuration.verify") as mock_verify: mock_verify.side_effect = InvalidConfigurationError config_path = "tests/testdata/config/config.yml" - with mock.patch("sys.argv", ["logprep", config_path]): - with pytest.raises(SystemExit): - run_logprep.main() + result = self.cli_runner.invoke(cli, ["run", config_path]) + assert result.exit_code == 1 def test_logprep_exits_on_any_exception_during_verify(self): with mock.patch("logprep.util.configuration.Configuration.verify") as mock_verify: mock_verify.side_effect = Exception config_path = "tests/testdata/config/config.yml" - with mock.patch("sys.argv", ["logprep", config_path]): - with pytest.raises(SystemExit): - run_logprep.main() + result = self.cli_runner.invoke(cli, ["run", config_path]) + assert result.exit_code == 1 def test_logprep_exits_on_request_exception(self): with mock.patch("logprep.util.getter.HttpGetter.get_raw") as mock_verify: mock_verify.side_effect = requests.RequestException("connection refused") - with mock.patch("sys.argv", ["logprep", "http://localhost/does-not-exists"]): - with pytest.raises(SystemExit): - run_logprep.main() + config_path = "http://localhost/does-not-exists" + result = self.cli_runner.invoke(cli, ["run", config_path]) + assert result.exit_code == 1 - @mock.patch("logprep.util.configuration.Configuration.verify") - def test_logprep_verifies_config(self, mock_verify): - with mock.patch( - "sys.argv", - ["logprep", "tests/testdata/config/config.yml", "--verify-config"], - ): - run_logprep.main() - mock_verify.assert_called() + @mock.patch("logprep.util.rule_dry_runner.DryRunner.run") + def test_test_dry_run_starts_dry_runner(self, mock_dry_runner): + config_path = "tests/testdata/config/config.yml" + events_path = "quickstart/exampledata/input_logdata/test_input.jsonl" + result = self.cli_runner.invoke(cli, ["test", "dry-run", config_path, events_path]) + assert result.exit_code == 0 + mock_dry_runner.assert_called() - @mock.patch("logprep.util.configuration.Configuration.verify") - def test_logprep_exits_on_verify_config_with_invalid_config(self, mock_verify): 
- mock_verify.side_effect = InvalidConfigurationError - with mock.patch( - "sys.argv", - ["logprep", "tests/testdata/config/config.yml", "--verify-config"], - ): - with pytest.raises(SystemExit): - run_logprep.main() - mock_verify.assert_called() + @mock.patch("logprep.util.auto_rule_tester.auto_rule_tester.AutoRuleTester.run") + def test_test_rules_starts_auto_rule_tester(self, mock_tester): + config_path = "tests/testdata/config/config.yml" + result = self.cli_runner.invoke(cli, ["test", "unit", config_path]) + assert result.exit_code == 0 + mock_tester.assert_called() + # the AutoRuleTester deactivates the logger which then has side effects on other tests + # so the logger is being activated here again. + logger = logging.getLogger() + logger.disabled = False + + @mock.patch("logprep.util.auto_rule_tester.auto_rule_corpus_tester.RuleCorpusTester.run") + def test_test_ruleset_starts_rule_corpus_tester(self, mock_tester): + config_path = "tests/testdata/config/config.yml" + test_data_path = "path/to/testset" + result = self.cli_runner.invoke(cli, ["test", "integration", config_path, test_data_path]) + assert result.exit_code == 0 + mock_tester.assert_called()
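As a quick usage sketch of the new command tree introduced by this patch (paths are placeholders; the `generate` and `print` commands still raise NotImplementedError):

    logprep run path/to/config.yml                                  # replaces: logprep path/to/config.yml
    logprep run --version path/to/config.yml
    logprep test config path/to/config.yml                          # replaces: --verify-config
    logprep test dry-run path/to/config.yml path/to/events.jsonl    # replaces: --dry-run
    logprep test unit path/to/config.yml                            # replaces: --auto-test
    logprep test integration path/to/config.yml path/to/testdata    # replaces: --auto-corpus-test with --corpus-testdata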