diff --git a/src/BenchmarkManager.py b/src/BenchmarkManager.py
index 1adcfeba..3e7c2418 100644
--- a/src/BenchmarkManager.py
+++ b/src/BenchmarkManager.py
@@ -48,10 +48,13 @@ class BenchmarkManager:
     respective framework components. After executing the benchmarks, it collects the generated data and saves it.
     """
 
-    def __init__(self):
+    def __init__(self, fail_fast: bool = False):
         """
         Constructor method
+        :param fail_fast: Boolean whether a single failed benchmark run causes QUARK to fail
+        :type fail_fast: bool
         """
+        self.fail_fast = fail_fast
         self.application = None
         self.application_configs = None
         self.results = []
@@ -160,6 +163,8 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int):
 
                 except Exception as error:
                     logging.exception(f"Error during benchmark run: {error}", exc_info=True)
+                    if self.fail_fast:
+                        raise
 
         for record in benchmark_records:
             record.sum_up_times()
diff --git a/src/main.py b/src/main.py
index 7193d4d5..391e63b7 100644
--- a/src/main.py
+++ b/src/main.py
@@ -82,7 +82,7 @@ def setup_logging() -> None:
     logging.info(" ============================================================ ")
 
 
-def start_benchmark_run(config_file: str = None, store_dir: str = None) -> None:
+def start_benchmark_run(config_file: str = None, store_dir: str = None, fail_fast: bool = False) -> None:
     """
     Starts a benchmark run from the code
 
@@ -110,7 +110,7 @@ def start_benchmark_run(config_file: str = None, store_dir: str = None) -> None:
 
     config_manager = ConfigManager()
     config_manager.set_config(benchmark_config)
-    benchmark_manager = BenchmarkManager()
+    benchmark_manager = BenchmarkManager(fail_fast=fail_fast)
 
     # Can be overridden by using the -m|--modules option
     installer = Installer()
@@ -125,6 +125,8 @@ def create_benchmark_parser(parser: argparse.ArgumentParser):
     parser.add_argument('-s', '--summarize', nargs='+', help='If you want to summarize multiple experiments',
                         required=False)
     parser.add_argument('-m', '--modules', help="Provide a file listing the modules to be loaded")
+    parser.add_argument('-ff', '--failfast', help='Flag whether a single failed benchmark run causes QUARK to fail',
+                        required=False, action=argparse.BooleanOptionalAction)
     parser.set_defaults(goal='benchmark')
 
 
@@ -152,7 +154,7 @@ def handle_benchmark_run(args: argparse.Namespace) -> None:
     :rtype: None
     """
     from BenchmarkManager import BenchmarkManager  # pylint: disable=C0415
-    benchmark_manager = BenchmarkManager()
+    benchmark_manager = BenchmarkManager(fail_fast=args.failfast)
 
     if args.summarize:
         benchmark_manager.summarize_results(args.summarize)
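
Usage sketch (not part of the patch): a minimal illustration of how the new flag would be exercised, assuming QUARK's usual entry point and an existing -c/--config option; config.yml is a placeholder path. Note that argparse.BooleanOptionalAction requires Python 3.9+ and also generates a negated --no-failfast form, and that args.failfast is None (falsy) when the flag is omitted, so the default behavior is unchanged.

    # CLI: abort QUARK on the first failed benchmark run
    python src/main.py -c config.yml --failfast

    # Programmatic: fail_fast is forwarded into BenchmarkManager
    from main import start_benchmark_run
    start_benchmark_run(config_file="config.yml", fail_fast=True)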