diff --git a/benchmark_size.py b/benchmark_size.py
index 66e32909..d091fe4c 100755
--- a/benchmark_size.py
+++ b/benchmark_size.py
@@ -206,6 +206,20 @@ def build_parser():
         help='Section categories to include in metric: one or more of "text", "rodata", '
         + '"data" or "bss". Default "text"',
     )
+    parser.add_argument(
+        '--benchmark',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to measure. By default all tests are measured. Results obtained from subsets are not valid Embench scores.'
+    )
+    parser.add_argument(
+        '--exclude',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
+    )
 
     return parser
@@ -257,6 +271,9 @@ def validate_args(args):
     else:
         gp['metric'] = ['text']
 
+    gp['benchmark'] = args.benchmark
+    gp['exclude'] = args.exclude
+
 
 def benchmark_size(bench, metrics):
     """Compute the total size of the desired sections in a benchmark. Returns the size in bytes, which may be zero if the section wasn't found."""
@@ -342,6 +359,8 @@ def collect_data(benchmarks):
 
     # Output it
     if gp['output_format'] == output_format.JSON:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
         log.info('{ "size results" :')
         log.info(' { "detailed size results" :')
         for bench in benchmarks:
@@ -360,6 +379,8 @@
         log.info(' },')
     elif gp['output_format'] == output_format.TEXT:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
         log.info('Benchmark size')
         log.info('--------- ----')
         for bench in benchmarks:
@@ -370,6 +391,9 @@
             res_output = f' {rel_data[bench]:6.2f}'
             log.info(f'{bench:15} {res_output:8}')
     elif gp['output_format'] == output_format.BASELINE:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('ERROR: These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
+            return [], []
         log.info('{')
         for bench in benchmarks:
             res_output = ''
diff --git a/benchmark_speed.py b/benchmark_speed.py
index d7312066..6964fce2 100755
--- a/benchmark_speed.py
+++ b/benchmark_speed.py
@@ -132,6 +132,20 @@ def get_common_args():
         action='store_false',
         help='Launch all benchmarks in series (the default)'
     )
+    parser.add_argument(
+        '--benchmark',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to measure. By default all tests are measured. Results obtained from subsets are not valid Embench scores.'
+    )
+    parser.add_argument(
+        '--exclude',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
+    )
 
     return parser.parse_known_args()
@@ -168,6 +182,9 @@ def validate_args(args):
     gp['timeout'] = args.timeout
     gp['sim_parallel'] = args.sim_parallel
 
+    gp['benchmark'] = args.benchmark
+    gp['exclude'] = args.exclude
+
     try:
         newmodule = importlib.import_module(args.target_module)
     except ImportError as error:
@@ -297,6 +314,8 @@ def collect_data(benchmarks, remnant):
 
     # Output it
     if gp['output_format'] == output_format.JSON:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
         log.info('{ "speed results" :')
         log.info(' { "detailed speed results" :')
         for bench in benchmarks:
@@ -315,6 +334,8 @@
             log.info(f' "{bench}" : {output},')
         log.info(' },')
     elif gp['output_format'] == output_format.TEXT:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
         log.info('Benchmark Speed')
         log.info('--------- -----')
         for bench in benchmarks:
@@ -327,6 +348,9 @@
             # Want relative results (the default). Only use non-zero values.
             log.info(f'{bench:15} {output:8}')
     elif gp['output_format'] == output_format.BASELINE:
+        if gp['benchmark'] or gp['exclude']:
+            log.info('ERROR: These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
+            return [], []
         log.info('{')
         for bench in benchmarks:
             if bench == benchmarks[-1]:
diff --git a/build_all.py b/build_all.py
index c154a289..187d9f26 100755
--- a/build_all.py
+++ b/build_all.py
@@ -121,6 +121,20 @@ def build_parser():
         default=5,
         help='Timeout used for the compiler and linker invocations'
     )
+    parser.add_argument(
+        '--benchmark',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to build. By default all tests are built. Results obtained from subsets are not valid Embench scores.'
+    )
+    parser.add_argument(
+        '--exclude',
+        type=str,
+        default=[],
+        nargs='+',
+        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
+    )
 
     return parser
@@ -196,6 +210,9 @@ def validate_args(args):
             var, val = envarg.split('=', 1)
             gp['env'][var] = val
 
+    gp['benchmark'] = args.benchmark
+    gp['exclude'] = args.exclude
+
     # Other args validated later.
diff --git a/pylib/embench_core.py b/pylib/embench_core.py
index 442f5122..7a784e8a 100644
--- a/pylib/embench_core.py
+++ b/pylib/embench_core.py
@@ -130,11 +130,16 @@ def find_benchmarks():
        Return the list of benchmarks."""
     gp['benchdir'] = os.path.join(gp['rootdir'], 'src')
     gp['bd_benchdir'] = os.path.join(gp['bd'], 'src')
-    dirlist = os.listdir(gp['benchdir'])
+    if gp['benchmark']:
+        dirlist = gp['benchmark']
+    else:
+        dirlist = os.listdir(gp['benchdir'])
 
     benchmarks = []
 
     for bench in dirlist:
+        if bench in gp['exclude']:
+            continue
         abs_b = os.path.join(gp['benchdir'], bench)
         if os.path.isdir(abs_b):
             benchmarks.append(bench)