Add --benchmark and --exclude options to the benchmark scripts for debugging #145

Open: wants to merge 2 commits into master
24 changes: 24 additions & 0 deletions benchmark_size.py
@@ -206,6 +206,20 @@ def build_parser():
        help='Section categories to include in metric: one or more of "text", "rodata", '
        + '"data" or "bss". Default "text"',
    )
    parser.add_argument(
        '--benchmark',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to measure. By default all tests are measured. Results obtained from subsets are not valid Embench scores.'
    )
    parser.add_argument(
        '--exclude',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
    )

    return parser
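As a minimal illustration of how the two new options behave, here is a standalone argparse sketch (the benchmark names are examples only, not part of this change):

# Standalone sketch of the new --benchmark/--exclude options.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--benchmark', type=str, default=[], nargs='+')
parser.add_argument('--exclude', type=str, default=[], nargs='+')

# e.g. ./benchmark_size.py --benchmark crc32 nettle-aes --exclude md5sum
args = parser.parse_args(['--benchmark', 'crc32', 'nettle-aes', '--exclude', 'md5sum'])
print(args.benchmark)   # ['crc32', 'nettle-aes']
print(args.exclude)     # ['md5sum']

# With neither option given, both default to [], so the subset warnings
# added below never fire.
print(parser.parse_args([]).benchmark)   # []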

@@ -257,6 +271,9 @@ def validate_args(args):
    else:
        gp['metric'] = ['text']

    gp['benchmark'] = args.benchmark
    gp['exclude'] = args.exclude

def benchmark_size(bench, metrics):
    """Compute the total size of the desired sections in a benchmark. Returns
    the size in bytes, which may be zero if the section wasn't found."""
@@ -342,6 +359,8 @@ def collect_data(benchmarks):

    # Output it
    if gp['output_format'] == output_format.JSON:
        if gp['benchmark'] or gp['exclude']:
            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
        log.info('{ "size results" :')
        log.info(' { "detailed size results" :')
        for bench in benchmarks:
@@ -360,6 +379,8 @@

        log.info(' },')
    elif gp['output_format'] == output_format.TEXT:
        if gp['benchmark'] or gp['exclude']:
            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
        log.info('Benchmark size')
        log.info('--------- ----')
        for bench in benchmarks:
@@ -370,6 +391,9 @@
                res_output = f' {rel_data[bench]:6.2f}'
            log.info(f'{bench:15} {res_output:8}')
    elif gp['output_format'] == output_format.BASELINE:
        if gp['benchmark'] or gp['exclude']:
            log.info('ERROR: These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
            return [], []
        log.info('{')
        for bench in benchmarks:
            res_output = ''
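The collect_data() changes above follow one pattern for all three output formats: JSON and TEXT results are still emitted but carry a warning, while BASELINE output is refused. A condensed, hypothetical sketch of that guard (print() stands in for log.info(), and the dispatch is simplified):

# Hypothetical, condensed form of the subset guard used in collect_data().
def emit_results(gp, fmt):
    subset = bool(gp.get('benchmark')) or bool(gp.get('exclude'))
    if fmt in ('json', 'text'):
        if subset:
            print('These results are not valid Embench scores as they are '
                  'taken from a subset of the Embench suite.')
        # ... emit the detailed results as before ...
    elif fmt == 'baseline':
        if subset:
            # A subset must never be written back as the reference baseline.
            print('ERROR: results from a subset cannot be used as a baseline.')
            return [], []
        # ... emit baseline JSON ...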
24 changes: 24 additions & 0 deletions benchmark_speed.py
@@ -132,6 +132,20 @@ def get_common_args():
        action='store_false',
        help='Launch all benchmarks in series (the default)'
    )
    parser.add_argument(
        '--benchmark',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to measure. By default all tests are measured. Results obtained from subsets are not valid Embench scores.'
    )
    parser.add_argument(
        '--exclude',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
    )

    return parser.parse_known_args()
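Because get_common_args() uses parse_known_args(), anything the common parser does not recognise is handed back for the target module to consume. A small sketch of how that interacts with the new list-valued options (--target-freq here is an invented example, not a real Embench option):

# Sketch: the common parser consumes --benchmark/--exclude and leaves
# unrecognised target-specific options in the remnant.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--benchmark', type=str, default=[], nargs='+')
parser.add_argument('--exclude', type=str, default=[], nargs='+')

args, remnant = parser.parse_known_args(
    ['--benchmark', 'crc32', 'edn', '--target-freq=16000000']
)
print(args.benchmark)   # ['crc32', 'edn']
print(remnant)          # ['--target-freq=16000000']

Note that nargs='+' stops consuming at the next option-like token, so the benchmark list and pass-through target options can be mixed on one command line.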

@@ -168,6 +182,9 @@ def validate_args(args):
    gp['timeout'] = args.timeout
    gp['sim_parallel'] = args.sim_parallel

    gp['benchmark'] = args.benchmark
    gp['exclude'] = args.exclude

    try:
        newmodule = importlib.import_module(args.target_module)
    except ImportError as error:
@@ -297,6 +314,8 @@ def collect_data(benchmarks, remnant):

    # Output it
    if gp['output_format'] == output_format.JSON:
        if gp['benchmark'] or gp['exclude']:
            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
        log.info('{ "speed results" :')
        log.info(' { "detailed speed results" :')
        for bench in benchmarks:
@@ -315,6 +334,8 @@
            log.info(f' "{bench}" : {output},')
        log.info(' },')
    elif gp['output_format'] == output_format.TEXT:
        if gp['benchmark'] or gp['exclude']:
            log.info('These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
        log.info('Benchmark Speed')
        log.info('--------- -----')
        for bench in benchmarks:
@@ -327,6 +348,9 @@
            # Want relative results (the default). Only use non-zero values.
            log.info(f'{bench:15} {output:8}')
    elif gp['output_format'] == output_format.BASELINE:
        if gp['benchmark'] or gp['exclude']:
            log.info('ERROR: These results are not valid Embench scores as they are taken from a subset of the Embench suite.')
            return [], []
        log.info('{')
        for bench in benchmarks:
            if bench == benchmarks[-1]:
17 changes: 17 additions & 0 deletions build_all.py
@@ -121,6 +121,20 @@ def build_parser():
        default=5,
        help='Timeout used for the compiler and linker invocations'
    )
    parser.add_argument(
        '--benchmark',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to build. By default all tests are built. Results obtained from subsets are not valid Embench scores.'
    )
    parser.add_argument(
        '--exclude',
        type=str,
        default=[],
        nargs='+',
        help='Benchmark name(s) to exclude. Results obtained from subsets are not valid Embench scores.'
    )

    return parser

@@ -196,6 +210,9 @@ def validate_args(args):
            var, val = envarg.split('=', 1)
            gp['env'][var] = val

    gp['benchmark'] = args.benchmark
    gp['exclude'] = args.exclude

    # Other args validated later.


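With the same options in build_all.py, a subset can be built and then measured with matching flags. A hypothetical driver for a native build (the benchmark names are examples, and the --arch/--chip/--board values assume the stock native configuration described in the upstream README; scores from such a subset are not valid Embench results):

# Hypothetical driver: build and size-measure a two-benchmark subset.
import subprocess

subset = ['crc32', 'nettle-aes']

subprocess.run(
    ['./build_all.py', '--arch', 'native', '--chip', 'default',
     '--board', 'default', '--benchmark', *subset],
    check=True,
)
subprocess.run(['./benchmark_size.py', '--benchmark', *subset], check=True)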
7 changes: 6 additions & 1 deletion pylib/embench_core.py
@@ -130,11 +130,16 @@ def find_benchmarks():
    Return the list of benchmarks."""
    gp['benchdir'] = os.path.join(gp['rootdir'], 'src')
    gp['bd_benchdir'] = os.path.join(gp['bd'], 'src')
    dirlist = os.listdir(gp['benchdir'])
    if gp['benchmark']:
        dirlist = gp['benchmark']
    else:
        dirlist = os.listdir(gp['benchdir'])

    benchmarks = []

    for bench in dirlist:
        if bench in gp['exclude']:
            continue
        abs_b = os.path.join(gp['benchdir'], bench)
        if os.path.isdir(abs_b):
            benchmarks.append(bench)
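The selection itself is deliberately small: start from either the user-supplied list or the full contents of src/, then drop anything excluded or anything that is not a benchmark directory. A standalone approximation of that logic (the function name and arguments are illustrative, not part of the change):

# Standalone approximation of the filtering now done in find_benchmarks().
import os

def select_benchmarks(benchdir, include=None, exclude=None):
    """Return benchmark directory names under benchdir, honouring
    --benchmark (include) and --exclude style lists."""
    exclude = exclude or []
    dirlist = include if include else os.listdir(benchdir)
    benchmarks = []
    for bench in dirlist:
        if bench in exclude:
            continue
        if os.path.isdir(os.path.join(benchdir, bench)):
            benchmarks.append(bench)
    return benchmarks

# e.g. select_benchmarks('src', include=['crc32', 'md5sum'], exclude=['md5sum'])
# returns ['crc32'], provided src/crc32 exists.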