From 3a4c9c6c324c8b1be54f1343cd1ebbe0b1b3011c Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 19:56:34 +0000 Subject: [PATCH 01/26] DAOS-623 ci: Add config and Action for python formatting. Required-githooks: true Signed-off-by: Ashley Pittman --- .flake8 | 2 +- .flake8-scons | 2 +- .github/workflows/flake.yml | 4 ++-- .github/workflows/linting.yml | 5 +++++ pyproject.toml | 19 +++++++++++++++++++ 5 files changed, 28 insertions(+), 4 deletions(-) diff --git a/.flake8 b/.flake8 index da6f12a49e1..0fca16cebaa 100644 --- a/.flake8 +++ b/.flake8 @@ -13,4 +13,4 @@ exclude = build, install max-line-length: 100 -ignore=W503 +ignore=W503,E203 diff --git a/.flake8-scons b/.flake8-scons index 2215e2c640e..f3dcd1f837f 100644 --- a/.flake8-scons +++ b/.flake8-scons @@ -13,4 +13,4 @@ exclude = install filename=*/SConscript, SConstruct max-line-length: 100 -ignore=F821,F841,W503 +ignore=F821,F841,W503,E203 diff --git a/.github/workflows/flake.yml b/.github/workflows/flake.yml index 8a2f2d53563..14d2b8b034c 100644 --- a/.github/workflows/flake.yml +++ b/.github/workflows/flake.yml @@ -28,12 +28,12 @@ jobs: with: # W503 and W504 are related as they conflict. W503 is the preferred style and all code # should be using it now. - ignore: 'W503' + ignore: 'W503,E203' exclude: 'src/control/vendor,src/client/pydaos/raw' max-line-length: '100' - name: flake8 Lint on SCons files. uses: py-actions/flake8@v2 with: - ignore: 'F821,W503,F841' + ignore: 'F821,W503,F841,E203' max-line-length: '100' args: '--filename */SConscript, SConstruct' diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 500d6abeea8..754fcbd93d4 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -40,3 +40,8 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - name: Check DAOS logging macro use. run: ./utils/cq/d_logging_check.py --github src + - name: Black + uses: psf/black@stable + with: + options: "--check --verbose --extend-exclude ftest" + src: "./src" diff --git a/pyproject.toml b/pyproject.toml index 15ea695e048..8be266b0a42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,22 @@ +[tool.black] +line-length = 100 +extend-exclude = ''' +( + ^/venv/ + | ^/src/rdb/raft/ + | ^/build/ + | ^/install/ + | ^/src/control/vendor/ +) +''' +include = ''' +( + SConstruct + | SConscript + | \.py$ +) +''' +skip-string-normalization = true [tool.isort] supported_extensions = ["py"] skip = [".git/", "src/rdb/raft", "build", "install", "venv", "src/control/vendor/"] From a63a1341cbbc2953e4400ae1dc1c0a6f0dc8d74f Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:05:05 +0000 Subject: [PATCH 02/26] Change options. 
Required-githooks: true Signed-off-by: Ashley Pittman --- .github/workflows/linting.yml | 3 +- src/SConscript | 63 ++++++++++++++++++++++++++--------- 2 files changed, 48 insertions(+), 18 deletions(-) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 754fcbd93d4..af10d26c1df 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -43,5 +43,4 @@ jobs: - name: Black uses: psf/black@stable with: - options: "--check --verbose --extend-exclude ftest" - src: "./src" + options: "--check --verbose --extend-exclude (ftest|vendor)" diff --git a/src/SConscript b/src/SConscript index c4cb419e047..f194a1c56a0 100644 --- a/src/SConscript +++ b/src/SConscript @@ -2,16 +2,45 @@ import os -HEADERS = ['daos_api.h', 'daos_types.h', 'daos_errno.h', 'daos_kv.h', - 'daos_event.h', 'daos_mgmt.h', 'daos_types.h', 'daos_array.h', - 'daos_task.h', 'daos_fs.h', 'daos_uns.h', 'daos_security.h', - 'daos_prop.h', 'daos_obj_class.h', 'daos_obj.h', 'daos_pool.h', - 'daos_cont.h', 'daos_version.h', 'daos_fs_sys.h', 'daos_s3.h', 'daos_pipeline.h'] +HEADERS = [ + 'daos_api.h', + 'daos_types.h', + 'daos_errno.h', + 'daos_kv.h', + 'daos_event.h', + 'daos_mgmt.h', + 'daos_types.h', + 'daos_array.h', + 'daos_task.h', + 'daos_fs.h', + 'daos_uns.h', + 'daos_security.h', + 'daos_prop.h', + 'daos_obj_class.h', + 'daos_obj.h', + 'daos_pool.h', + 'daos_cont.h', + 'daos_version.h', + 'daos_fs_sys.h', + 'daos_s3.h', + 'daos_pipeline.h', +] HEADERS_SRV = ['vos.h', 'vos_types.h'] -HEADERS_GURT = ['dlog.h', 'debug.h', 'common.h', 'hash.h', 'list.h', - 'heap.h', 'fault_inject.h', 'debug_setup.h', - 'types.h', 'atomic.h', 'slab.h', - 'telemetry_consumer.h', 'telemetry_common.h'] +HEADERS_GURT = [ + 'dlog.h', + 'debug.h', + 'common.h', + 'hash.h', + 'list.h', + 'heap.h', + 'fault_inject.h', + 'debug_setup.h', + 'types.h', + 'atomic.h', + 'slab.h', + 'telemetry_consumer.h', + 'telemetry_common.h', +] HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] @@ -40,13 +69,15 @@ def read_and_save_version(env): return version tmpl_hdr_in = os.path.join('include', 'daos_version.h.in') - subst_dict = {'@TMPL_MAJOR@': API_VERSION_MAJOR, - '@TMPL_MINOR@': API_VERSION_MINOR, - '@TMPL_FIX@': API_VERSION_FIX, - '@TMPL_PKG_MAJOR@': major, - '@TMPL_PKG_MINOR@': minor, - '@TMPL_PKG_FIX@': fix, - '@Template for @': ''} + subst_dict = { + '@TMPL_MAJOR@': API_VERSION_MAJOR, + '@TMPL_MINOR@': API_VERSION_MINOR, + '@TMPL_FIX@': API_VERSION_FIX, + '@TMPL_PKG_MAJOR@': major, + '@TMPL_PKG_MINOR@': minor, + '@TMPL_PKG_FIX@': fix, + '@Template for @': '', + } out = env.Substfile(tmpl_hdr_in, SUBST_DICT=subst_dict) if not GetOption('silent'): From 787a57a89f6a057079af155641a62a1171143cdc Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:18:39 +0000 Subject: [PATCH 03/26] Change some things. 
Required-githooks: true Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- ci/gha_helper.py | 18 +- ci/jira_query.py | 38 +- pyproject.toml | 1 + site_scons/prereq_tools/base.py | 1 + src/SConscript | 4 +- src/bio/SConscript | 14 +- src/cart/SConscript | 33 +- src/client/api/SConscript | 23 +- src/client/api/tests/SConscript | 22 +- src/client/dfs/SConscript | 3 +- src/client/dfuse/SConscript | 82 +- src/client/pydaos/SConscript | 19 +- src/client/pydaos/__init__.py | 2 +- src/client/pydaos/pydaos_core.py | 43 +- src/client/pydaos/raw/conversion.py | 14 +- src/client/pydaos/raw/daos_api.py | 834 +++++----- src/client/pydaos/raw/daos_cref.py | 301 ++-- src/client/setup.py | 8 +- src/common/SConscript | 78 +- src/common/tests/SConscript | 107 +- src/container/SConscript | 23 +- src/control/SConscript | 73 +- src/control/lib/spdk/ctests/SConscript | 6 +- src/dtx/SConscript | 8 +- src/engine/SConscript | 27 +- src/engine/tests/SConscript | 41 +- src/gurt/SConscript | 16 +- src/gurt/tests/SConscript | 10 +- src/mgmt/SConscript | 28 +- src/mgmt/tests/SConscript | 8 +- src/object/SConscript | 46 +- src/object/tests/SConscript | 21 +- src/pipeline/SConscript | 21 +- src/placement/SConscript | 5 +- src/placement/tests/SConscript | 24 +- src/pool/SConscript | 25 +- src/rdb/SConscript | 21 +- src/rdb/tests/SConscript | 8 +- src/rebuild/SConscript | 6 +- src/security/tests/SConscript | 16 +- src/tests/SConscript | 48 +- src/tests/suite/SConscript | 27 +- src/tests/suite/io_conf/SConscript | 12 +- src/vos/SConscript | 35 +- src/vos/storage_estimator/common/__init__.py | 8 +- src/vos/storage_estimator/common/dfs_sb.py | 78 +- src/vos/storage_estimator/common/explorer.py | 128 +- src/vos/storage_estimator/common/parse_csv.py | 46 +- .../common/tests/storage_estimator_test.py | 322 ++-- .../storage_estimator/common/tests/util.py | 7 +- src/vos/storage_estimator/common/util.py | 110 +- src/vos/storage_estimator/common/vos_size.py | 80 +- .../common/vos_structures.py | 86 +- .../daos_storage_estimator.py | 263 ++-- src/vos/tests/SConscript | 64 +- src/vos/tests/evt_stress.py | 3 +- utils/ansible/ftest/library/daos_hugepages.py | 51 +- utils/certs/SConscript | 8 +- utils/cq/d_logging_check.py | 52 +- utils/cq/daos_pylint.py | 79 +- .../10-submodule-update-check.py | 19 +- utils/node_local_test.py | 1339 +++++++++-------- utils/run_utest.py | 137 +- utils/sl/fake_scons/SCons/Action/__init__.py | 2 +- utils/sl/fake_scons/SCons/Builder/__init__.py | 3 +- utils/sl/fake_scons/SCons/Errors/__init__.py | 3 +- utils/sl/fake_scons/SCons/Script/__init__.py | 74 +- utils/sl/fake_scons/SCons/Subst/__init__.py | 2 +- .../sl/fake_scons/SCons/Variables/__init__.py | 15 +- .../sl/fake_scons/SCons/Warnings/__init__.py | 2 +- utils/sl/fake_scons/SCons/__init__.py | 8 +- 71 files changed, 2783 insertions(+), 2406 deletions(-) diff --git a/ci/gha_helper.py b/ci/gha_helper.py index ac4e2965b74..da165145a48 100755 --- a/ci/gha_helper.py +++ b/ci/gha_helper.py @@ -9,14 +9,16 @@ import sys from os.path import join -BUILD_FILES = ['site_scons/prereq_tools', - 'site_scons/components', - 'utils/build.config', - 'SConstruct', - '.github/workflows/landing-builds.yml', - '.dockerignore', - 'requirements.txt', - 'ci/gha_helper.py'] +BUILD_FILES = [ + 'site_scons/prereq_tools', + 'site_scons/components', + 'utils/build.config', + 'SConstruct', + '.github/workflows/landing-builds.yml', + '.dockerignore', + 'requirements.txt', + 'ci/gha_helper.py', +] COMMIT_CMD = ['git', 'rev-parse', '--short', 'HEAD'] 
diff --git a/ci/jira_query.py b/ci/jira_query.py index 06de4db7b7f..874d81ff08c 100755 --- a/ci/jira_query.py +++ b/ci/jira_query.py @@ -28,8 +28,22 @@ # Expected components from the commit message, and directory in src/, src/client or utils/ is also # valid. We've never checked/enforced these before so there have been a lot of values used in the # past. -VALID_COMPONENTS = ('agent', 'build', 'ci', 'csum', 'doc', 'gha', 'il', 'md', 'mercury', - 'packaging', 'pil4dfs', 'swim', 'test', 'tools') +VALID_COMPONENTS = ( + 'agent', + 'build', + 'ci', + 'csum', + 'doc', + 'gha', + 'il', + 'md', + 'mercury', + 'packaging', + 'pil4dfs', + 'swim', + 'test', + 'tools', +) # Expected ticket prefix. VALID_TICKET_PREFIX = ('DAOS', 'CORCI', 'SRE') @@ -57,9 +71,11 @@ def set_output(key, value): def valid_comp_from_dir(component): """Checks is a component is valid based on src tree""" - return os.path.isdir(os.path.join('src', component)) \ - or os.path.isdir(os.path.join('src', 'client', component)) \ + return ( + os.path.isdir(os.path.join('src', component)) + or os.path.isdir(os.path.join('src', 'client', component)) or os.path.isdir(os.path.join('utils', component)) + ) def fetch_pr_data(): @@ -144,8 +160,10 @@ def main(): ticket = server.issue(ticket_number, fields=FIELDS) except jira.exceptions.JIRAError: errors.append('Unable to load ticket data') - output = [f'Errors are {",".join(errors)}', - f'https://daosio.atlassian.net/browse/{ticket_number}'] + output = [ + f'Errors are {",".join(errors)}', + f'https://daosio.atlassian.net/browse/{ticket_number}', + ] set_output('message', '\n'.join(output)) print('Unable to load ticket data. Ticket may be private, or may not exist') return @@ -158,15 +176,17 @@ def main(): # Elevated priority, PRs to master where ticket is "Required for Version" is set. if ticket.fields.customfield_10045: - # Check the target branch here. Can not be done from a ticket number alone, so only perform # this check if we can. 
rv_priority = None for version in ticket.fields.customfield_10045: - if str(version) in ('2.0.3 Community Release', '2.0.3 Community Release', - '2.2 Community Release'): + if str(version) in ( + '2.0.3 Community Release', + '2.0.3 Community Release', + '2.2 Community Release', + ): rv_priority = 2 elif str(version) in ('2.4 Community Release'): rv_priority = 3 diff --git a/pyproject.toml b/pyproject.toml index 8be266b0a42..57507ae6249 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,7 @@ include = ''' ''' skip-string-normalization = true [tool.isort] +profile = "black" supported_extensions = ["py"] skip = [".git/", "src/rdb/raft", "build", "install", "venv", "src/control/vendor/"] line_length = 99 diff --git a/site_scons/prereq_tools/base.py b/site_scons/prereq_tools/base.py index b72bf04ecef..6cdff7d12a0 100644 --- a/site_scons/prereq_tools/base.py +++ b/site_scons/prereq_tools/base.py @@ -24,6 +24,7 @@ import datetime import errno import json + # pylint: disable=too-many-lines import os import shutil diff --git a/src/SConscript b/src/SConscript index f194a1c56a0..f41182306a0 100644 --- a/src/SConscript +++ b/src/SConscript @@ -26,7 +26,7 @@ HEADERS = [ 'daos_pipeline.h', ] HEADERS_SRV = ['vos.h', 'vos_types.h'] -HEADERS_GURT = [ +HEADERS_GURT = { 'dlog.h', 'debug.h', 'common.h', @@ -40,7 +40,7 @@ HEADERS_GURT = [ 'slab.h', 'telemetry_consumer.h', 'telemetry_common.h', -] +} HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] diff --git a/src/bio/SConscript b/src/bio/SConscript index 88211d8eaf1..98f65f5e902 100644 --- a/src/bio/SConscript +++ b/src/bio/SConscript @@ -1,7 +1,17 @@ """Build blob I/O""" -FILES = ['bio_buffer.c', 'bio_bulk.c', 'bio_config.c', 'bio_context.c', 'bio_device.c', - 'bio_monitor.c', 'bio_recovery.c', 'bio_xstream.c', 'bio_wal.c', 'smd.pb-c.c'] +FILES = [ + 'bio_buffer.c', + 'bio_bulk.c', + 'bio_config.c', + 'bio_context.c', + 'bio_device.c', + 'bio_monitor.c', + 'bio_recovery.c', + 'bio_xstream.c', + 'bio_wal.c', + 'smd.pb-c.c', +] def scons(): diff --git a/src/cart/SConscript b/src/cart/SConscript index 9b5b2c8ecef..eb3ee9a5bb0 100644 --- a/src/cart/SConscript +++ b/src/cart/SConscript @@ -7,12 +7,27 @@ from datetime import date import SCons.Action -SRC = ['crt_bulk.c', 'crt_context.c', 'crt_corpc.c', - 'crt_ctl.c', 'crt_debug.c', 'crt_group.c', 'crt_hg.c', 'crt_hg_proc.c', - 'crt_init.c', 'crt_iv.c', 'crt_register.c', - 'crt_rpc.c', 'crt_self_test_client.c', 'crt_self_test_service.c', - 'crt_swim.c', 'crt_tree.c', 'crt_tree_flat.c', 'crt_tree_kary.c', - 'crt_tree_knomial.c'] +SRC = [ + 'crt_bulk.c', + 'crt_context.c', + 'crt_corpc.c', + 'crt_ctl.c', + 'crt_debug.c', + 'crt_group.c', + 'crt_hg.c', + 'crt_hg_proc.c', + 'crt_init.c', + 'crt_iv.c', + 'crt_register.c', + 'crt_rpc.c', + 'crt_self_test_client.c', + 'crt_self_test_service.c', + 'crt_swim.c', + 'crt_tree.c', + 'crt_tree_flat.c', + 'crt_tree_kary.c', + 'crt_tree_knomial.c', +] def parse_pp(env, pp_targets): @@ -38,8 +53,10 @@ def consolidate_pp(env, parsed_targets): grepv = r"'struct sockaddr_in {'" preamble = env.Substfile('macro_prefix.h_in', SUBST_DICT={'@YEAR@': date.today().year}) parsed_sources = [x.abspath for x in parsed_targets] - cmd = (f"cat {preamble[0].abspath} > $TARGET; cat {' '.join(parsed_sources)}" - + f" | grep -v {grepv} | sort -u | sed {sed_d} >> $TARGET") + cmd = ( + f"cat {preamble[0].abspath} > $TARGET; cat {' '.join(parsed_sources)}" + + f" | grep -v {grepv} | sort -u | sed {sed_d} >> $TARGET" + ) header = env.Command('_structures_from_macros.h', preamble + 
parsed_targets, cmd) env.AddPostAction(header, SCons.Action.Action(copy_header, None)) return header diff --git a/src/client/api/SConscript b/src/client/api/SConscript index 62ba96ef600..578a6ad007b 100644 --- a/src/client/api/SConscript +++ b/src/client/api/SConscript @@ -1,7 +1,21 @@ """Build DAOS client""" -LIBDAOS_SRC = ['agent.c', 'array.c', 'container.c', 'event.c', 'init.c', 'job.c', 'kv.c', 'mgmt.c', - 'object.c', 'pool.c', 'rpc.c', 'task.c', 'tx.c', 'pipeline.c'] +LIBDAOS_SRC = [ + 'agent.c', + 'array.c', + 'container.c', + 'event.c', + 'init.c', + 'job.c', + 'kv.c', + 'mgmt.c', + 'object.c', + 'pool.c', + 'rpc.c', + 'task.c', + 'tx.c', + 'pipeline.c', +] def scons(): @@ -15,8 +29,9 @@ def scons(): libdaos_tgts[:0] = denv.SharedObject(LIBDAOS_SRC) if prereqs.client_requested(): - libdaos = env.d_library('daos', libdaos_tgts, SHLIBVERSION=API_VERSION, - LIBS=['daos_common']) + libdaos = env.d_library( + 'daos', libdaos_tgts, SHLIBVERSION=API_VERSION, LIBS=['daos_common'] + ) if hasattr(env, 'InstallVersionedLib'): env.InstallVersionedLib('$PREFIX/lib64/', libdaos, SHLIBVERSION=API_VERSION) else: diff --git a/src/client/api/tests/SConscript b/src/client/api/tests/SConscript index c3d105e8a30..ebaf944c71b 100644 --- a/src/client/api/tests/SConscript +++ b/src/client/api/tests/SConscript @@ -5,15 +5,19 @@ def scons(): """Execute build""" Import('denv') - eq_tests = denv.d_test_program('eq_tests', 'eq_tests.c', - LIBS=['daos', 'daos_common', 'gurt', 'cart', - 'pthread', 'cmocka']) - agent_tests = denv.d_test_program('agent_tests', 'agent_tests.c', - LIBS=['daos', 'daos_common', 'gurt', 'cart', - 'pthread', 'cmocka']) - job_tests = denv.d_test_program('job_tests', 'job_tests.c', - LIBS=['daos', 'daos_common', 'gurt', 'cart', - 'pthread', 'cmocka']) + eq_tests = denv.d_test_program( + 'eq_tests', 'eq_tests.c', LIBS=['daos', 'daos_common', 'gurt', 'cart', 'pthread', 'cmocka'] + ) + agent_tests = denv.d_test_program( + 'agent_tests', + 'agent_tests.c', + LIBS=['daos', 'daos_common', 'gurt', 'cart', 'pthread', 'cmocka'], + ) + job_tests = denv.d_test_program( + 'job_tests', + 'job_tests.c', + LIBS=['daos', 'daos_common', 'gurt', 'cart', 'pthread', 'cmocka'], + ) denv.Install('$PREFIX/bin/', [eq_tests, agent_tests, job_tests]) diff --git a/src/client/dfs/SConscript b/src/client/dfs/SConscript index 38512536397..ad08c0950fa 100644 --- a/src/client/dfs/SConscript +++ b/src/client/dfs/SConscript @@ -13,8 +13,7 @@ def configure_lustre(denv): # If Lustre installed build a Lustre-aware libduns conf = Configure(denv) gotversion = False - if not conf.CheckLibWithHeader('lustreapi', 'linux/lustre/lustre_user.h', - 'c'): + if not conf.CheckLibWithHeader('lustreapi', 'linux/lustre/lustre_user.h', 'c'): _print("No installed Lustre version detected") else: _print("Installed Lustre version detected") diff --git a/src/client/dfuse/SConscript b/src/client/dfuse/SConscript index 5de8992a246..cb90ae980f0 100644 --- a/src/client/dfuse/SConscript +++ b/src/client/dfuse/SConscript @@ -3,33 +3,37 @@ import os HEADERS = ['ioil_io.h', 'ioil_defines.h', 'ioil_api.h', 'ioil.h'] COMMON_SRC = ['dfuse_obj_da.c', 'dfuse_vector.c'] -DFUSE_SRC = ['dfuse_core.c', - 'dfuse_main.c', - 'dfuse_fuseops.c', - 'dfuse_cont.c', - 'dfuse_thread.c', - 'dfuse_pool.c'] -OPS_SRC = ['create', - 'fgetattr', - 'forget', - 'getxattr', - 'listxattr', - 'ioctl', - 'lookup', - 'mknod', - 'open', - 'opendir', - 'read', - 'rename', - 'readdir', - 'readlink', - 'removexattr', - 'setxattr', - 'setattr', - 'symlink', - 'unlink', - 'write', - 
'statfs'] +DFUSE_SRC = [ + 'dfuse_core.c', + 'dfuse_main.c', + 'dfuse_fuseops.c', + 'dfuse_cont.c', + 'dfuse_thread.c', + 'dfuse_pool.c', +] +OPS_SRC = [ + 'create', + 'fgetattr', + 'forget', + 'getxattr', + 'listxattr', + 'ioctl', + 'lookup', + 'mknod', + 'open', + 'opendir', + 'read', + 'rename', + 'readdir', + 'readlink', + 'removexattr', + 'setxattr', + 'setattr', + 'symlink', + 'unlink', + 'write', + 'statfs', +] IOIL_SRC = ['int_posix.c', 'int_read.c', 'int_write.c'] PIL4DFS_SRC = ['int_dfs.c', 'hook.c'] @@ -88,11 +92,9 @@ def build_client_libs_shared(env, prereqs): gen_script = ilenv.d_program('il/gen_script', ['il/gen_script.c'], LIBS=[]) if prereqs.test_requested(): - script = ilenv.Command('il/check_ioil_syms', gen_script, - "$SOURCE -s $TARGET") + script = ilenv.Command('il/check_ioil_syms', gen_script, "$SOURCE -s $TARGET") env.Install('$PREFIX/lib/daos/TESTING/scripts', script) - script = ilenv.Command('il/ioil-ld-opts', gen_script, - '$SOURCE -l $TARGET') + script = ilenv.Command('il/ioil-ld-opts', gen_script, '$SOURCE -l $TARGET') env.Install('$PREFIX/share/daos', script) env.InstallVersionedLib(os.path.join("$PREFIX", 'lib64'), dfuse_lib) @@ -137,7 +139,8 @@ def check_ioctl_def(context, ctype): context.Message(f'Checking if fuse ioctl is type {ctype} ') # pylint: disable-next=consider-using-f-string - src = """#include + src = ( + """#include extern void my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, @@ -146,7 +149,9 @@ my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, struct fuse_lowlevel_ops ops = {.ioctl = my_ioctl}; -""" % ctype # pylint: disable=consider-using-f-string +""" + % ctype + ) # pylint: disable=consider-using-f-string rc = context.TryCompile(src, '.c') context.Result(rc) @@ -158,13 +163,12 @@ def configure_fuse(cenv): if GetOption('help') or GetOption('clean'): return - check = Configure(cenv, - custom_tests={'CheckStructMember': check_struct_member, - 'CheckFuseIoctl': check_ioctl_def}) + check = Configure( + cenv, + custom_tests={'CheckStructMember': check_struct_member, 'CheckFuseIoctl': check_ioctl_def}, + ) - if check.CheckStructMember('#include ', - 'struct fuse_file_info', - 'cache_readdir'): + if check.CheckStructMember('#include ', 'struct fuse_file_info', 'cache_readdir'): cenv.AppendUnique(CPPDEFINES={'HAVE_CACHE_READDIR': '1'}) if check.CheckFuseIoctl('unsigned int'): diff --git a/src/client/pydaos/SConscript b/src/client/pydaos/SConscript index 6894fde3b0e..adb929f342f 100644 --- a/src/client/pydaos/SConscript +++ b/src/client/pydaos/SConscript @@ -24,15 +24,16 @@ def build_shim_module(): new_env.compiler_setup() - obj = new_env.SharedObject('pydaos_shim', 'pydaos_shim.c', - SHLINKFLAGS=[], - SHLIBPREFIX="") - base = new_env.d_library(target='pydaos_shim', source=[obj], - install_off="../../../..", - SHLINK='gcc -pthread -shared', - SHLINKFLAGS=[], - SHLIBPREFIX="", - SHLIBSUFFIX='.so') + obj = new_env.SharedObject('pydaos_shim', 'pydaos_shim.c', SHLINKFLAGS=[], SHLIBPREFIX="") + base = new_env.d_library( + target='pydaos_shim', + source=[obj], + install_off="../../../..", + SHLINK='gcc -pthread -shared', + SHLINKFLAGS=[], + SHLIBPREFIX="", + SHLIBSUFFIX='.so', + ) install_path = f'$PREFIX/lib64/python{version}/site-packages/pydaos' new_env.Install(install_path, base) # install new wrappers too diff --git a/src/client/pydaos/__init__.py b/src/client/pydaos/__init__.py index 2f13d83dd8d..871fefb37b0 100644 --- a/src/client/pydaos/__init__.py +++ b/src/client/pydaos/__init__.py @@ -34,7 +34,7 @@ def __str__(self): return self.message -class 
DaosClient(): +class DaosClient: # pylint: disable=too-few-public-methods # pylint: disable=attribute-defined-outside-init """ diff --git a/src/client/pydaos/pydaos_core.py b/src/client/pydaos/pydaos_core.py index 401b77a0982..60a4eced4b7 100644 --- a/src/client/pydaos/pydaos_core.py +++ b/src/client/pydaos/pydaos_core.py @@ -16,12 +16,12 @@ # Import Object class as an enumeration ObjClassID = enum.Enum( "Enumeration of the DAOS object classes (OC).", - {key: value for key, value in list(pydaos_shim.__dict__.items()) - if key.startswith("OC_")}) + {key: value for key, value in list(pydaos_shim.__dict__.items()) if key.startswith("OC_")}, +) def _get_object_id(cid): - """ Get the existing DAOS object class ID based on name. """ + """Get the existing DAOS object class ID based on name.""" # Default to OC_UNKNOWN (0), which will automatically select an object class. if cid == "0": @@ -30,7 +30,7 @@ def _get_object_id(cid): class DObjNotFound(Exception): - """Raised by get if name associated with DAOS object not found """ + """Raised by get if name associated with DAOS object not found""" def __init__(self, name): self.name = name @@ -40,7 +40,7 @@ def __str__(self): return "Failed to open '{}'".format(self.name) -class DCont(): +class DCont: """ Class representing of DAOS python container Can be identified via a path or a combination of pool label and container @@ -73,8 +73,7 @@ def __init__(self, pool=None, cont=None, path=None): self._dc = DaosClient() self._hdl = None if path is None and (pool is None or cont is None): - raise PyDError("invalid pool or container UUID", - -pydaos_shim.DER_INVAL) + raise PyDError("invalid pool or container UUID", -pydaos_shim.DER_INVAL) if path is not None: self.pool = None self.cont = None @@ -95,7 +94,7 @@ def __del__(self): raise PyDError("failed to close container", ret) def get(self, name): - """ Look up DAOS object associated with name """ + """Look up DAOS object associated with name""" (ret, hi, lo, otype) = pydaos_shim.cont_get(DAOS_MAGIC, self._hdl, name) if ret == -pydaos_shim.DER_NONEXIST: @@ -114,14 +113,15 @@ def __getitem__(self, name): return self.get(name) def dict(self, name, v: dict = None, cid="0"): - """ Create new DDict object """ + """Create new DDict object""" # Get the existing class ID based on class name given. Default to 0 objId = _get_object_id(cid) # Insert name into root kv and get back an object ID - (ret, hi, lo) = pydaos_shim.cont_newobj(DAOS_MAGIC, self._hdl, name, - objId, pydaos_shim.PYDAOS_DICT) + (ret, hi, lo) = pydaos_shim.cont_newobj( + DAOS_MAGIC, self._hdl, name, objId, pydaos_shim.PYDAOS_DICT + ) if ret != pydaos_shim.DER_SUCCESS: raise PyDError("failed to create DAOS dict", ret) @@ -135,14 +135,15 @@ def dict(self, name, v: dict = None, cid="0"): def array(self, name, v: list = None, cid="0"): # pylint: disable=unused-argument - """ Create new DArray object """ + """Create new DArray object""" # Get the existing class ID based on class name given. 
Default to 0 objId = _get_object_id(cid) # Insert name into root kv and get back an object ID - (ret, hi, lo) = pydaos_shim.cont_newobj(DAOS_MAGIC, self._hdl, name, - objId, pydaos_shim.PYDAOS_ARRAY) + (ret, hi, lo) = pydaos_shim.cont_newobj( + DAOS_MAGIC, self._hdl, name, objId, pydaos_shim.PYDAOS_ARRAY + ) if ret != pydaos_shim.DER_SUCCESS: raise PyDError("failed to create DAOS array", ret) @@ -160,7 +161,7 @@ def __repr__(self): return 'daos://{}/{}'.format(self.pool, self.cont) -class _DObj(): +class _DObj: # pylint: disable=no-member def __init__(self, name, hdl, hi, lo, cont): @@ -189,9 +190,10 @@ def __repr__(self): return "[" + hex(self.hi) + ":" + hex(self.lo) + "]" -class DDictIter(): +class DDictIter: # pylint: disable=too-few-public-methods - """ Iterator class for DDict """ + """Iterator class for DDict""" + def __init__(self, ddict): self._dc = DaosClient() self._entries = [] @@ -213,9 +215,9 @@ def __next__(self): raise StopIteration() # read more entries - (ret, nr, sz, anchor) = pydaos_shim.kv_iter(DAOS_MAGIC, self._kv.oh, - self._entries, self._nr, - self._size, self._anchor) + (ret, nr, sz, anchor) = pydaos_shim.kv_iter( + DAOS_MAGIC, self._kv.oh, self._entries, self._nr, self._size, self._anchor + ) if ret != pydaos_shim.DER_SUCCESS: raise PyDError("failed to enumerate Dictionary", ret) @@ -390,6 +392,7 @@ def __ne__(self, other): def __iter__(self): return DDictIter(self) + # pylint: disable=too-few-public-methods diff --git a/src/client/pydaos/raw/conversion.py b/src/client/pydaos/raw/conversion.py index fd8a0a9fe75..657a75d5237 100644 --- a/src/client/pydaos/raw/conversion.py +++ b/src/client/pydaos/raw/conversion.py @@ -10,20 +10,22 @@ def c_uuid_to_str(cuuid): - """ utility function to convert a C uuid into a standard string format """ - return '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}' \ - '{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(*cuuid) + """utility function to convert a C uuid into a standard string format""" + return ( + '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}' + '{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(*cuuid) + ) def c_uuid(puuid, cuuid): - """ utility function to create a UUID in C format from a python UUID """ + """utility function to create a UUID in C format from a python UUID""" hexstr = puuid.hex for index in range(0, 31, 2): - cuuid[int(index / 2)] = int(hexstr[index:index + 2], 16) + cuuid[int(index / 2)] = int(hexstr[index : index + 2], 16) def str_to_c_uuid(uuidstr): - """ utility function to convert string format uuid to a C uuid """ + """utility function to convert string format uuid to a C uuid""" uuidstr2 = '{' + uuidstr + '}' puuid = uuid.UUID(uuidstr2) cuuid = (ctypes.c_ubyte * 16)() diff --git a/src/client/pydaos/raw/daos_api.py b/src/client/pydaos/raw/daos_api.py index d12d53eb355..f31ed7b510d 100644 --- a/src/client/pydaos/raw/daos_api.py +++ b/src/client/pydaos/raw/daos_api.py @@ -21,13 +21,17 @@ DaosObjClass = enum.Enum( "DaosObjClass", - {key: value for key, value in list(pydaos_shim.__dict__.items()) - if key.startswith("OC_")}) + {key: value for key, value in list(pydaos_shim.__dict__.items()) if key.startswith("OC_")}, +) DaosContPropEnum = enum.Enum( "DaosContPropEnum", - {key: value for key, value in list(pydaos_shim.__dict__.items()) - if key.startswith("DAOS_PROP_")}) + { + key: value + for key, value in list(pydaos_shim.__dict__.items()) + if key.startswith("DAOS_PROP_") + }, +) # Value used to determine whether we need to call daos_obj_list_dkey again. 
# This is a value used in daos_anchor_is_eof in daos_api.h. Not sure how to @@ -35,7 +39,7 @@ DAOS_ANCHOR_TYPE_EOF = 3 -class DaosPool(): +class DaosPool: """A python object representing a DAOS pool.""" def __init__(self, context): @@ -82,8 +86,14 @@ def connect(self, flags, cb_func=None): uuid_str = self.get_uuid_str() if cb_func is None: - ret = func(bytes(uuid_str, encoding='utf-8'), self.group, c_flags, - ctypes.byref(self.handle), ctypes.byref(c_info), None) + ret = func( + bytes(uuid_str, encoding='utf-8'), + self.group, + c_flags, + ctypes.byref(self.handle), + ctypes.byref(c_info), + None, + ) if ret != 0: self.handle = 0 @@ -91,14 +101,17 @@ def connect(self, flags, cb_func=None): self.connected = 1 else: event = daos_cref.DaosEvent() - params = [bytes(uuid_str, encoding='utf-8'), self.group, c_flags, - ctypes.byref(self.handle), ctypes.byref(c_info), event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [ + bytes(uuid_str, encoding='utf-8'), + self.group, + c_flags, + ctypes.byref(self.handle), + ctypes.byref(c_info), + event, + ] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def disconnect(self, cb_func=None): @@ -112,12 +125,9 @@ def disconnect(self, cb_func=None): else: event = daos_cref.DaosEvent() params = [self.handle, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def local2global(self): @@ -172,11 +182,9 @@ def pool_svc_stop(self, cb_func=None): else: event = daos_cref.DaosEvent() params = [self.handle, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def pool_query(self, cb_func=None): @@ -187,21 +195,16 @@ def pool_query(self, cb_func=None): self.pool_info.pi_bits = ctypes.c_ulong(-1) if cb_func is None: - ret = func(self.handle, None, ctypes.byref(self.pool_info), - None, None) + ret = func(self.handle, None, ctypes.byref(self.pool_info), None, None) if ret != 0: raise DaosApiError("Pool query returned non-zero. RC: {0}".format(ret)) return self.pool_info event = daos_cref.DaosEvent() - params = [self.handle, None, ctypes.byref(self.pool_info), None, - event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [self.handle, None, ctypes.byref(self.pool_info), None, event] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() return None @@ -211,20 +214,16 @@ def target_query(self, tgt, rank, cb_func=None): func = self.context.get_function('query-target') if cb_func is None: - ret = func(self.handle, tgt, rank, ctypes.byref(self.target_info), - None) + ret = func(self.handle, tgt, rank, ctypes.byref(self.target_info), None) if ret != 0: raise DaosApiError("Pool query returned non-zero. 
RC: {0}".format(ret)) return self.target_info event = daos_cref.DaosEvent() params = [self.handle, tgt, rank, ctypes.byref(self.target_info), event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() return None @@ -260,8 +259,7 @@ def list_attr(self, poh=None, cb_func=None): func = self.context.get_function('list-pool-attr') ret = func(self.handle, sbuf, t_size, None) if ret != 0: - raise DaosApiError("Pool List-attr returned non-zero. RC:{0}" - .format(ret)) + raise DaosApiError("Pool List-attr returned non-zero. RC:{0}".format(ret)) buf = t_size[0] buff = ctypes.create_string_buffer(buf + 1).raw @@ -271,17 +269,13 @@ def list_attr(self, poh=None, cb_func=None): if cb_func is None: ret = func(self.handle, buff, total_size, None) if ret != 0: - raise DaosApiError("Pool List Attribute returned non-zero. " - "RC: {0}".format(ret)) + raise DaosApiError("Pool List Attribute returned non-zero. " "RC: {0}".format(ret)) else: event = daos_cref.DaosEvent() params = [self.handle, buff, total_size, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() return total_size.contents, buff @@ -321,17 +315,13 @@ def set_attr(self, data, poh=None, cb_func=None): if cb_func is None: ret = func(self.handle, no_of_att, names, values, sizes, None) if ret != 0: - raise DaosApiError("Pool Set Attribute returned non-zero" - "RC: {0}".format(ret)) + raise DaosApiError("Pool Set Attribute returned non-zero" "RC: {0}".format(ret)) else: event = daos_cref.DaosEvent() params = [self.handle, no_of_att, names, values, sizes, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def get_attr(self, attr_names, poh=None, cb_func=None): @@ -359,36 +349,43 @@ def get_attr(self, attr_names, poh=None, cb_func=None): no_of_att = ctypes.c_int(attr_count) buffers = ctypes.c_char_p * attr_count - buff = buffers(*[ctypes.c_char_p(ctypes.create_string_buffer(100).raw) - for i in range(attr_count)]) + buff = buffers( + *[ctypes.c_char_p(ctypes.create_string_buffer(100).raw) for i in range(attr_count)] + ) size_of_att_val = [100] * attr_count sizes = (ctypes.c_size_t * attr_count)(*size_of_att_val) func = self.context.get_function('get-pool-attr') if cb_func is None: - ret = func(self.handle, no_of_att, ctypes.byref(attr_names_c), - ctypes.byref(buff), sizes, None) + ret = func( + self.handle, no_of_att, ctypes.byref(attr_names_c), ctypes.byref(buff), sizes, None + ) if ret != 0: - raise DaosApiError("Pool Get Attribute returned non-zero. " - "RC: {0}".format(ret)) + raise DaosApiError("Pool Get Attribute returned non-zero. " "RC: {0}".format(ret)) # Construct the results dictionary from buff and sizes set in the function # call. results = {} index = 0 for attr in attr_names: - results[attr] = buff[index][:sizes[index]] + results[attr] = buff[index][: sizes[index]] index += 1 return results # Asynchronous mode. 
event = daos_cref.DaosEvent() - params = [self.handle, no_of_att, ctypes.byref(attr_names_c), ctypes.byref(buff), - sizes, event] + params = [ + self.handle, + no_of_att, + ctypes.byref(attr_names_c), + ctypes.byref(buff), + sizes, + event, + ] thread = threading.Thread( - target=daos_cref.async_worker, args=( - func, params, self.context, cb_func, self)) + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() # Return buff and sizes because at this point, the values aren't set. The @@ -430,19 +427,19 @@ class DaosObjClassOld(enum.IntEnum): # pylint: disable=no-member ConvertObjClass = { - DaosObjClassOld.DAOS_OC_TINY_RW: DaosObjClass.OC_S1, - DaosObjClassOld.DAOS_OC_SMALL_RW: DaosObjClass.OC_S4, - DaosObjClassOld.DAOS_OC_LARGE_RW: DaosObjClass.OC_SX, - DaosObjClassOld.DAOS_OC_R2S_RW: DaosObjClass.OC_RP_2G1, - DaosObjClassOld.DAOS_OC_R2_RW: DaosObjClass.OC_RP_2G2, - DaosObjClassOld.DAOS_OC_R2_MAX_RW: DaosObjClass.OC_RP_2GX, - DaosObjClassOld.DAOS_OC_R3S_RW: DaosObjClass.OC_RP_3G1, - DaosObjClassOld.DAOS_OC_R3_RW: DaosObjClass.OC_RP_3G2, - DaosObjClassOld.DAOS_OC_R3_MAX_RW: DaosObjClass.OC_RP_3GX, - DaosObjClassOld.DAOS_OC_R4S_RW: DaosObjClass.OC_RP_4G1, - DaosObjClassOld.DAOS_OC_R4_RW: DaosObjClass.OC_RP_4G2, - DaosObjClassOld.DAOS_OC_R4_MAX_RW: DaosObjClass.OC_RP_4GX, - DaosObjClassOld.DAOS_OC_REPL_MAX_RW: DaosObjClass.OC_RP_XSF + DaosObjClassOld.DAOS_OC_TINY_RW: DaosObjClass.OC_S1, + DaosObjClassOld.DAOS_OC_SMALL_RW: DaosObjClass.OC_S4, + DaosObjClassOld.DAOS_OC_LARGE_RW: DaosObjClass.OC_SX, + DaosObjClassOld.DAOS_OC_R2S_RW: DaosObjClass.OC_RP_2G1, + DaosObjClassOld.DAOS_OC_R2_RW: DaosObjClass.OC_RP_2G2, + DaosObjClassOld.DAOS_OC_R2_MAX_RW: DaosObjClass.OC_RP_2GX, + DaosObjClassOld.DAOS_OC_R3S_RW: DaosObjClass.OC_RP_3G1, + DaosObjClassOld.DAOS_OC_R3_RW: DaosObjClass.OC_RP_3G2, + DaosObjClassOld.DAOS_OC_R3_MAX_RW: DaosObjClass.OC_RP_3GX, + DaosObjClassOld.DAOS_OC_R4S_RW: DaosObjClass.OC_RP_4G1, + DaosObjClassOld.DAOS_OC_R4_RW: DaosObjClass.OC_RP_4G2, + DaosObjClassOld.DAOS_OC_R4_MAX_RW: DaosObjClass.OC_RP_4GX, + DaosObjClassOld.DAOS_OC_REPL_MAX_RW: DaosObjClass.OC_RP_XSF, } # pylint: enable=no-member @@ -477,18 +474,16 @@ def get_object_class(item): return ConvertObjClass[item] except KeyError: # No conversion exists for the old DAOS object class - raise DaosApiError( - "No conversion exists for the {} DAOS object class".format( - item)) + raise DaosApiError("No conversion exists for the {} DAOS object class".format(item)) elif isinstance(item, DaosObjClass): return item else: raise DaosApiError( - "Unknown DAOS object enumeration class for {} ({})".format( - item, type(item))) + "Unknown DAOS object enumeration class for {} ({})".format(item, type(item)) + ) -class DaosObj(): +class DaosObj: """A class representing an object stored in a DAOS container.""" def __init__(self, context, container, c_oid=None): @@ -507,8 +502,10 @@ def __del__(self): func = self.context.get_function('close-obj') ret = func(self.obj_handle, None) if ret != 0: - raise DaosApiError("Object close returned non-zero. RC: {0} " - "handle: {1}".format(ret, self.obj_handle)) + raise DaosApiError( + "Object close returned non-zero. 
RC: {0} " + "handle: {1}".format(ret, self.obj_handle) + ) self.obj_handle = None def __str__(self): @@ -548,11 +545,9 @@ def create(self, rank=None, objcls=None, seed=None): self.c_oid.hi = func(seed) func = self.context.get_function('generate-oid') - ret = func(self.container.coh, ctypes.byref(self.c_oid), 0, obj_cls_int, - 0, 0) + ret = func(self.container.coh, ctypes.byref(self.c_oid), 0, obj_cls_int, 0, 0) if ret != 0: - raise DaosApiError("Object generate oid returned non-zero. RC: {0} " - .format(ret)) + raise DaosApiError("Object generate oid returned non-zero. RC: {0} ".format(ret)) if rank is not None: self.c_oid.hi |= rank << 24 @@ -563,11 +558,9 @@ def open(self): self.obj_handle = ctypes.c_uint64(0) func = self.context.get_function('open-obj') - ret = func(self.container.coh, self.c_oid, c_mode, - ctypes.byref(self.obj_handle), None) + ret = func(self.container.coh, self.c_oid, c_mode, ctypes.byref(self.obj_handle), None) if ret != 0: - raise DaosApiError("Object open returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object open returned non-zero. RC: {0}".format(ret)) def close(self): """Close this object.""" @@ -575,8 +568,7 @@ def close(self): func = self.context.get_function('close-obj') ret = func(self.obj_handle, None) if ret != 0: - raise DaosApiError("Object close returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object close returned non-zero. RC: {0}".format(ret)) self.obj_handle = None def refresh_attr(self, txn=daos_cref.DAOS_TX_NONE): @@ -588,13 +580,13 @@ def refresh_attr(self, txn=daos_cref.DAOS_TX_NONE): an independent transaction """ if self.c_oid is None: - raise DaosApiError( - "refresh_attr called but object not initialized") + raise DaosApiError("refresh_attr called but object not initialized") if self.obj_handle is None: self.open() - rank_list = ctypes.cast(ctypes.pointer((ctypes.c_uint32 * 5)()), - ctypes.POINTER(ctypes.c_uint32)) + rank_list = ctypes.cast( + ctypes.pointer((ctypes.c_uint32 * 5)()), ctypes.POINTER(ctypes.c_uint32) + ) self.c_tgts = daos_cref.RankList(rank_list, 5) func = self.context.get_function('query-obj') @@ -613,15 +605,15 @@ def get_layout(self): obj_layout_ptr = ctypes.POINTER(daos_cref.DaosObjLayout)() func = self.context.get_function('get-layout') - ret = func( - self.container.coh, self.c_oid, ctypes.byref(obj_layout_ptr)) + ret = func(self.container.coh, self.c_oid, ctypes.byref(obj_layout_ptr)) if ret == 0: shards = obj_layout_ptr[0].ol_shards[0][0].os_replica_nr del self.tgt_rank_list[:] for index in range(shards): self.tgt_rank_list.append( - obj_layout_ptr[0].ol_shards[0][0].os_shard_loc[index].sd_rank) + obj_layout_ptr[0].ol_shards[0][0].os_shard_loc[index].sd_rank + ) else: raise DaosApiError("get_layout returned. RC: {0}".format(ret)) @@ -643,17 +635,13 @@ def punch(self, txn, cb_func=None): if cb_func is None: ret = func(self.obj_handle, c_tx, 0, None) if ret != 0: - raise DaosApiError("punch-dkeys returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("punch-dkeys returned non-zero. 
RC: {0}".format(ret)) else: event = daos_cref.DaosEvent() params = [self.obj_handle, c_tx, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def punch_dkeys(self, txn, dkeys, cb_func=None): @@ -691,22 +679,15 @@ def punch_dkeys(self, txn, dkeys, cb_func=None): # create synchronously, if its there then run it in a thread func = self.context.get_function('punch-dkeys') if cb_func is None: - ret = func(self.obj_handle, c_tx, 0, c_len_dkeys, - ctypes.byref(c_dkeys), None) + ret = func(self.obj_handle, c_tx, 0, c_len_dkeys, ctypes.byref(c_dkeys), None) if ret != 0: - raise DaosApiError("punch-dkeys returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("punch-dkeys returned non-zero. RC: {0}".format(ret)) else: event = daos_cref.DaosEvent() - params = [ - self.obj_handle, c_tx, c_len_dkeys, ctypes.byref(c_dkeys), - event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [self.obj_handle, c_tx, c_len_dkeys, ctypes.byref(c_dkeys), event] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def punch_akeys(self, txn, dkey, akeys, cb_func=None): @@ -747,32 +728,40 @@ def punch_akeys(self, txn, dkey, akeys, cb_func=None): # create synchronously, if its there then run it in a thread func = self.context.get_function('punch-akeys') if cb_func is None: - ret = func(self.obj_handle, c_tx, 0, ctypes.byref(c_dkey_iov), - c_len_akeys, ctypes.byref(c_akeys), None) + ret = func( + self.obj_handle, + c_tx, + 0, + ctypes.byref(c_dkey_iov), + c_len_akeys, + ctypes.byref(c_akeys), + None, + ) if ret != 0: - raise DaosApiError("punch-akeys returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("punch-akeys returned non-zero. RC: {0}".format(ret)) else: event = daos_cref.DaosEvent() - params = [self.obj_handle, c_tx, ctypes.byref(c_dkey_iov), - c_len_akeys, ctypes.byref(c_akeys), event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [ + self.obj_handle, + c_tx, + ctypes.byref(c_dkey_iov), + c_len_akeys, + ctypes.byref(c_akeys), + event, + ] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() -class IORequest(): +class IORequest: """Python object that centralizes details about an I/O type. Type is either 1 (single) or 2 (array) """ - def __init__(self, context, container, obj, rank=None, iotype=1, - objtype=None): + def __init__(self, context, container, obj, rank=None, iotype=1, objtype=None): """Initialize an IORequest object. 
Args: @@ -827,8 +816,7 @@ def insert_array(self, dkey, akey, c_data, txn=daos_cref.DAOS_TX_NONE): sgl_iov_list[idx].iov_buf = ctypes.cast(item[0], ctypes.c_void_p) idx += 1 - self.sgl.sg_iovs = ctypes.cast(ctypes.pointer(sgl_iov_list), - ctypes.POINTER(daos_cref.IOV)) + self.sgl.sg_iovs = ctypes.cast(ctypes.pointer(sgl_iov_list), ctypes.POINTER(daos_cref.IOV)) self.sgl.sg_nr = len(c_data) self.sgl.sg_nr_out = len(c_data) @@ -854,14 +842,20 @@ def insert_array(self, dkey, akey, c_data, txn=daos_cref.DAOS_TX_NONE): dkey_iov.iov_buf_len = ctypes.sizeof(dkey) dkey_iov.iov_len = ctypes.sizeof(dkey) - ret = func(self.obj.obj_handle, txn, 0, ctypes.byref(dkey_iov), - 1, ctypes.byref(self.iod), ctypes.byref(self.sgl), None) + ret = func( + self.obj.obj_handle, + txn, + 0, + ctypes.byref(dkey_iov), + 1, + ctypes.byref(self.iod), + ctypes.byref(self.sgl), + None, + ) if ret != 0: - raise DaosApiError("Object update returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object update returned non-zero. RC: {0}".format(ret)) - def fetch_array(self, dkey, akey, rec_count, rec_size, - txn=daos_cref.DAOS_TX_NONE): + def fetch_array(self, dkey, akey, rec_count, rec_size, txn=daos_cref.DAOS_TX_NONE): """Retrieve an array data from a dkey/akey pair. dkey --1st level key for the array value @@ -891,11 +885,10 @@ def fetch_array(self, dkey, akey, rec_count, rec_size, sgl_iov_list = (daos_cref.IOV * rec_count.value)() for index in range(rec_count.value): sgl_iov_list[index].iov_buf_len = rec_size - sgl_iov_list[index].iov_buf = ( - ctypes.cast(ctypes.create_string_buffer(rec_size.value), - ctypes.c_void_p)) - self.sgl.sg_iovs = ctypes.cast(ctypes.pointer(sgl_iov_list), - ctypes.POINTER(daos_cref.IOV)) + sgl_iov_list[index].iov_buf = ctypes.cast( + ctypes.create_string_buffer(rec_size.value), ctypes.c_void_p + ) + self.sgl.sg_iovs = ctypes.cast(ctypes.pointer(sgl_iov_list), ctypes.POINTER(daos_cref.IOV)) self.sgl.sg_nr = rec_count self.sgl.sg_nr_out = rec_count @@ -907,22 +900,28 @@ def fetch_array(self, dkey, akey, rec_count, rec_size, # now do it func = self.context.get_function('fetch-obj') - ret = func(self.obj.obj_handle, txn, 0, ctypes.byref(dkey_iov), 1, - ctypes.byref(self.iod), ctypes.byref(self.sgl), None, None) + ret = func( + self.obj.obj_handle, + txn, + 0, + ctypes.byref(dkey_iov), + 1, + ctypes.byref(self.iod), + ctypes.byref(self.sgl), + None, + None, + ) if ret != 0: - raise DaosApiError("Array fetch returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Array fetch returned non-zero. RC: {0}".format(ret)) # convert the output into a python list rather than return C types # outside this file output = [] for index in range(rec_count.value): - output.append(ctypes.string_at(sgl_iov_list[index].iov_buf, - rec_size.value)) + output.append(ctypes.string_at(sgl_iov_list[index].iov_buf, rec_size.value)) return output - def single_insert(self, dkey, akey, value, size, - txn=daos_cref.DAOS_TX_NONE): + def single_insert(self, dkey, akey, value, size, txn=daos_cref.DAOS_TX_NONE): """Update object with with a single value. 
dkey --1st level key for the array value @@ -967,14 +966,20 @@ def single_insert(self, dkey, akey, value, size, dkey_ptr = None func = self.context.get_function('update-obj') - ret = func(self.obj.obj_handle, txn, 0, dkey_ptr, 1, - ctypes.byref(self.iod), ctypes.byref(self.sgl), None) + ret = func( + self.obj.obj_handle, + txn, + 0, + dkey_ptr, + 1, + ctypes.byref(self.iod), + ctypes.byref(self.sgl), + None, + ) if ret != 0: - raise DaosApiError("Object update returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object update returned non-zero. RC: {0}".format(ret)) - def single_fetch(self, dkey, akey, size, test_hints=None, - txn=daos_cref.DAOS_TX_NONE): + def single_fetch(self, dkey, akey, size, test_hints=None, txn=daos_cref.DAOS_TX_NONE): """Retrieve a single value from a dkey/akey pair. dkey --1st level key for the single value @@ -1037,11 +1042,9 @@ def single_fetch(self, dkey, akey, size, test_hints=None, # now do it func = self.context.get_function('fetch-obj') - ret = func(self.obj.obj_handle, txn, 0, dkey_ptr, - 1, iod_ptr, sgl_ptr, None, None) + ret = func(self.obj.obj_handle, txn, 0, dkey_ptr, 1, iod_ptr, sgl_ptr, None, None) if ret != 0: - raise DaosApiError("Object fetch returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object fetch returned non-zero. RC: {0}".format(ret)) return buf def multi_akey_insert(self, dkey, data, txn): @@ -1061,7 +1064,6 @@ def multi_akey_insert(self, dkey, data, txn): sgl_list = (daos_cref.SGL * count)() index = 0 for tup in data: - sgl_iov = daos_cref.IOV() sgl_iov.iov_len = ctypes.c_size_t(len(tup[1]) + 1) sgl_iov.iov_buf_len = ctypes.c_size_t(len(tup[1]) + 1) @@ -1093,11 +1095,9 @@ def multi_akey_insert(self, dkey, data, txn): # now do it func = self.context.get_function('update-obj') - ret = func(self.obj.obj_handle, txn, 0, dkey_ptr, c_count, - iod_ptr, sgl_ptr, None) + ret = func(self.obj.obj_handle, txn, 0, dkey_ptr, c_count, iod_ptr, sgl_ptr, None) if ret != 0: - raise DaosApiError("Object update returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Object update returned non-zero. RC: {0}".format(ret)) def multi_akey_fetch(self, dkey, keys, txn): """Retrieve multiple akeys & associated data. @@ -1149,16 +1149,23 @@ def multi_akey_fetch(self, dkey, keys, txn): # now do it func = self.context.get_function('fetch-obj') - ret = func(self.obj.obj_handle, txn, 0, ctypes.byref(dkey_iov), - c_count, ctypes.byref(iods), sgl_ptr, None, None) + ret = func( + self.obj.obj_handle, + txn, + 0, + ctypes.byref(dkey_iov), + c_count, + ctypes.byref(iods), + sgl_ptr, + None, + None, + ) if ret != 0: - raise DaosApiError("multikey fetch returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("multikey fetch returned non-zero. RC: {0}".format(ret)) result = {} index = 0 for sgl in sgl_list: - char_p = ctypes.cast((sgl.sg_iovs).contents.iov_buf, - ctypes.c_char_p) + char_p = ctypes.cast((sgl.sg_iovs).contents.iov_buf, ctypes.c_char_p) result[(keys[index][0]).value] = char_p.value index += 1 @@ -1235,13 +1242,12 @@ def collect_keys(key_count, daos_kds, buf): cur = 0 for index in range(key_count): - keys.append(buf[cur:cur + daos_kds[index].kd_key_len]) + keys.append(buf[cur : cur + daos_kds[index].kd_key_len]) cur += daos_kds[index].kd_key_len return keys - def list_dkey(self, obj_handle=None, key_num=5, key_len=512, - txn=daos_cref.DAOS_TX_NONE): + def list_dkey(self, obj_handle=None, key_num=5, key_len=512, txn=daos_cref.DAOS_TX_NONE): """Return dkeys in the object. 
Because the underlying method takes buffer of given size and stores the @@ -1286,8 +1292,7 @@ def list_dkey(self, obj_handle=None, key_num=5, key_len=512, nr_val = ctypes.c_uint32(key_num) # Define the array of ctypes DaosKeyDescriptor. - daos_kds = (daos_cref.DaosKeyDescriptor * key_num)( - daos_cref.DaosKeyDescriptor()) + daos_kds = (daos_cref.DaosKeyDescriptor * key_num)(daos_cref.DaosKeyDescriptor()) # Prepare the scatter/gather list to store the dkeys obtained. Use # key_num * key_len for the buffer size. @@ -1303,13 +1308,17 @@ def list_dkey(self, obj_handle=None, key_num=5, key_len=512, while not anchor.da_type == DAOS_ANCHOR_TYPE_EOF: ret = list_dkey_func( - obj_handle, txn, ctypes.byref(nr_val), - ctypes.byref(daos_kds), ctypes.byref(self.sgl), - ctypes.byref(anchor), None) + obj_handle, + txn, + ctypes.byref(nr_val), + ctypes.byref(daos_kds), + ctypes.byref(self.sgl), + ctypes.byref(anchor), + None, + ) if ret != 0: - raise DaosApiError( - "list_dkey returned non-zero. RC: {0}".format(ret)) + raise DaosApiError("list_dkey returned non-zero. RC: {0}".format(ret)) # No dkeys returned. if nr_val.value == 0: @@ -1318,13 +1327,11 @@ def list_dkey(self, obj_handle=None, key_num=5, key_len=512, # The keys are written contiguously in the buffer. Use the key size # set in the kds (key descriptors) object to get the individual # keys. - dkeys.extend( - self.collect_keys(key_count=nr_val.value, daos_kds=daos_kds, buf=buf)) + dkeys.extend(self.collect_keys(key_count=nr_val.value, daos_kds=daos_kds, buf=buf)) return dkeys - def list_akey(self, dkey, obj_handle=None, key_num=5, key_len=512, - txn=daos_cref.DAOS_TX_NONE): + def list_akey(self, dkey, obj_handle=None, key_num=5, key_len=512, txn=daos_cref.DAOS_TX_NONE): """Return akeys from given dkey in the object. See list_dkey doc for details. @@ -1353,8 +1360,7 @@ def list_akey(self, dkey, obj_handle=None, key_num=5, key_len=512, nr_val = ctypes.c_uint32(key_num) - daos_kds = (daos_cref.DaosKeyDescriptor * key_num)( - daos_cref.DaosKeyDescriptor()) + daos_kds = (daos_cref.DaosKeyDescriptor * key_num)(daos_cref.DaosKeyDescriptor()) buf = self.prepare_sgl(key_num=key_num, key_len=key_len) @@ -1368,13 +1374,18 @@ def list_akey(self, dkey, obj_handle=None, key_num=5, key_len=512, while not anchor.da_type == DAOS_ANCHOR_TYPE_EOF: ret = list_akey_func( - obj_handle, txn, dkey_ptr, ctypes.byref(nr_val), - ctypes.byref(daos_kds), ctypes.byref(self.sgl), - ctypes.byref(anchor), None) + obj_handle, + txn, + dkey_ptr, + ctypes.byref(nr_val), + ctypes.byref(daos_kds), + ctypes.byref(self.sgl), + ctypes.byref(anchor), + None, + ) if ret != 0: - raise DaosApiError( - "list_akey returned non-zero. RC: {0}".format(ret)) + raise DaosApiError("list_akey returned non-zero. RC: {0}".format(ret)) # No akeys returned. if nr_val.value == 0: @@ -1387,19 +1398,21 @@ def list_akey(self, dkey, obj_handle=None, key_num=5, key_len=512, class DaosContProperties(ctypes.Structure): # pylint: disable=too-few-public-methods - """ This is a python container properties + """This is a python container properties structure used to set the type(eg: posix), enable checksum. 
NOTE: This structure can be enhanced in future for setting other container properties (if needed) """ - _fields_ = [("type", ctypes.c_char * 10), - ("enable_chksum", ctypes.c_bool), - ("srv_verify", ctypes.c_bool), - ("chksum_type", ctypes.c_uint64), - ("chunk_size", ctypes.c_uint64), - ("rd_lvl", ctypes.c_uint64)] + _fields_ = [ + ("type", ctypes.c_char * 10), + ("enable_chksum", ctypes.c_bool), + ("srv_verify", ctypes.c_bool), + ("chksum_type", ctypes.c_uint64), + ("chunk_size", ctypes.c_uint64), + ("rd_lvl", ctypes.c_uint64), + ] def __init__(self): # Set some default values for @@ -1418,13 +1431,14 @@ def __init__(self): self.rd_lvl = ctypes.c_uint64(daos_cref.DAOS_PROP_CO_REDUN_DEFAULT) -class DaosInputParams(): +class DaosInputParams: # pylint: disable=too-few-public-methods - """ This is a helper python method + """This is a helper python method which can be used to pack input parameters for create methods (eg: container or pool (future)). """ + def __init__(self): super().__init__() # Get the input params for setting @@ -1433,7 +1447,7 @@ def __init__(self): self.co_prop = DaosContProperties() def get_con_create_params(self): - """ Get the container create params. + """Get the container create params. This method is used to pack input parameters as a structure. Perform a get_con_create_params @@ -1444,7 +1458,7 @@ def get_con_create_params(self): return self.co_prop -class DaosContainer(): +class DaosContainer: # pylint: disable=too-many-public-methods """A python object representing a DAOS container.""" @@ -1503,52 +1517,59 @@ def create(self, poh, con_prop=None, cb_func=None): idx = 0 if self.cont_input_values.type.decode("UTF-8") != "Unknown": self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( - DaosContPropEnum.DAOS_PROP_CO_LAYOUT_TYPE.value) - if self.cont_input_values.type.decode( - "UTF-8") in ("posix", "POSIX"): + DaosContPropEnum.DAOS_PROP_CO_LAYOUT_TYPE.value + ) + if self.cont_input_values.type.decode("UTF-8") in ("posix", "POSIX"): self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - DaosContPropEnum.DAOS_PROP_CO_LAYOUT_POSIX.value) + DaosContPropEnum.DAOS_PROP_CO_LAYOUT_POSIX.value + ) elif self.cont_input_values.type.decode("UTF-8") == "hdf5": self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - DaosContPropEnum.DAOS_PROP_CO_LAYOUT_HDF5.value) + DaosContPropEnum.DAOS_PROP_CO_LAYOUT_HDF5.value + ) else: # TODO: # pylint: disable=W0511 # This should ideally fail. self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - DaosContPropEnum.DAOS_PROP_CO_LAYOUT_UNKNOWN.value) + DaosContPropEnum.DAOS_PROP_CO_LAYOUT_UNKNOWN.value + ) idx = idx + 1 # If checksum flag is enabled. 
if self.cont_input_values.enable_chksum is True: self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( - DaosContPropEnum.DAOS_PROP_CO_CSUM.value) + DaosContPropEnum.DAOS_PROP_CO_CSUM.value + ) if self.cont_input_values.chksum_type == 100: self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64(1) else: self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - self.cont_input_values.chksum_type) + self.cont_input_values.chksum_type + ) idx = idx + 1 self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( - DaosContPropEnum.DAOS_PROP_CO_CSUM_SERVER_VERIFY.value) + DaosContPropEnum.DAOS_PROP_CO_CSUM_SERVER_VERIFY.value + ) if self.cont_input_values.srv_verify is True: self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64(1) else: self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64(0) idx = idx + 1 self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( - DaosContPropEnum.DAOS_PROP_CO_CSUM_CHUNK_SIZE.value) + DaosContPropEnum.DAOS_PROP_CO_CSUM_CHUNK_SIZE.value + ) if self.cont_input_values.chunk_size == 0: - self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - 16384) + self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64(16384) else: self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - self.cont_input_values.chunk_size) + self.cont_input_values.chunk_size + ) idx = idx + 1 if self.cont_input_values.rd_lvl != daos_cref.DAOS_PROP_CO_REDUN_DEFAULT: self.cont_prop.dpp_entries[idx].dpe_type = ctypes.c_uint32( - DaosContPropEnum.DAOS_PROP_CO_REDUN_LVL.value) - self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64( - self.cont_input_values.rd_lvl) + DaosContPropEnum.DAOS_PROP_CO_REDUN_LVL.value + ) + self.cont_prop.dpp_entries[idx].dpe_val = ctypes.c_uint64(self.cont_input_values.rd_lvl) func = self.context.get_function('create-cont') @@ -1558,26 +1579,20 @@ def create(self, poh, con_prop=None, cb_func=None): if self.cont_prop is None: ret = func(self.poh, ctypes.byref(self.uuid), None, None) else: - ret = func(self.poh, ctypes.byref(self.uuid), ctypes.byref(self.cont_prop), - None) + ret = func(self.poh, ctypes.byref(self.uuid), ctypes.byref(self.cont_prop), None) if ret != 0: self.uuid = (ctypes.c_ubyte * 1)(0) - raise DaosApiError( - "Container create returned non-zero. RC: {0}".format(ret)) + raise DaosApiError("Container create returned non-zero. RC: {0}".format(ret)) self.attached = 1 else: event = daos_cref.DaosEvent() if self.cont_prop is None: params = [self.poh, ctypes.byref(self.uuid), None, event] else: - params = [self.poh, ctypes.byref(self.uuid), ctypes.byref(self.cont_prop), - event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [self.poh, ctypes.byref(self.uuid), ctypes.byref(self.cont_prop), event] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def destroy(self, force=1, poh=None, con_uuid=None, cb_func=None): @@ -1600,18 +1615,14 @@ def destroy(self, force=1, poh=None, con_uuid=None, cb_func=None): if cb_func is None: ret = func(self.poh, bytes(uuid_str, encoding='utf-8'), c_force, None) if ret != 0: - raise DaosApiError("Container destroy returned non-zero. " - "RC: {0}".format(ret)) + raise DaosApiError("Container destroy returned non-zero. 
" "RC: {0}".format(ret)) self.attached = 0 else: event = daos_cref.DaosEvent() params = [self.poh, bytes(uuid_str, encoding='utf-8'), c_force, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def open(self, poh=None, cuuid=None, flags=None, cb_func=None): @@ -1637,22 +1648,30 @@ def open(self, poh=None, cuuid=None, flags=None, cb_func=None): # the callback function is optional, if not supplied then run the # create synchronously, if its there then run it in a thread if cb_func is None: - ret = func(self.poh, bytes(uuid_str, encoding='utf-8'), c_flags, - ctypes.byref(self.coh), ctypes.byref(self.info), None) + ret = func( + self.poh, + bytes(uuid_str, encoding='utf-8'), + c_flags, + ctypes.byref(self.coh), + ctypes.byref(self.info), + None, + ) if ret != 0: - raise DaosApiError("Container open returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Container open returned non-zero. RC: {0}".format(ret)) self.opened = 1 else: event = daos_cref.DaosEvent() - params = [self.poh, bytes(uuid_str, encoding='utf-8'), c_flags, - ctypes.byref(self.coh), ctypes.byref(self.info), event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + params = [ + self.poh, + bytes(uuid_str, encoding='utf-8'), + c_flags, + ctypes.byref(self.coh), + ctypes.byref(self.info), + event, + ] + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def close(self, coh=None, cb_func=None): @@ -1669,18 +1688,14 @@ def close(self, coh=None, cb_func=None): if cb_func is None: ret = func(self.coh, None) if ret != 0: - raise DaosApiError("Container close returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Container close returned non-zero. RC: {0}".format(ret)) self.opened = 0 else: event = daos_cref.DaosEvent() params = [self.coh, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def query(self, coh=None, cont_prop=None, cb_func=None): @@ -1707,20 +1722,18 @@ def query(self, coh=None, cont_prop=None, cb_func=None): if cb_func is None: if cont_prop: - ret = func( - self.coh, ctypes.byref(self.info), ctypes.byref(cont_prop), None) + ret = func(self.coh, ctypes.byref(self.info), ctypes.byref(cont_prop), None) else: ret = func(self.coh, ctypes.byref(self.info), None, None) if ret != 0: - raise DaosApiError( - "Container query returned non-zero. RC: {0}".format(ret)) + raise DaosApiError("Container query returned non-zero. RC: {0}".format(ret)) return self.info event = daos_cref.DaosEvent() params = [self.coh, ctypes.byref(self.info), None, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, params, self.context, - cb_func, self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() return None @@ -1737,8 +1750,7 @@ def get_new_tx(self): func = self.context.get_function('open-tx') ret = func(self.coh, ctypes.byref(c_tx), 0, None) if ret != 0: - raise DaosApiError("tx open returned non-zero. 
RC: {0}" - .format(ret)) + raise DaosApiError("tx open returned non-zero. RC: {0}".format(ret)) return c_tx.value @@ -1751,8 +1763,7 @@ def commit_tx(self, txn): func = self.context.get_function('commit-tx') ret = func(txn, None) if ret != 0: - raise DaosApiError("TX commit returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("TX commit returned non-zero. RC: {0}".format(ret)) def close_tx(self, txn): """Close out a transaction that is done being modified.""" @@ -1765,8 +1776,7 @@ def close_tx(self, txn): func = self.context.get_function('close-tx') ret = func(c_tx, None) if ret != 0: - raise DaosApiError("TX close returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("TX close returned non-zero. RC: {0}".format(ret)) def abort_tx(self, txn): """Abort a transaction that is done being modified.""" @@ -1779,8 +1789,7 @@ def abort_tx(self, txn): func = self.context.get_function('destroy-tx') ret = func(c_tx, None) if ret != 0: - raise DaosApiError("TX abort returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("TX abort returned non-zero. RC: {0}".format(ret)) def restart_tx(self, txn): """Restart a transaction that is being modified.""" @@ -1793,11 +1802,11 @@ def restart_tx(self, txn): func = self.context.get_function('restart-tx') ret = func(c_tx, None) if ret != 0: - raise DaosApiError("TX restart returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("TX restart returned non-zero. RC: {0}".format(ret)) - def write_an_array_value(self, datalist, dkey, akey, obj=None, rank=None, - obj_cls=None, txn=daos_cref.DAOS_TX_NONE): + def write_an_array_value( + self, datalist, dkey, akey, obj=None, rank=None, obj_cls=None, txn=daos_cref.DAOS_TX_NONE + ): """Write an array of data to an object. If an object is not supplied a new one is created. The update occurs @@ -1826,8 +1835,17 @@ def write_an_array_value(self, datalist, dkey, akey, obj=None, rank=None, return ioreq.obj - def write_an_obj(self, thedata, size, dkey, akey, obj=None, rank=None, - obj_cls=None, txn=daos_cref.DAOS_TX_NONE): + def write_an_obj( + self, + thedata, + size, + dkey, + akey, + obj=None, + rank=None, + obj_cls=None, + txn=daos_cref.DAOS_TX_NONE, + ): """Write a single value to an object. If an object isn't supplied a new one is created. The update occurs in @@ -1859,8 +1877,9 @@ def write_an_obj(self, thedata, size, dkey, akey, obj=None, rank=None, return ioreq.obj - def write_multi_akeys(self, dkey, data, obj=None, rank=None, obj_cls=None, - txn=daos_cref.DAOS_TX_NONE): + def write_multi_akeys( + self, dkey, data, obj=None, rank=None, obj_cls=None, txn=daos_cref.DAOS_TX_NONE + ): """Write multiple values to an object, each tagged with a unique akey. If an object isn't supplied a new one is created. The update @@ -1886,8 +1905,7 @@ def write_multi_akeys(self, dkey, data, obj=None, rank=None, obj_cls=None, c_data = [] for tup in data: - newtup = (ctypes.create_string_buffer(tup[0]), - ctypes.create_string_buffer(tup[1])) + newtup = (ctypes.create_string_buffer(tup[0]), ctypes.create_string_buffer(tup[1])) c_data.append(newtup) # obj can be None in which case a new one is created @@ -1897,8 +1915,7 @@ def write_multi_akeys(self, dkey, data, obj=None, rank=None, obj_cls=None, return ioreq.obj - def read_an_array(self, rec_count, rec_size, dkey, akey, obj, - txn=daos_cref.DAOS_TX_NONE): + def read_an_array(self, rec_count, rec_size, dkey, akey, obj, txn=daos_cref.DAOS_TX_NONE): """Read an array value from the specified object. 
rec_count --number of records (array indices) to read @@ -1915,8 +1932,7 @@ def read_an_array(self, rec_count, rec_size, dkey, akey, obj, c_akey = ctypes.create_string_buffer(akey) ioreq = IORequest(self.context, self, obj) - buf = ioreq.fetch_array(c_dkey, c_akey, c_rec_count, - c_rec_size, txn) + buf = ioreq.fetch_array(c_dkey, c_akey, c_rec_count, c_rec_size, txn) return buf def read_multi_akeys(self, dkey, data, obj, txn=daos_cref.DAOS_TX_NONE): @@ -1939,16 +1955,14 @@ def read_multi_akeys(self, dkey, data, obj, txn=daos_cref.DAOS_TX_NONE): c_data = [] for tup in data: - newtup = (ctypes.create_string_buffer(tup[0]), - ctypes.c_size_t(tup[1])) + newtup = (ctypes.create_string_buffer(tup[0]), ctypes.c_size_t(tup[1])) c_data.append(newtup) ioreq = IORequest(self.context, self, obj) buf = ioreq.multi_akey_fetch(c_dkey, c_data, txn) return buf - def read_an_obj(self, size, dkey, akey, obj, test_hints=None, - txn=daos_cref.DAOS_TX_NONE): + def read_an_obj(self, size, dkey, akey, obj, test_hints=None, txn=daos_cref.DAOS_TX_NONE): """Read a single value from an object in this container.""" # init test_hints if necessary if test_hints is None: @@ -1978,8 +1992,7 @@ def local2global(self): func = self.context.get_function("convert-clocal") ret = func(self.coh, ctypes.byref(c_glob)) if ret != 0: - raise DaosApiError("Cntnr local2global returned non-zero. RC: {0}" - .format(ret)) + raise DaosApiError("Cntnr local2global returned non-zero. RC: {0}".format(ret)) # now call it for real c_buf = ctypes.create_string_buffer(c_glob.iov_buf_len) c_glob.iov_buf = ctypes.cast(c_buf, ctypes.c_void_p) @@ -2003,8 +2016,7 @@ def global2local(self, context, iov_len, buf_len, buf): ret = func(self.poh, c_glob, ctypes.byref(local_handle)) if ret != 0: - raise DaosApiError("Container global2local returned non-zero. " - "RC: {0}".format(ret)) + raise DaosApiError("Container global2local returned non-zero. " "RC: {0}".format(ret)) self.coh = local_handle return local_handle @@ -2035,8 +2047,7 @@ def list_attr(self, coh=None, cb_func=None): t_size = ctypes.pointer(ctypes.c_size_t(100)) ret = func(self.coh, sbuf, t_size, None) if ret != 0: - raise DaosApiError("Container list-cont-attr returned non-zero. " - "RC: {0}".format(ret)) + raise DaosApiError("Container list-cont-attr returned non-zero. " "RC: {0}".format(ret)) buf = t_size[0] buff = ctypes.create_string_buffer(buf + 1).raw @@ -2047,17 +2058,14 @@ def list_attr(self, coh=None, cb_func=None): ret = func(self.coh, buff, total_size, None) if ret != 0: raise DaosApiError( - "Container List Attribute returned non-zero. " - "RC: {0}".format(ret)) + "Container List Attribute returned non-zero. 
" "RC: {0}".format(ret) + ) else: event = daos_cref.DaosEvent() params = [self.coh, buff, total_size, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() return total_size[0], buff @@ -2098,17 +2106,15 @@ def set_attr(self, data, coh=None, cb_func=None): if cb_func is None: ret = func(self.coh, no_of_att, names, values, sizes, None) if ret != 0: - raise DaosApiError("Container Set Attribute returned non-zero " - "RC: {0}".format(ret)) + raise DaosApiError( + "Container Set Attribute returned non-zero " "RC: {0}".format(ret) + ) else: event = daos_cref.DaosEvent() params = [self.coh, no_of_att, names, values, sizes, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() def get_attr(self, attr_names, coh=None, cb_func=None): @@ -2139,35 +2145,37 @@ def get_attr(self, attr_names, coh=None, cb_func=None): no_of_att = ctypes.c_int(attr_count) buffers = ctypes.c_char_p * attr_count - buff = buffers(*[ctypes.c_char_p(ctypes.create_string_buffer(100).raw) - for i in range(attr_count)]) + buff = buffers( + *[ctypes.c_char_p(ctypes.create_string_buffer(100).raw) for i in range(attr_count)] + ) size_of_att_val = [100] * attr_count sizes = (ctypes.c_size_t * attr_count)(*size_of_att_val) func = self.context.get_function('get-cont-attr') if cb_func is None: - ret = func(self.coh, no_of_att, ctypes.byref(attr_names_c), - ctypes.byref(buff), sizes, None) + ret = func( + self.coh, no_of_att, ctypes.byref(attr_names_c), ctypes.byref(buff), sizes, None + ) if ret != 0: - raise DaosApiError("Container Get Attribute returned non-zero " - "RC: {0}".format(ret)) + raise DaosApiError( + "Container Get Attribute returned non-zero " "RC: {0}".format(ret) + ) # Construct the results dictionary from buff and sizes set in the function # call. results = {} index = 0 for attr in attr_names: - results[attr] = buff[index][:sizes[index]] + results[attr] = buff[index][: sizes[index]] index += 1 return results event = daos_cref.DaosEvent() - params = [self.coh, no_of_att, ctypes.byref(attr_names_c), ctypes.byref(buff), - sizes, event] + params = [self.coh, no_of_att, ctypes.byref(attr_names_c), ctypes.byref(buff), sizes, event] thread = threading.Thread( - target=daos_cref.async_worker, args=( - func, params, self.context, cb_func, self)) + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() # Return buff and sizes because at this point, the values aren't set. The @@ -2194,21 +2202,17 @@ def aggregate(self, coh, epoch, cb_func=None): if cb_func is None: retcode = func(coh, epoch, None) if retcode != 0: - raise DaosApiError("cont aggregate returned non-zero.RC: {0}" - .format(retcode)) + raise DaosApiError("cont aggregate returned non-zero.RC: {0}".format(retcode)) else: event = daos_cref.DaosEvent() params = [coh, epoch, event] - thread = threading.Thread(target=daos_cref.async_worker, - args=(func, - params, - self.context, - cb_func, - self)) + thread = threading.Thread( + target=daos_cref.async_worker, args=(func, params, self.context, cb_func, self) + ) thread.start() -class DaosSnapshot(): +class DaosSnapshot: """A python object that can represent a DAOS snapshot. 
We do not save the coh in the snapshot since it is different each time the @@ -2222,7 +2226,7 @@ def __init__(self, context, name=None): libdaos we know to always convert it to a ctype. """ self.context = context - self.name = name # currently unused + self.name = name # currently unused self.epoch = 0 def create(self, coh): @@ -2237,8 +2241,7 @@ def create(self, coh): retcode = func(coh, ctypes.byref(epoch), None, None) self.epoch = epoch.value if retcode != 0: - raise DaosApiError("Snapshot create returned non-zero. RC: {0}" - .format(retcode)) + raise DaosApiError("Snapshot create returned non-zero. RC: {0}".format(retcode)) # To be Done: Generalize this function to accept and return the number of # snapshots and the epochs and names lists. See description of @@ -2258,11 +2261,11 @@ def list(self, coh, epoch=None): num = ctypes.c_uint64(1) epoch = ctypes.c_uint64(self.epoch) anchor = daos_cref.Anchor() - retcode = func(coh, ctypes.byref(num), ctypes.byref(epoch), None, - ctypes.byref(anchor), None) + retcode = func( + coh, ctypes.byref(num), ctypes.byref(epoch), None, ctypes.byref(anchor), None + ) if retcode != 0: - raise DaosApiError("Snapshot create returned non-zero. RC: {0}" - .format(retcode)) + raise DaosApiError("Snapshot create returned non-zero. RC: {0}".format(retcode)) return epoch.value def open(self, coh, epoch=None): @@ -2279,8 +2282,7 @@ def open(self, coh, epoch=None): txhndl = ctypes.c_uint64(0) retcode = func(coh, epoch, ctypes.byref(txhndl), None) if retcode != 0: - raise DaosApiError("Snapshot handle returned non-zero. RC: {0}" - .format(retcode)) + raise DaosApiError("Snapshot handle returned non-zero. RC: {0}".format(retcode)) return txhndl def destroy(self, coh, epoch=None, evnt=None): @@ -2306,7 +2308,7 @@ def destroy(self, coh, epoch=None, evnt=None): raise DaosApiError("Failed to destroy the snapshot. 
RC: {}".format(retcode)) -class DaosContext(): +class DaosContext: # pylint: disable=too-few-public-methods """Provides environment and other info for a DAOS client.""" @@ -2314,70 +2316,68 @@ def __init__(self, path): """Set up the DAOS API and MPI.""" # first find the DAOS version self._dc = None - with open(os.path.join(path, "daos", "API_VERSION"), - "r") as version_file: + with open(os.path.join(path, "daos", "API_VERSION"), "r") as version_file: daos_version = version_file.read().rstrip() self.libdaos = ctypes.CDLL( - os.path.join(path, 'libdaos.so.{}'.format(daos_version)), - mode=ctypes.DEFAULT_MODE) - ctypes.CDLL(os.path.join(path, 'libdaos_common.so'), - mode=ctypes.RTLD_GLOBAL) + os.path.join(path, 'libdaos.so.{}'.format(daos_version)), mode=ctypes.DEFAULT_MODE + ) + ctypes.CDLL(os.path.join(path, 'libdaos_common.so'), mode=ctypes.RTLD_GLOBAL) - self.libtest = ctypes.CDLL(os.path.join(path, 'libdaos_tests.so'), - mode=ctypes.DEFAULT_MODE) + self.libtest = ctypes.CDLL(os.path.join(path, 'libdaos_tests.so'), mode=ctypes.DEFAULT_MODE) # Note: action-subject format self.ftable = { - 'close-cont': self.libdaos.daos_cont_close, - 'close-obj': self.libdaos.daos_obj_close, - 'close-tx': self.libdaos.daos_tx_close, - 'commit-tx': self.libdaos.daos_tx_commit, - 'connect-pool': self.libdaos.daos_pool_connect, + 'close-cont': self.libdaos.daos_cont_close, + 'close-obj': self.libdaos.daos_obj_close, + 'close-tx': self.libdaos.daos_tx_close, + 'commit-tx': self.libdaos.daos_tx_commit, + 'connect-pool': self.libdaos.daos_pool_connect, 'convert-cglobal': self.libdaos.daos_cont_global2local, - 'convert-clocal': self.libdaos.daos_cont_local2global, + 'convert-clocal': self.libdaos.daos_cont_local2global, 'convert-pglobal': self.libdaos.daos_pool_global2local, - 'convert-plocal': self.libdaos.daos_pool_local2global, - 'create-cont': self.libdaos.daos_cont_create, - 'create-eq': self.libdaos.daos_eq_create, - 'create-snap': self.libdaos.daos_cont_create_snap, - 'd_log': self.libtest.dts_log, - 'destroy-cont': self.libdaos.daos_cont_destroy, - 'destroy-eq': self.libdaos.daos_eq_destroy, - 'destroy-snap': self.libdaos.daos_cont_destroy_snap, - 'destroy-tx': self.libdaos.daos_tx_abort, + 'convert-plocal': self.libdaos.daos_pool_local2global, + 'create-cont': self.libdaos.daos_cont_create, + 'create-eq': self.libdaos.daos_eq_create, + 'create-snap': self.libdaos.daos_cont_create_snap, + 'd_log': self.libtest.dts_log, + 'destroy-cont': self.libdaos.daos_cont_destroy, + 'destroy-eq': self.libdaos.daos_eq_destroy, + 'destroy-snap': self.libdaos.daos_cont_destroy_snap, + 'destroy-tx': self.libdaos.daos_tx_abort, 'disconnect-pool': self.libdaos.daos_pool_disconnect, - 'fetch-obj': self.libdaos.daos_obj_fetch, - 'generate-oid': self.libdaos.daos_obj_generate_oid, - 'get-cont-attr': self.libdaos.daos_cont_get_attr, - 'get-pool-attr': self.libdaos.daos_pool_get_attr, - 'get-layout': self.libdaos.daos_obj_layout_get, - 'init-event': self.libdaos.daos_event_init, - 'list-akey': self.libdaos.daos_obj_list_akey, - 'list-attr': self.libdaos.daos_cont_list_attr, - 'list-cont-attr': self.libdaos.daos_cont_list_attr, - 'list-dkey': self.libdaos.daos_obj_list_dkey, - 'list-pool-attr': self.libdaos.daos_pool_list_attr, - 'cont-aggregate': self.libdaos.daos_cont_aggregate, - 'list-snap': self.libdaos.daos_cont_list_snap, - 'open-cont': self.libdaos.daos_cont_open, - 'open-obj': self.libdaos.daos_obj_open, - 'open-snap': self.libdaos.daos_tx_open_snap, - 'open-tx': self.libdaos.daos_tx_open, - 'poll-eq': 
self.libdaos.daos_eq_poll, - 'punch-akeys': self.libdaos.daos_obj_punch_akeys, - 'punch-dkeys': self.libdaos.daos_obj_punch_dkeys, - 'punch-obj': self.libdaos.daos_obj_punch, - 'query-cont': self.libdaos.daos_cont_query, - 'query-obj': self.libdaos.daos_obj_query, - 'query-pool': self.libdaos.daos_pool_query, - 'query-target': self.libdaos.daos_pool_query_target, - 'restart-tx': self.libdaos.daos_tx_restart, - 'set-cont-attr': self.libdaos.daos_cont_set_attr, - 'set-pool-attr': self.libdaos.daos_pool_set_attr, - 'stop-service': self.libdaos.daos_pool_stop_svc, - 'test-event': self.libdaos.daos_event_test, - 'update-obj': self.libdaos.daos_obj_update, - 'oid_gen': self.libtest.dts_oid_gen} + 'fetch-obj': self.libdaos.daos_obj_fetch, + 'generate-oid': self.libdaos.daos_obj_generate_oid, + 'get-cont-attr': self.libdaos.daos_cont_get_attr, + 'get-pool-attr': self.libdaos.daos_pool_get_attr, + 'get-layout': self.libdaos.daos_obj_layout_get, + 'init-event': self.libdaos.daos_event_init, + 'list-akey': self.libdaos.daos_obj_list_akey, + 'list-attr': self.libdaos.daos_cont_list_attr, + 'list-cont-attr': self.libdaos.daos_cont_list_attr, + 'list-dkey': self.libdaos.daos_obj_list_dkey, + 'list-pool-attr': self.libdaos.daos_pool_list_attr, + 'cont-aggregate': self.libdaos.daos_cont_aggregate, + 'list-snap': self.libdaos.daos_cont_list_snap, + 'open-cont': self.libdaos.daos_cont_open, + 'open-obj': self.libdaos.daos_obj_open, + 'open-snap': self.libdaos.daos_tx_open_snap, + 'open-tx': self.libdaos.daos_tx_open, + 'poll-eq': self.libdaos.daos_eq_poll, + 'punch-akeys': self.libdaos.daos_obj_punch_akeys, + 'punch-dkeys': self.libdaos.daos_obj_punch_dkeys, + 'punch-obj': self.libdaos.daos_obj_punch, + 'query-cont': self.libdaos.daos_cont_query, + 'query-obj': self.libdaos.daos_obj_query, + 'query-pool': self.libdaos.daos_pool_query, + 'query-target': self.libdaos.daos_pool_query_target, + 'restart-tx': self.libdaos.daos_tx_restart, + 'set-cont-attr': self.libdaos.daos_cont_set_attr, + 'set-pool-attr': self.libdaos.daos_pool_set_attr, + 'stop-service': self.libdaos.daos_pool_stop_svc, + 'test-event': self.libdaos.daos_event_test, + 'update-obj': self.libdaos.daos_obj_update, + 'oid_gen': self.libtest.dts_oid_gen, + } def get_function(self, function): """Call a function through the API.""" diff --git a/src/client/pydaos/raw/daos_cref.py b/src/client/pydaos/raw/daos_cref.py index 3d5aeb2f878..cf85325f315 100644 --- a/src/client/pydaos/raw/daos_cref.py +++ b/src/client/pydaos/raw/daos_cref.py @@ -10,121 +10,148 @@ # DAOS api C structures class RankList(ctypes.Structure): - """ For those DAOS calls that take a rank list. + """For those DAOS calls that take a rank list. 
Represents struct: d_rank_list_t""" - _fields_ = [("rl_ranks", ctypes.POINTER(ctypes.c_uint32)), - ("rl_nr", ctypes.c_uint)] + + _fields_ = [("rl_ranks", ctypes.POINTER(ctypes.c_uint32)), ("rl_nr", ctypes.c_uint)] class DTgtList(ctypes.Structure): - """ Structure to represent rank/target list for target + """Structure to represent rank/target list for target Represents struct: d_tgt_list""" - _fields_ = [("tl_ranks", ctypes.POINTER(ctypes.c_uint32)), - ("tl_tgts", ctypes.POINTER(ctypes.c_int32)), - ("tl_nr", ctypes.c_uint32)] + + _fields_ = [ + ("tl_ranks", ctypes.POINTER(ctypes.c_uint32)), + ("tl_tgts", ctypes.POINTER(ctypes.c_int32)), + ("tl_nr", ctypes.c_uint32), + ] class IOV(ctypes.Structure): """Represents struct: d_iov_t daos_key_t""" - _fields_ = [("iov_buf", ctypes.c_void_p), - ("iov_buf_len", ctypes.c_size_t), - ("iov_len", ctypes.c_size_t)] + + _fields_ = [ + ("iov_buf", ctypes.c_void_p), + ("iov_buf_len", ctypes.c_size_t), + ("iov_len", ctypes.c_size_t), + ] class SGL(ctypes.Structure): """Represents struct: d_sg_list_t""" - _fields_ = [("sg_nr", ctypes.c_uint32), - ("sg_nr_out", ctypes.c_uint32), - ("sg_iovs", ctypes.POINTER(IOV))] + + _fields_ = [ + ("sg_nr", ctypes.c_uint32), + ("sg_nr_out", ctypes.c_uint32), + ("sg_iovs", ctypes.POINTER(IOV)), + ] class EpochRange(ctypes.Structure): """Represents struct: daos_epoch_range_t""" - _fields_ = [("epr_lo", ctypes.c_uint64), - ("epr_hi", ctypes.c_uint64)] + + _fields_ = [("epr_lo", ctypes.c_uint64), ("epr_hi", ctypes.c_uint64)] class RebuildStatus(ctypes.Structure): - """ Structure to represent rebuild status info + """Structure to represent rebuild status info Represents struct: daos_rebuild_status""" - _fields_ = [("rs_version", ctypes.c_uint32), - ("rs_seconds", ctypes.c_uint32), - ("rs_errno", ctypes.c_uint32), - ("rs_state", ctypes.c_uint32), - ("rs_padding32", ctypes.c_uint32), - ("rs_fail_rank", ctypes.c_uint32), - ("rs_toberb_obj_nr", ctypes.c_uint64), - ("rs_obj_nr", ctypes.c_uint64), - ("rs_rec_nr", ctypes.c_uint64), - ("rs_size", ctypes.c_uint64)] + + _fields_ = [ + ("rs_version", ctypes.c_uint32), + ("rs_seconds", ctypes.c_uint32), + ("rs_errno", ctypes.c_uint32), + ("rs_state", ctypes.c_uint32), + ("rs_padding32", ctypes.c_uint32), + ("rs_fail_rank", ctypes.c_uint32), + ("rs_toberb_obj_nr", ctypes.c_uint64), + ("rs_obj_nr", ctypes.c_uint64), + ("rs_rec_nr", ctypes.c_uint64), + ("rs_size", ctypes.c_uint64), + ] class DaosHandle(ctypes.Structure): - """ Structure to represent rebuild status info - Represents struct: : daos_handle_t """ + """Structure to represent rebuild status info + Represents struct: : daos_handle_t""" + _fields_ = [("cookie", ctypes.c_uint64)] class DaosSpace(ctypes.Structure): - """ Structure to represent Pool Target Space usage info + """Structure to represent Pool Target Space usage info Represents struct: daos_space""" - _fields_ = [("s_total", ctypes.c_uint64 * 2), - ("s_free", ctypes.c_uint64 * 2)] + + _fields_ = [("s_total", ctypes.c_uint64 * 2), ("s_free", ctypes.c_uint64 * 2)] class TargetInfo(ctypes.Structure): - """ Represents info about a given target + """Represents info about a given target Represents struct: daos_target_info_t""" - _fields_ = [("ta_type", ctypes.c_uint), - ("ta_state", ctypes.c_uint), - ("ta_perf", ctypes.c_int), - ("ta_space", DaosSpace)] + + _fields_ = [ + ("ta_type", ctypes.c_uint), + ("ta_state", ctypes.c_uint), + ("ta_perf", ctypes.c_int), + ("ta_space", DaosSpace), + ] class PoolSpace(ctypes.Structure): - """ Structure to represent Pool space usage info + 
"""Structure to represent Pool space usage info Represents struct: daos_pool_space""" - _fields_ = [("ps_space", DaosSpace), - ("ps_free_min", ctypes.c_uint64 * 2), - ("ps_free_max", ctypes.c_uint64 * 2), - ("ps_free_mean", ctypes.c_uint64 * 2), - ("ps_ntargets", ctypes.c_uint32), - ("ps_padding", ctypes.c_uint32)] + + _fields_ = [ + ("ps_space", DaosSpace), + ("ps_free_min", ctypes.c_uint64 * 2), + ("ps_free_max", ctypes.c_uint64 * 2), + ("ps_free_mean", ctypes.c_uint64 * 2), + ("ps_ntargets", ctypes.c_uint32), + ("ps_padding", ctypes.c_uint32), + ] class PoolInfo(ctypes.Structure): - """ Structure to represent information about a pool + """Structure to represent information about a pool Represents struct: daos_pool_info_t""" - _fields_ = [("pi_uuid", ctypes.c_ubyte * 16), - ("pi_ntargets", ctypes.c_uint32), - ("pi_nnodes", ctypes.c_uint32), - ("pi_ndisabled", ctypes.c_uint32), - ("pi_map_ver", ctypes.c_uint32), - ("pi_leader", ctypes.c_uint32), - ("pi_bits", ctypes.c_uint64), - ("pi_space", PoolSpace), - ("pi_rebuild_st", RebuildStatus)] + + _fields_ = [ + ("pi_uuid", ctypes.c_ubyte * 16), + ("pi_ntargets", ctypes.c_uint32), + ("pi_nnodes", ctypes.c_uint32), + ("pi_ndisabled", ctypes.c_uint32), + ("pi_map_ver", ctypes.c_uint32), + ("pi_leader", ctypes.c_uint32), + ("pi_bits", ctypes.c_uint64), + ("pi_space", PoolSpace), + ("pi_rebuild_st", RebuildStatus), + ] class DaosPropertyEntry(ctypes.Structure): - """Represents struct: daos_prop_entry """ - _fields_ = [("dpe_type", ctypes.c_uint32), - ("dpe_flags", ctypes.c_uint16), - ("dpe_reserv", ctypes.c_uint16), - ("dpe_val", ctypes.c_uint64)] + """Represents struct: daos_prop_entry""" + + _fields_ = [ + ("dpe_type", ctypes.c_uint32), + ("dpe_flags", ctypes.c_uint16), + ("dpe_reserv", ctypes.c_uint16), + ("dpe_val", ctypes.c_uint64), + ] class DaosProperty(ctypes.Structure): """Represents struct: daos_prop_t""" - _fields_ = [("dpp_nr", ctypes.c_uint32), - ("dpp_reserv", ctypes.c_uint32), - ("dpp_entries", ctypes.POINTER(DaosPropertyEntry))] + + _fields_ = [ + ("dpp_nr", ctypes.c_uint32), + ("dpp_reserv", ctypes.c_uint32), + ("dpp_entries", ctypes.POINTER(DaosPropertyEntry)), + ] def __init__(self, num_structs): super().__init__() total_prop_entries = (DaosPropertyEntry * num_structs)() - self.dpp_entries = ctypes.cast(total_prop_entries, - ctypes.POINTER(DaosPropertyEntry)) + self.dpp_entries = ctypes.cast(total_prop_entries, ctypes.POINTER(DaosPropertyEntry)) self.dpp_nr = num_structs self.dpp_reserv = 0 for num in range(0, num_structs): @@ -135,103 +162,120 @@ def __init__(self, num_structs): class ContInfo(ctypes.Structure): - """ Structure to represent daos_cont_info_t a struct + """Structure to represent daos_cont_info_t a struct Represents struct: daos_cont_info_t""" - _fields_ = [("ci_uuid", ctypes.c_ubyte * 16), - ("ci_lsnapshots", ctypes.c_uint64), - ("ci_nhandles", ctypes.c_uint32), - ("ci_nsnapshots", ctypes.c_uint32), - ("ci_md_otime", ctypes.c_uint64), - ("ci_md_mtime", ctypes.c_uint64)] + + _fields_ = [ + ("ci_uuid", ctypes.c_ubyte * 16), + ("ci_lsnapshots", ctypes.c_uint64), + ("ci_nhandles", ctypes.c_uint32), + ("ci_nsnapshots", ctypes.c_uint32), + ("ci_md_otime", ctypes.c_uint64), + ("ci_md_mtime", ctypes.c_uint64), + ] class DaosEvent(ctypes.Structure): """Represents struct: daos_event_t""" - _fields_ = [("ev_error", ctypes.c_int), - ("ev_private", ctypes.c_ulonglong * 20), - ("ev_debug", ctypes.c_ulonglong)] + + _fields_ = [ + ("ev_error", ctypes.c_int), + ("ev_private", ctypes.c_ulonglong * 20), + ("ev_debug", 
ctypes.c_ulonglong), + ] class DaosObjClassAttr(ctypes.Structure): """Represents struct: daos_oclass_attr""" - _fields_ = [("ca_schema", ctypes.c_int), - ("ca_resil", ctypes.c_int), - ("ca_resil_degree", ctypes.c_int), - ("ca_grp_nr", ctypes.c_uint), - ("u", ctypes.c_uint * 4), # 3 uint, 2 ushort - ] + + _fields_ = [ + ("ca_schema", ctypes.c_int), + ("ca_resil", ctypes.c_int), + ("ca_resil_degree", ctypes.c_int), + ("ca_grp_nr", ctypes.c_uint), + ("u", ctypes.c_uint * 4), # 3 uint, 2 ushort + ] class DaosObjAttr(ctypes.Structure): """Represents struct: daos_obj_attr""" - _fields_ = [("oa_rank", ctypes.c_int), - ("oa_oa", DaosObjClassAttr)] + + _fields_ = [("oa_rank", ctypes.c_int), ("oa_oa", DaosObjClassAttr)] class DaosObjId(ctypes.Structure): """Represents struct: daos_obj_id_t""" - _fields_ = [("lo", ctypes.c_uint64), - ("hi", ctypes.c_uint64)] + + _fields_ = [("lo", ctypes.c_uint64), ("hi", ctypes.c_uint64)] class DaosShardLoc(ctypes.Structure): - """ Structure to represent shard + """Structure to represent shard Represents struct: daos_shard_loc""" - _fields_ = [("sd_rank", ctypes.c_uint32), - ("sd_tgt_idx", ctypes.c_uint32)] + + _fields_ = [("sd_rank", ctypes.c_uint32), ("sd_tgt_idx", ctypes.c_uint32)] # Note hard-coded number of ranks, might eventually be a problem class DaosObjShard(ctypes.Structure): - """ Structure to represent one shard of an obj layout - Represents struct: daos_obj_shard """ - _fields_ = [("os_replica_nr", ctypes.c_uint32), - ("os_shard_loc", DaosShardLoc * 5)] + """Structure to represent one shard of an obj layout + Represents struct: daos_obj_shard""" + + _fields_ = [("os_replica_nr", ctypes.c_uint32), ("os_shard_loc", DaosShardLoc * 5)] # note the hard-coded number of ranks, might eventually be a problem class DaosObjLayout(ctypes.Structure): - """ Structure to represent obj layout + """Structure to represent obj layout Represents struct: daos_obj_layout""" - _fields_ = [("ol_ver", ctypes.c_uint32), - ("ol_class", ctypes.c_uint32), - ("ol_nr", ctypes.c_uint32), - ("ol_shards", ctypes.POINTER(DaosObjShard * 5))] + + _fields_ = [ + ("ol_ver", ctypes.c_uint32), + ("ol_class", ctypes.c_uint32), + ("ol_nr", ctypes.c_uint32), + ("ol_shards", ctypes.POINTER(DaosObjShard * 5)), + ] class Extent(ctypes.Structure): """Represents struct: daos_recx_t""" - _fields_ = [("rx_idx", ctypes.c_uint64), - ("rx_nr", ctypes.c_uint64)] + + _fields_ = [("rx_idx", ctypes.c_uint64), ("rx_nr", ctypes.c_uint64)] class DaosIODescriptor(ctypes.Structure): """Represents struct: daos_iod_t""" - _fields_ = [("iod_name", IOV), - ("iod_type", ctypes.c_int), # enum - ("iod_size", ctypes.c_uint64), - ("iod_flags", ctypes.c_uint64), - ("iod_nr", ctypes.c_uint32), - ("iod_recxs", ctypes.POINTER(Extent))] + + _fields_ = [ + ("iod_name", IOV), + ("iod_type", ctypes.c_int), # enum + ("iod_size", ctypes.c_uint64), + ("iod_flags", ctypes.c_uint64), + ("iod_nr", ctypes.c_uint32), + ("iod_recxs", ctypes.POINTER(Extent)), + ] class Anchor(ctypes.Structure): - """ Class to represent a C daos_anchor_t struct. 
""" - _fields_ = [('da_type', ctypes.c_uint16), - ('da_shard', ctypes.c_uint16), - ('da_flags', ctypes.c_uint32), - ('da_sub_anchors', ctypes.c_uint64), - ('da_buff', ctypes.c_uint8 * 104)] + """Class to represent a C daos_anchor_t struct.""" + + _fields_ = [ + ('da_type', ctypes.c_uint16), + ('da_shard', ctypes.c_uint16), + ('da_flags', ctypes.c_uint32), + ('da_sub_anchors', ctypes.c_uint64), + ('da_buff', ctypes.c_uint8 * 104), + ] class DaosKeyDescriptor(ctypes.Structure): """Represents struct: daos_key_desc_t""" - _fields_ = [("kd_key_len", ctypes.c_uint64), - ("kd_val_type", ctypes.c_uint32)] + + _fields_ = [("kd_key_len", ctypes.c_uint64), ("kd_val_type", ctypes.c_uint32)] -class CallbackEvent(): - """ Class to represent a call back event. """ +class CallbackEvent: + """Class to represent a call back event.""" def __init__(self, obj, event): self.obj = obj @@ -239,21 +283,21 @@ def __init__(self, obj, event): def async_worker(func_ref, param_list, context, cb_func=None, obj=None): - """ Wrapper function that calls the daos C code. This can - be used to run the DAOS library functions in a thread - (or to just run them in the current thread too). - - func_ref --which daos_api function to call - param_list --parameters the c function takes - context --the API context object - cb_func --optional if caller wants notification of completion - obj --optional passed to the callback function - - This is done in a way that exercises the - DAOS event code which is cumbersome and done more simply - by other means. Its good for testing but replace this - implementation if this is used as something other than a test - tool. + """Wrapper function that calls the daos C code. This can + be used to run the DAOS library functions in a thread + (or to just run them in the current thread too). + + func_ref --which daos_api function to call + param_list --parameters the c function takes + context --the API context object + cb_func --optional if caller wants notification of completion + obj --optional passed to the callback function + + This is done in a way that exercises the + DAOS event code which is cumbersome and done more simply + by other means. Its good for testing but replace this + implementation if this is used as something other than a test + tool. 
""" # TO be Done insufficient error handling in this function @@ -293,6 +337,7 @@ def async_worker(func_ref, param_list, context, cb_func=None, obj=None): class Logfac: """Log warning levels.""" + DEBUG = 0 INFO = 1 WARNING = 2 diff --git a/src/client/setup.py b/src/client/setup.py index 668e7871d47..0f4a016dc28 100644 --- a/src/client/setup.py +++ b/src/client/setup.py @@ -29,14 +29,12 @@ def load_conf(): conf = load_conf() -args = {'sources': ['pydaos/pydaos_shim.c'], - 'libraries': ['daos', 'duns']} +args = {'sources': ['pydaos/pydaos_shim.c'], 'libraries': ['daos', 'duns']} if conf: args['include_dirs'] = [os.path.join(conf['PREFIX'], 'include')] if conf.get('CART_PREFIX', None): - args['include_dirs'].extend(os.path.join( - conf['CART_PREFIX'], 'include')) + args['include_dirs'].extend(os.path.join(conf['CART_PREFIX'], 'include')) args['library_dirs'] = [os.path.join(conf['PREFIX'], 'lib64')] args['runtime_library_dirs'] = args['library_dirs'] @@ -50,5 +48,5 @@ def load_conf(): version='0.2', packages=find_packages(), description='DAOS interface', - ext_modules=[module1] + ext_modules=[module1], ) diff --git a/src/common/SConscript b/src/common/SConscript index 151ba5f0ed4..c3cce4a2482 100644 --- a/src/common/SConscript +++ b/src/common/SConscript @@ -1,19 +1,44 @@ """Build common libraries""" -COMMON_FILES = ['debug.c', 'mem.c', 'fail_loc.c', 'lru.c', - 'misc.c', 'pool_map.c', 'sort.c', 'btree.c', 'prop.c', - 'btree_class.c', 'tse.c', 'rsvc.c', 'checksum.c', - 'drpc.c', 'drpc.pb-c.c', 'proc.c', - 'acl_api.c', 'acl_util.c', 'acl_principal.c', 'cont_props.c', - 'dedup.c', 'profile.c', 'compression.c', 'compression_isal.c', - 'compression_qat.c', 'multihash.c', 'multihash_isal.c', - 'cipher.c', 'cipher_isal.c', 'qat.c', 'fault_domain.c', - 'policy.c'] +COMMON_FILES = [ + 'debug.c', + 'mem.c', + 'fail_loc.c', + 'lru.c', + 'misc.c', + 'pool_map.c', + 'sort.c', + 'btree.c', + 'prop.c', + 'btree_class.c', + 'tse.c', + 'rsvc.c', + 'checksum.c', + 'drpc.c', + 'drpc.pb-c.c', + 'proc.c', + 'acl_api.c', + 'acl_util.c', + 'acl_principal.c', + 'cont_props.c', + 'dedup.c', + 'profile.c', + 'compression.c', + 'compression_isal.c', + 'compression_qat.c', + 'multihash.c', + 'multihash_isal.c', + 'cipher.c', + 'cipher_isal.c', + 'qat.c', + 'fault_domain.c', + 'policy.c', +] def build_daos_common(denv, client): - """ Building non-pmem version for client's common lib""" + """Building non-pmem version for client's common lib""" benv = denv.Clone() stack_mmap_files = [] ad_mem_files = [] @@ -23,12 +48,28 @@ def build_daos_common(denv, client): if client: libname = 'daos_common' else: - dav_src = ['dav/alloc_class.c', 'dav/bucket.c', 'dav/container_ravl.c', - 'dav/container_seglists.c', 'dav/critnib.c', 'dav/dav_clogs.c', - 'dav/dav_iface.c', 'dav/heap.c', 'dav/memblock.c', - 'dav/memops.c', 'dav/os_thread_posix.c', 'dav/palloc.c', 'dav/ravl.c', - 'dav/ravl_interval.c', 'dav/recycler.c', 'dav/stats.c', 'dav/tx.c', 'dav/ulog.c', - 'dav/util.c', 'dav/wal_tx.c'] + dav_src = [ + 'dav/alloc_class.c', + 'dav/bucket.c', + 'dav/container_ravl.c', + 'dav/container_seglists.c', + 'dav/critnib.c', + 'dav/dav_clogs.c', + 'dav/dav_iface.c', + 'dav/heap.c', + 'dav/memblock.c', + 'dav/memops.c', + 'dav/os_thread_posix.c', + 'dav/palloc.c', + 'dav/ravl.c', + 'dav/ravl_interval.c', + 'dav/recycler.c', + 'dav/stats.c', + 'dav/tx.c', + 'dav/ulog.c', + 'dav/util.c', + 'dav/wal_tx.c', + ] ad_mem_files = ['ad_mem.c', 'ad_tx.c'] common_libs.extend(['pmemobj', 'abt']) benv.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) @@ 
-41,8 +82,9 @@ def build_daos_common(denv, client): stack_mmap_files = ['stack_mmap.c'] benv.Append(CCFLAGS=['-DULT_MMAP_STACK']) - common = benv.d_library(libname, COMMON_FILES + dav_src + ad_mem_files + stack_mmap_files, - LIBS=common_libs) + common = benv.d_library( + libname, COMMON_FILES + dav_src + ad_mem_files + stack_mmap_files, LIBS=common_libs + ) benv.Install('$PREFIX/lib64/', common) return common diff --git a/src/common/tests/SConscript b/src/common/tests/SConscript index f2d8807e391..9a41d4a0fc3 100644 --- a/src/common/tests/SConscript +++ b/src/common/tests/SConscript @@ -8,38 +8,49 @@ def scons(): tenv.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) tenv.require('argobots') - tenv.d_test_program('btree', ['btree.c', utest_utils], - LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka']) - tenv.d_test_program('umem_test_bmem', ['umem_test_bmem.c', utest_utils], - LIBS=['daos_common_pmem', 'gurt', 'cmocka']) - tenv.d_test_program('umem_test', ['umem_test.c', utest_utils], - LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka']) - tenv.d_test_program('btree_direct', ['btree_direct.c', utest_utils], - LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka']) - tenv.d_test_program('other', 'other.c', - LIBS=['daos_common_pmem', 'gurt', 'cart']) - tenv.d_test_program('common_test', ['common_test.c', 'checksum_tests.c', - 'compress_tests.c', 'misc_tests.c'], - LIBS=['daos_common', 'daos_tests', 'gurt', - 'cart', 'cmocka']) + tenv.d_test_program( + 'btree', ['btree.c', utest_utils], LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka'] + ) + tenv.d_test_program( + 'umem_test_bmem', + ['umem_test_bmem.c', utest_utils], + LIBS=['daos_common_pmem', 'gurt', 'cmocka'], + ) + tenv.d_test_program( + 'umem_test', + ['umem_test.c', utest_utils], + LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka'], + ) + tenv.d_test_program( + 'btree_direct', + ['btree_direct.c', utest_utils], + LIBS=['daos_common_pmem', 'gurt', 'pmemobj', 'cmocka'], + ) + tenv.d_test_program('other', 'other.c', LIBS=['daos_common_pmem', 'gurt', 'cart']) + tenv.d_test_program( + 'common_test', + ['common_test.c', 'checksum_tests.c', 'compress_tests.c', 'misc_tests.c'], + LIBS=['daos_common', 'daos_tests', 'gurt', 'cart', 'cmocka'], + ) tenv.d_test_program('lru', 'lru.c', LIBS=['daos_common_pmem', 'gurt', 'cart']) - tenv.d_test_program('sched', 'sched.c', - LIBS=['daos_common', 'gurt', 'cart', 'cmocka', 'pthread']) + tenv.d_test_program( + 'sched', 'sched.c', LIBS=['daos_common', 'gurt', 'cart', 'cmocka', 'pthread'] + ) new_env = tenv.Clone() if tenv["STACK_MMAP"] == 1: new_env.Append(CCFLAGS=['-DULT_MMAP_STACK']) - new_env.d_test_program('abt_perf', 'abt_perf.c', - LIBS=['daos_common_pmem', 'gurt', 'abt']) - tenv.d_test_program('acl_real_tests', 'acl_util_real_tests.c', - LIBS=['daos_common', 'gurt', 'cmocka']) - tenv.d_test_program('prop_tests', 'prop_tests.c', - LIBS=['daos_common', 'gurt', 'cmocka']) - tenv.d_test_program('fault_domain_tests', 'fault_domain_tests.c', - LIBS=['daos_common', 'gurt', 'cmocka']) - tenv.d_test_program('policy_tests', 'policy_tests.c', - LIBS=['daos_common', 'gurt', 'cmocka']) - tenv.d_test_program('ad_mem_tests', 'ad_mem_tests.c', - LIBS=['daos_common_pmem', 'gurt', 'cmocka']) + new_env.d_test_program('abt_perf', 'abt_perf.c', LIBS=['daos_common_pmem', 'gurt', 'abt']) + tenv.d_test_program( + 'acl_real_tests', 'acl_util_real_tests.c', LIBS=['daos_common', 'gurt', 'cmocka'] + ) + tenv.d_test_program('prop_tests', 'prop_tests.c', LIBS=['daos_common', 'gurt', 'cmocka']) + tenv.d_test_program( + 
'fault_domain_tests', 'fault_domain_tests.c', LIBS=['daos_common', 'gurt', 'cmocka'] + ) + tenv.d_test_program('policy_tests', 'policy_tests.c', LIBS=['daos_common', 'gurt', 'cmocka']) + tenv.d_test_program( + 'ad_mem_tests', 'ad_mem_tests.c', LIBS=['daos_common_pmem', 'gurt', 'cmocka'] + ) tenv.d_test_program('checksum_timing', 'checksum_timing.c', LIBS=['daos_common', 'gurt']) tenv.d_test_program('compress_timing', 'compress_timing.c', LIBS=['daos_common', 'gurt']) @@ -56,29 +67,39 @@ def scons(): acl_api = unit_env.Object('../acl_api.c') Depends('acl_api_tests', common_mock_ld_script) - unit_env.d_test_program('acl_api_tests', - source=['acl_api_tests.c', acl_api, mock_test_utils], - LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'acl_api_tests', + source=['acl_api_tests.c', acl_api, mock_test_utils], + LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka'], + ) Depends('acl_valid_tests', common_mock_ld_script) - unit_env.d_test_program('acl_valid_tests', - source=['acl_valid_tests.c', acl_api, mock_test_utils], - LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'acl_valid_tests', + source=['acl_valid_tests.c', acl_api, mock_test_utils], + LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka'], + ) Depends('acl_util_tests', common_mock_ld_script) - unit_env.d_test_program('acl_util_tests', - source=['acl_util_tests.c', '../acl_util.c', mock_test_utils], - LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'acl_util_tests', + source=['acl_util_tests.c', '../acl_util.c', mock_test_utils], + LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka'], + ) Depends('acl_principal_tests', common_mock_ld_script) - unit_env.d_test_program('acl_principal_tests', - source=['acl_principal_tests.c', '../acl_principal.c', mock_test_utils], - LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'acl_principal_tests', + source=['acl_principal_tests.c', '../acl_principal.c', mock_test_utils], + LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka'], + ) Depends('drpc_tests', common_mock_ld_script) - unit_env.d_test_program('drpc_tests', - ['drpc_tests.c', '../drpc.c', '../drpc.pb-c.c', mock_test_utils], - LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'drpc_tests', + ['drpc_tests.c', '../drpc.c', '../drpc.pb-c.c', mock_test_utils], + LIBS=['protobuf-c', 'daos_common', 'gurt', 'cmocka'], + ) if __name__ == "SCons.Script": diff --git a/src/container/SConscript b/src/container/SConscript index 64fd9c26a59..f1ba7e0813f 100644 --- a/src/container/SConscript +++ b/src/container/SConscript @@ -22,12 +22,23 @@ def scons(): senv = denv.Clone() senv.require('argobots', 'pmdk', headers_only=True) senv.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) - ds_cont = senv.d_library('cont', - ['srv.c', 'srv_container.c', 'srv_epoch.c', - 'srv_target.c', 'srv_layout.c', 'oid_iv.c', - 'container_iv.c', 'srv_cli.c', 'srv_oi_table.c', - 'srv_metrics.c', common], - install_off="../..") + ds_cont = senv.d_library( + 'cont', + [ + 'srv.c', + 'srv_container.c', + 'srv_epoch.c', + 'srv_target.c', + 'srv_layout.c', + 'oid_iv.c', + 'container_iv.c', + 'srv_cli.c', + 'srv_oi_table.c', + 'srv_metrics.c', + common, + ], + install_off="../..", + ) senv.Install('$PREFIX/lib64/daos_srv', ds_cont) diff --git a/src/control/SConscript b/src/control/SConscript index 933b090b442..19760040c82 100644 --- a/src/control/SConscript +++ b/src/control/SConscript @@ -38,7 +38,7 @@ def 
get_build_flags(benv): def gen_build_id(): """generate a unique build id per binary for use by RPM - https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" + https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" buildid = b2a_hex(urandom(20)) return '0x' + buildid.decode() @@ -48,9 +48,13 @@ def go_ldflags(): Import('daos_version', 'conf_dir') path = 'github.com/daos-stack/daos/src/control/build' - return ' '.join([f'-X {path}.DaosVersion={daos_version}', - f'-X {path}.ConfigDir={conf_dir}', - f'-B $({gen_build_id()}$)']) + return ' '.join( + [ + f'-X {path}.DaosVersion={daos_version}', + f'-X {path}.ConfigDir={conf_dir}', + f'-B $({gen_build_id()}$)', + ] + ) def install_go_bin(env, name, libs=None, install_man=False): @@ -70,12 +74,16 @@ def install_go_bin(env, name, libs=None, install_man=False): libs = [] libs.extend(['daos_common', 'cart', 'gurt']) - target = env.d_run_command(name, sources, libs, - f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' - + f'-ldflags "{go_ldflags()}" ' - + f'{get_build_flags(env)} ' - + f'{get_build_tags(env)} ' - + f'-o {build_bin} {install_src}') + target = env.d_run_command( + name, + sources, + libs, + f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' + + f'-ldflags "{go_ldflags()}" ' + + f'{get_build_flags(env)} ' + + f'{get_build_tags(env)} ' + + f'-o {build_bin} {install_src}', + ) env.Install('$PREFIX/bin', target) if install_man: gen_bin = join('$BUILD_DIR/src/control', name) @@ -98,6 +106,7 @@ def scons(): prefix = denv.subst("$PREFIX") sprefix = denv.subst("$SPDK_PREFIX") if sprefix not in ["", prefix]: + def install_dir(srcdir): """walk a directory and install targets""" for root, _dirs, files in os.walk(srcdir): @@ -121,14 +130,17 @@ def scons(): install_go_bin(denv, "hello_drpc") dbenv = denv.Clone() - dblibs = dbenv.subst("-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$BUILD_DIR/src/client/dfs " - "-L$BUILD_DIR/src/utils $_RPATH") + dblibs = dbenv.subst( + "-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$BUILD_DIR/src/client/dfs " + "-L$BUILD_DIR/src/utils $_RPATH" + ) dbenv.AppendENVPath("CGO_LDFLAGS", dblibs, sep=" ") - install_go_bin(dbenv, 'daos', libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], - install_man=True) + install_go_bin( + dbenv, 'daos', libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], install_man=True + ) if not prereqs.server_requested(): return @@ -151,20 +163,25 @@ def scons(): aenv.AppendUnique(LINKFLAGS=["-Wl,--no-as-needed"]) aenv.Replace(RPATH=[]) - cgolibdirs = aenv.subst("-L$BUILD_DIR/src/control/lib/spdk " - "-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$SPDK_PREFIX/lib " - "-L$OFI_PREFIX/lib $_RPATH") + cgolibdirs = aenv.subst( + "-L$BUILD_DIR/src/control/lib/spdk " + "-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$SPDK_PREFIX/lib " + "-L$OFI_PREFIX/lib $_RPATH" + ) # Explicitly link RTE & SPDK libs for CGO access - ldopts = cgolibdirs + " -lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" + \ - " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" + ldopts = ( + cgolibdirs + + " -lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" + + " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" + ) aenv.AppendENVPath("CGO_LDFLAGS", ldopts, sep=" ") - aenv.AppendENVPath("CGO_CFLAGS", - senv.subst("-I$SPDK_PREFIX/include -I$OFI_PREFIX/include"), - sep=" ") + aenv.AppendENVPath( + "CGO_CFLAGS", senv.subst("-I$SPDK_PREFIX/include 
-I$OFI_PREFIX/include"), sep=" " + ) # Sets CGO_LDFLAGS for rpath aenv.d_add_rpaths(None, True, True) diff --git a/src/control/lib/spdk/ctests/SConscript b/src/control/lib/spdk/ctests/SConscript index a8bd196fed2..16a9bf21cb3 100644 --- a/src/control/lib/spdk/ctests/SConscript +++ b/src/control/lib/spdk/ctests/SConscript @@ -30,9 +30,9 @@ def scons(): look_path = os.path.join(nvme_lib_src, 'nvme_internal.h') if os.path.isfile(look_path): - testbin = unit_env.d_test_program('nvme_control_ctests', - ['nvme_control_ut.c', nc_obj, ncc_obj, control_tgts], - LIBS=libs) + testbin = unit_env.d_test_program( + 'nvme_control_ctests', ['nvme_control_ut.c', nc_obj, ncc_obj, control_tgts], LIBS=libs + ) unit_env.Install("$PREFIX/bin", testbin) else: print(f'{look_path} missing, skipping nvme_control_ut build') diff --git a/src/dtx/SConscript b/src/dtx/SConscript index 5a6849671de..fc4eebbeae1 100644 --- a/src/dtx/SConscript +++ b/src/dtx/SConscript @@ -17,9 +17,11 @@ def scons(): # dtx denv.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) - dtx = denv.d_library('dtx', - ['dtx_srv.c', 'dtx_rpc.c', 'dtx_resync.c', 'dtx_common.c', 'dtx_cos.c'], - install_off="../..") + dtx = denv.d_library( + 'dtx', + ['dtx_srv.c', 'dtx_rpc.c', 'dtx_resync.c', 'dtx_common.c', 'dtx_cos.c'], + install_off="../..", + ) denv.Install('$PREFIX/lib64/daos_srv', dtx) diff --git a/src/engine/SConscript b/src/engine/SConscript index ceb00a409d0..108c119b83e 100644 --- a/src/engine/SConscript +++ b/src/engine/SConscript @@ -25,13 +25,26 @@ def scons(): # Add runtime paths for daos libraries denv.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) - sources = ['drpc_client.c', 'drpc_ras.c', - 'drpc_handler.c', 'drpc_listener.c', - 'drpc_progress.c', 'init.c', 'module.c', - 'srv_cli.c', 'profile.c', 'rpc.c', - 'server_iv.c', 'srv.c', 'srv.pb-c.c', 'tls.c', - 'sched.c', 'ult.c', 'event.pb-c.c', - 'srv_metrics.c'] + libdaos_tgts + sources = [ + 'drpc_client.c', + 'drpc_ras.c', + 'drpc_handler.c', + 'drpc_listener.c', + 'drpc_progress.c', + 'init.c', + 'module.c', + 'srv_cli.c', + 'profile.c', + 'rpc.c', + 'server_iv.c', + 'srv.c', + 'srv.pb-c.c', + 'tls.c', + 'sched.c', + 'ult.c', + 'event.pb-c.c', + 'srv_metrics.c', + ] + libdaos_tgts if denv["STACK_MMAP"] == 1: denv.Append(CCFLAGS=['-DULT_MMAP_STACK']) diff --git a/src/engine/tests/SConscript b/src/engine/tests/SConscript index 4c9fa8a7dea..964c8eb30df 100644 --- a/src/engine/tests/SConscript +++ b/src/engine/tests/SConscript @@ -12,26 +12,39 @@ def scons(): unit_env.AppendUnique(LINKFLAGS=[f'-Wl,@{common_mock_ld_script}']) Depends('drpc_progress_tests', common_mock_ld_script) - unit_env.d_test_program('drpc_progress_tests', - ['drpc_progress_tests.c', drpc_test_utils, '../drpc_progress.c'], - LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'drpc_progress_tests', + ['drpc_progress_tests.c', drpc_test_utils, '../drpc_progress.c'], + LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka'], + ) Depends('drpc_handler_tests', common_mock_ld_script) - unit_env.d_test_program('drpc_handler_tests', - ['drpc_handler_tests.c', drpc_test_utils, '../drpc_handler.c'], - LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'drpc_handler_tests', + ['drpc_handler_tests.c', drpc_test_utils, '../drpc_handler.c'], + LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka'], + ) Depends('drpc_listener_tests', common_mock_ld_script) - unit_env.d_test_program('drpc_listener_tests', - ['drpc_listener_tests.c', drpc_test_utils, '../drpc_listener.c'], - 
LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka']) + unit_env.d_test_program( + 'drpc_listener_tests', + ['drpc_listener_tests.c', drpc_test_utils, '../drpc_listener.c'], + LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka'], + ) Depends('drpc_client_tests', common_mock_ld_script) - unit_env.d_test_program('drpc_client_tests', - ['drpc_client_tests.c', drpc_test_utils, '../drpc_client.c', - '../drpc_ras.c', '../srv.pb-c.c', '../event.pb-c.c'], - LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka', - 'uuid', 'pthread', 'abt', 'cart']) + unit_env.d_test_program( + 'drpc_client_tests', + [ + 'drpc_client_tests.c', + drpc_test_utils, + '../drpc_client.c', + '../drpc_ras.c', + '../srv.pb-c.c', + '../event.pb-c.c', + ], + LIBS=['daos_common', 'protobuf-c', 'gurt', 'cmocka', 'uuid', 'pthread', 'abt', 'cart'], + ) if __name__ == "SCons.Script": diff --git a/src/gurt/SConscript b/src/gurt/SConscript index 389ec182361..929df3513a9 100644 --- a/src/gurt/SConscript +++ b/src/gurt/SConscript @@ -4,8 +4,20 @@ # """Build libgurt""" -SRC = ['debug.c', 'dlog.c', 'hash.c', 'misc.c', 'heap.c', 'errno.c', - 'fault_inject.c', 'slab.c', 'telemetry.c', 'hlc.c', 'hlct.c', 'signals.c'] +SRC = [ + 'debug.c', + 'dlog.c', + 'hash.c', + 'misc.c', + 'heap.c', + 'errno.c', + 'fault_inject.c', + 'slab.c', + 'telemetry.c', + 'hlc.c', + 'hlct.c', + 'signals.c', +] def scons(): diff --git a/src/gurt/tests/SConscript b/src/gurt/tests/SConscript index 6ba63fe61df..03a1b245407 100644 --- a/src/gurt/tests/SConscript +++ b/src/gurt/tests/SConscript @@ -26,10 +26,12 @@ def scons(): flags = [] testobj = test_env.Object(test) testname = os.path.splitext(test)[0] - testprog = test_env.d_test_program(target=testname, - source=testobj + gurt_targets, - LIBS=test_env["LIBS"] + ['yaml'], - LINKFLAGS=flags) + testprog = test_env.d_test_program( + target=testname, + source=testobj + gurt_targets, + LIBS=test_env["LIBS"] + ['yaml'], + LINKFLAGS=flags, + ) tests.append(testprog) Default(tests) diff --git a/src/mgmt/SConscript b/src/mgmt/SConscript index 590f332ca54..534cc3af357 100644 --- a/src/mgmt/SConscript +++ b/src/mgmt/SConscript @@ -13,8 +13,9 @@ def scons(): denv.require('protobufc') - pb_objs = denv.SharedObject(['acl.pb-c.c', 'pool.pb-c.c', 'svc.pb-c.c', - 'smd.pb-c.c', 'cont.pb-c.c', 'server.pb-c.c']) + pb_objs = denv.SharedObject( + ['acl.pb-c.c', 'pool.pb-c.c', 'svc.pb-c.c', 'smd.pb-c.c', 'cont.pb-c.c', 'server.pb-c.c'] + ) common = denv.SharedObject(['rpc.c']) + pb_objs # Management client library @@ -27,13 +28,22 @@ def scons(): senv.require('argobots', 'pmdk', headers_only=True) # Management server module senv.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) - mgmt_srv = senv.d_library('mgmt', - [common, 'srv.c', 'srv_layout.c', - 'srv_pool.c', 'srv_system.c', - 'srv_target.c', 'srv_query.c', - 'srv_drpc.c', 'srv_util.c', - 'srv_container.c'], - install_off='../..') + mgmt_srv = senv.d_library( + 'mgmt', + [ + common, + 'srv.c', + 'srv_layout.c', + 'srv_pool.c', + 'srv_system.c', + 'srv_target.c', + 'srv_query.c', + 'srv_drpc.c', + 'srv_util.c', + 'srv_container.c', + ], + install_off='../..', + ) senv.Install('$PREFIX/lib64/daos_srv', mgmt_srv) denv = senv diff --git a/src/mgmt/tests/SConscript b/src/mgmt/tests/SConscript index 4641d19cdaf..0dbcb6f7a65 100644 --- a/src/mgmt/tests/SConscript +++ b/src/mgmt/tests/SConscript @@ -9,9 +9,11 @@ def scons(): denv.Append(CPPDEFINES={'DRPC_TEST': '1'}) # Isolated unit tests - denv.d_test_program('srv_drpc_tests', - source=[pb_objs, mocks, 'srv_drpc_tests.c', '../srv_drpc.c'], - 
LIBS=['cmocka', 'protobuf-c', 'daos_common_pmem', 'gurt', 'uuid']) + denv.d_test_program( + 'srv_drpc_tests', + source=[pb_objs, mocks, 'srv_drpc_tests.c', '../srv_drpc.c'], + LIBS=['cmocka', 'protobuf-c', 'daos_common_pmem', 'gurt', 'uuid'], + ) if __name__ == "SCons.Script": diff --git a/src/object/SConscript b/src/object/SConscript index e0df53cc3a5..da4ac07e68a 100644 --- a/src/object/SConscript +++ b/src/object/SConscript @@ -12,14 +12,24 @@ def scons(): denv.require('protobufc') # Common object code - common_tgts = denv.SharedObject(['obj_class.c', 'obj_rpc.c', 'obj_task.c', - 'obj_utils.c', 'rpc_csum.c', 'obj_tx.c', - 'obj_enum.c', 'obj_class_def.c', "obj_layout.c"]) + common_tgts = denv.SharedObject( + [ + 'obj_class.c', + 'obj_rpc.c', + 'obj_task.c', + 'obj_utils.c', + 'rpc_csum.c', + 'obj_tx.c', + 'obj_enum.c', + 'obj_class_def.c', + "obj_layout.c", + ] + ) # Object client library - dc_obj_tgts = denv.SharedObject(['cli_obj.c', 'cli_shard.c', - 'cli_mod.c', 'cli_ec.c', 'cli_csum.c', - 'obj_verify.c']) + dc_obj_tgts = denv.SharedObject( + ['cli_obj.c', 'cli_shard.c', 'cli_mod.c', 'cli_ec.c', 'cli_csum.c', 'obj_verify.c'] + ) libdaos_tgts.extend(dc_obj_tgts + common_tgts) if not prereqs.server_requested(): @@ -31,13 +41,23 @@ def scons(): senv.require('pmdk', headers_only=True) senv.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) - srv = senv.d_library('obj', - common_tgts + ['srv_obj.c', 'srv_mod.c', - 'srv_obj_remote.c', 'srv_ec.c', - 'srv_obj_migrate.c', 'srv_enum.c', - 'srv_cli.c', 'srv_ec_aggregate.c', - 'srv_csum.c', 'srv_io_map.c'], - install_off="../..") + srv = senv.d_library( + 'obj', + common_tgts + + [ + 'srv_obj.c', + 'srv_mod.c', + 'srv_obj_remote.c', + 'srv_ec.c', + 'srv_obj_migrate.c', + 'srv_enum.c', + 'srv_cli.c', + 'srv_ec_aggregate.c', + 'srv_csum.c', + 'srv_io_map.c', + ], + install_off="../..", + ) senv.Install('$PREFIX/lib64/daos_srv', srv) if prereqs.test_requested(): diff --git a/src/object/tests/SConscript b/src/object/tests/SConscript index 74c0f41da23..5a14e96d237 100644 --- a/src/object/tests/SConscript +++ b/src/object/tests/SConscript @@ -12,14 +12,19 @@ def scons(): unit_env.Append(RPATH_FULL=['$PREFIX/lib64/daos_srv']) unit_env.Append(CPPDEFINES=['-DDAOS_PMEM_BUILD']) - unit_env.d_test_program(['srv_checksum_tests.c', '../srv_csum.c'], - LIBS=['daos_common_pmem', 'gurt', 'cmocka', - 'vos', 'bio', 'abt']) - - unit_env.d_test_program(['cli_checksum_tests.c', - '../cli_csum.c', - '../../common/tests_lib.c'], - LIBS=['daos_common', 'cmocka', 'gurt', ]) + unit_env.d_test_program( + ['srv_checksum_tests.c', '../srv_csum.c'], + LIBS=['daos_common_pmem', 'gurt', 'cmocka', 'vos', 'bio', 'abt'], + ) + + unit_env.d_test_program( + ['cli_checksum_tests.c', '../cli_csum.c', '../../common/tests_lib.c'], + LIBS=[ + 'daos_common', + 'cmocka', + 'gurt', + ], + ) if __name__ == "SCons.Script": diff --git a/src/pipeline/SConscript b/src/pipeline/SConscript index adafa86dd39..3d7b3619a27 100644 --- a/src/pipeline/SConscript +++ b/src/pipeline/SConscript @@ -14,8 +14,7 @@ def scons(): common_tgts = denv.SharedObject(['pipeline_rpc.c', 'common_pipeline.c']) # Pipeline client library - dc_pipeline_tgts = denv.SharedObject(['cli_pipeline.c', 'pipeline_task.c', - 'cli_mod.c']) + dc_pipeline_tgts = denv.SharedObject(['cli_pipeline.c', 'pipeline_task.c', 'cli_mod.c']) libdaos_tgts.extend(dc_pipeline_tgts + common_tgts) if not prereqs.server_requested(): @@ -24,11 +23,19 @@ def scons(): # generate server module senv = denv.Clone() senv.require('argobots') - srv = 
senv.d_library('pipeline', - common_tgts + ['srv_pipeline.c', 'srv_mod.c', - 'filter.c', 'filter_funcs.c', - 'aggr_funcs.c', 'getdata_funcs.c'], - install_off="../..") + srv = senv.d_library( + 'pipeline', + common_tgts + + [ + 'srv_pipeline.c', + 'srv_mod.c', + 'filter.c', + 'filter_funcs.c', + 'aggr_funcs.c', + 'getdata_funcs.c', + ], + install_off="../..", + ) senv.Install('$PREFIX/lib64/daos_srv', srv) diff --git a/src/placement/SConscript b/src/placement/SConscript index 6d33b74f56d..a8ed01aa12a 100644 --- a/src/placement/SConscript +++ b/src/placement/SConscript @@ -14,8 +14,9 @@ def scons(): libraries = ['isal'] # Common placement code - common_tgts = denv.SharedObject(['pl_map.c', 'ring_map.c', 'jump_map.c', - 'jump_map_versions.c', 'pl_map_common.c']) + common_tgts = denv.SharedObject( + ['pl_map.c', 'ring_map.c', 'jump_map.c', 'jump_map_versions.c', 'pl_map_common.c'] + ) # placement client library libdaos_tgts.extend(common_tgts) diff --git a/src/placement/tests/SConscript b/src/placement/tests/SConscript index b60893f268f..e881317546b 100644 --- a/src/placement/tests/SConscript +++ b/src/placement/tests/SConscript @@ -16,22 +16,26 @@ def scons(): denv.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) - ring_test_tgt = denv.SharedObject(['ring_map_place_obj.c', - 'place_obj_common.c']) - jump_test_tgt = denv.SharedObject(['jump_map_place_obj.c', - 'place_obj_common.c', - 'jump_map_pda.c', - 'jump_map_pda_layout.c', - 'jump_map_dist.c', - 'placement_test.c']) + ring_test_tgt = denv.SharedObject(['ring_map_place_obj.c', 'place_obj_common.c']) + jump_test_tgt = denv.SharedObject( + [ + 'jump_map_place_obj.c', + 'place_obj_common.c', + 'jump_map_pda.c', + 'jump_map_pda_layout.c', + 'jump_map_dist.c', + 'placement_test.c', + ] + ) pl_bench_tgt = denv.SharedObject(['pl_bench.c', 'place_obj_common.c']) libraries = ['daos', 'daos_common', 'gurt', 'uuid', 'cmocka', 'isal', 'm'] ring_pl_test = denv.d_program('ring_pl_map', ring_test_tgt, LIBS=libraries) - jump_pl_test = denv.d_program('jump_pl_map', - jump_test_tgt + ['../../pool/srv_pool_map.c'], LIBS=libraries) + jump_pl_test = denv.d_program( + 'jump_pl_map', jump_test_tgt + ['../../pool/srv_pool_map.c'], LIBS=libraries + ) pl_bench = denv.d_program('pl_bench', pl_bench_tgt, LIBS=libraries) diff --git a/src/pool/SConscript b/src/pool/SConscript index 035142df377..e8dfae1bb1d 100644 --- a/src/pool/SConscript +++ b/src/pool/SConscript @@ -22,13 +22,24 @@ def scons(): senv = denv.Clone() senv.require('argobots') # ds_pool: Pool Server - ds_pool = senv.d_library('pool', - ['srv.c', 'srv_pool.c', 'srv_layout.c', - 'srv_target.c', 'srv_util.c', 'srv_iv.c', - 'srv_cli.c', - 'srv_pool_scrub_ult.c', 'srv_pool_map.c', - 'srv_metrics.c', 'srv_pool_chkpt.c', common], - install_off="../..") + ds_pool = senv.d_library( + 'pool', + [ + 'srv.c', + 'srv_pool.c', + 'srv_layout.c', + 'srv_target.c', + 'srv_util.c', + 'srv_iv.c', + 'srv_cli.c', + 'srv_pool_scrub_ult.c', + 'srv_pool_map.c', + 'srv_metrics.c', + 'srv_pool_chkpt.c', + common, + ], + install_off="../..", + ) senv.Install('$PREFIX/lib64/daos_srv', ds_pool) diff --git a/src/rdb/SConscript b/src/rdb/SConscript index 6af545650ce..6829d421c80 100644 --- a/src/rdb/SConscript +++ b/src/rdb/SConscript @@ -22,11 +22,22 @@ def scons(): denv.AppendUnique(LIBPATH=[Dir('raft/src')]) # rdb - rdb = denv.d_library('rdb', - ['rdb_util.c', 'rdb_path.c', 'rdb_layout.c', - 'rdb_kvs.c', 'rdb_rpc.c', 'rdb_raft.c', - 'rdb_tx.c', 'rdb.c', 'rdb_module.c'], - install_off="../..", LIBS=['raft']) + rdb = 
denv.d_library( + 'rdb', + [ + 'rdb_util.c', + 'rdb_path.c', + 'rdb_layout.c', + 'rdb_kvs.c', + 'rdb_rpc.c', + 'rdb_raft.c', + 'rdb_tx.c', + 'rdb.c', + 'rdb_module.c', + ], + install_off="../..", + LIBS=['raft'], + ) denv.Install('$PREFIX/lib64/daos_srv', rdb) # tests diff --git a/src/rdb/tests/SConscript b/src/rdb/tests/SConscript index cd46e41355f..6b42e9a386d 100644 --- a/src/rdb/tests/SConscript +++ b/src/rdb/tests/SConscript @@ -13,9 +13,11 @@ def scons(): tenv.Install('$PREFIX/lib64/daos_srv', librdbt) # rdbt client - rdbt = tenv.d_program('rdbt', ['rdbt.c', 'rpc.c'] + libdaos_tgts, - LIBS=['daos_common_pmem', 'cart', 'gurt', 'uuid', 'isal', 'protobuf-c', - 'pthread']) + rdbt = tenv.d_program( + 'rdbt', + ['rdbt.c', 'rpc.c'] + libdaos_tgts, + LIBS=['daos_common_pmem', 'cart', 'gurt', 'uuid', 'isal', 'protobuf-c', 'pthread'], + ) tenv.Install('$PREFIX/bin', rdbt) diff --git a/src/rebuild/SConscript b/src/rebuild/SConscript index 14c5f6f7bc8..3a05bee6480 100644 --- a/src/rebuild/SConscript +++ b/src/rebuild/SConscript @@ -16,9 +16,9 @@ def scons(): denv.Append(CCFLAGS=['-Wframe-larger-than=131072']) # rebuild - rebuild = denv.d_library('rebuild', - ['scan.c', 'srv.c', 'rpc.c', 'ras.c', 'rebuild_iv.c'], - install_off="../..") + rebuild = denv.d_library( + 'rebuild', ['scan.c', 'srv.c', 'rpc.c', 'ras.c', 'rebuild_iv.c'], install_off="../.." + ) denv.Install('$PREFIX/lib64/daos_srv', rebuild) diff --git a/src/security/tests/SConscript b/src/security/tests/SConscript index 8c363f42ff3..4b8c0d08ae7 100644 --- a/src/security/tests/SConscript +++ b/src/security/tests/SConscript @@ -8,13 +8,17 @@ def scons(): mocks = denv.Object('drpc_mocks.c') util = denv.Object('sec_test_util.c') # Isolated unit tests - denv.d_test_program('cli_security_tests', - source=['cli_security_tests.c', util, dc_security_tgts, mocks], - LIBS=['cmocka', 'protobuf-c', 'daos_common', 'gurt']) + denv.d_test_program( + 'cli_security_tests', + source=['cli_security_tests.c', util, dc_security_tgts, mocks], + LIBS=['cmocka', 'protobuf-c', 'daos_common', 'gurt'], + ) - denv.d_test_program('srv_acl_tests', - source=['srv_acl_tests.c', util, acl_tgts, mocks], - LIBS=['cmocka', 'protobuf-c', 'daos_common', 'gurt']) + denv.d_test_program( + 'srv_acl_tests', + source=['srv_acl_tests.c', util, acl_tgts, mocks], + LIBS=['cmocka', 'protobuf-c', 'daos_common', 'gurt'], + ) if __name__ == "SCons.Script": diff --git a/src/tests/SConscript b/src/tests/SConscript index 0c620376c21..b1924555d79 100644 --- a/src/tests/SConscript +++ b/src/tests/SConscript @@ -17,10 +17,32 @@ def build_tests(env): denv = env.Clone() denv.compiler_setup() - libs_server = ['dts', 'daos_tests', 'daos_common_pmem', 'cart', 'gurt', 'uuid', 'pthread', - 'dpar', 'isal', 'protobuf-c', 'cmocka'] - libs_client = ['dts', 'daos_tests', 'daos', 'daos_common', 'daos_tests', 'gurt', 'cart', 'uuid', - 'pthread', 'dpar', 'cmocka'] + libs_server = [ + 'dts', + 'daos_tests', + 'daos_common_pmem', + 'cart', + 'gurt', + 'uuid', + 'pthread', + 'dpar', + 'isal', + 'protobuf-c', + 'cmocka', + ] + libs_client = [ + 'dts', + 'daos_tests', + 'daos', + 'daos_common', + 'daos_tests', + 'gurt', + 'cart', + 'uuid', + 'pthread', + 'dpar', + 'cmocka', + ] denv.AppendUnique(CPPPATH=[Dir('suite').srcnode()]) denv.AppendUnique(LIBPATH=[Dir('.')]) @@ -41,18 +63,18 @@ def build_tests(env): if denv["STACK_MMAP"] == 1: new_env = denv.Clone() new_env.Append(CCFLAGS=['-DULT_MMAP_STACK']) - vos_perf = new_env.d_program('vos_perf', - ['vos_perf.c', perf_common, vos_engine] + libdaos_tgts, - 
LIBS=libs_server) + vos_perf = new_env.d_program( + 'vos_perf', ['vos_perf.c', perf_common, vos_engine] + libdaos_tgts, LIBS=libs_server + ) else: - vos_perf = denv.d_program('vos_perf', - ['vos_perf.c', perf_common, vos_engine] + libdaos_tgts, - LIBS=libs_server) + vos_perf = denv.d_program( + 'vos_perf', ['vos_perf.c', perf_common, vos_engine] + libdaos_tgts, LIBS=libs_server + ) denv.Install('$PREFIX/bin/', vos_perf) - obj_ctl = denv.d_program('obj_ctl', - ['obj_ctl.c', cmd_parser, vos_engine] + libdaos_tgts, - LIBS=libs_server) + obj_ctl = denv.d_program( + 'obj_ctl', ['obj_ctl.c', cmd_parser, vos_engine] + libdaos_tgts, LIBS=libs_server + ) denv.Install('$PREFIX/bin/', obj_ctl) jobtest = denv.d_program('jobtest', ['jobtest.c'], LIBS=libs_client) diff --git a/src/tests/suite/SConscript b/src/tests/suite/SConscript index efbcba289cb..577480719eb 100644 --- a/src/tests/suite/SConscript +++ b/src/tests/suite/SConscript @@ -25,13 +25,14 @@ def scons(): daos_test_obj = denv.SharedObject(['daos_obj.c']) c_files = ['daos_debug_set_params.c'] - daosdebug = denv.d_program('daos_debug_set_params', - c_files + daos_test_tgt, - LIBS=['daos_common'] + libraries) + daosdebug = denv.d_program( + 'daos_debug_set_params', c_files + daos_test_tgt, LIBS=['daos_common'] + libraries + ) newenv = denv.Clone() - c_files = Split("""daos_array.c daos_base_tx.c daos_capa.c daos_checksum.c + c_files = Split( + """daos_array.c daos_base_tx.c daos_capa.c daos_checksum.c daos_container.c daos_dedup.c daos_degraded.c daos_dist_tx.c daos_drain_simple.c daos_epoch.c daos_epoch_io.c daos_epoch_recovery.c daos_kv.c @@ -40,15 +41,18 @@ def scons(): daos_rebuild.c daos_rebuild_common.c daos_rebuild_ec.c daos_rebuild_simple.c daos_test.c daos_verify_consistency.c daos_aggregate_ec.c daos_degrade_ec.c - daos_extend_simple.c daos_obj_ec.c daos_upgrade.c daos_pipeline.c""") + daos_extend_simple.c daos_obj_ec.c daos_upgrade.c daos_pipeline.c""" + ) - daostest = newenv.d_program('daos_test', c_files + daos_test_tgt, - LIBS=['daos_common'] + libraries) + daostest = newenv.d_program( + 'daos_test', c_files + daos_test_tgt, LIBS=['daos_common'] + libraries + ) c_files = ['dfs_unit_test.c', 'dfs_par_test.c', 'dfs_test.c', 'dfs_sys_unit_test.c'] newenv.AppendUnique(CPPPATH=[Dir('../../client/dfs').srcnode()]) - dfstest = newenv.d_program('dfs_test', c_files + daos_test_tgt, - LIBS=['daos_common'] + libraries) + dfstest = newenv.d_program( + 'dfs_test', c_files + daos_test_tgt, LIBS=['daos_common'] + libraries + ) denv.Install('$PREFIX/bin/', daostest) denv.Install('$PREFIX/bin/', dfstest) @@ -58,8 +62,9 @@ def scons(): denv.Install('$PREFIX/lib/daos/TESTING/io_conf', 'io_conf/daos_io_conf_3') denv.Install('$PREFIX/lib/daos/TESTING/io_conf', 'io_conf/daos_io_conf_4') denv.Install('$PREFIX/lib/daos/TESTING/io_conf', 'io_conf/daos_io_conf_5') - SConscript('io_conf/SConscript', - exports=['denv', 'daos_epoch_io', 'daos_test_tgt', 'daos_test_obj']) + SConscript( + 'io_conf/SConscript', exports=['denv', 'daos_epoch_io', 'daos_test_tgt', 'daos_test_obj'] + ) if __name__ == "SCons.Script": diff --git a/src/tests/suite/io_conf/SConscript b/src/tests/suite/io_conf/SConscript index 5c01016164b..50f5041a417 100644 --- a/src/tests/suite/io_conf/SConscript +++ b/src/tests/suite/io_conf/SConscript @@ -9,14 +9,14 @@ def scons(): libraries += ['cmocka', 'json-c', 'dpar'] iogen = denv.SharedObject('daos_generate_io_conf.c') - daos_gen_io_conf = denv.d_test_program('daos_gen_io_conf', - [daos_test_tgt, daos_epoch_io, daos_test_obj, iogen], - 
LIBS=libraries) + daos_gen_io_conf = denv.d_test_program( + 'daos_gen_io_conf', [daos_test_tgt, daos_epoch_io, daos_test_obj, iogen], LIBS=libraries + ) ioconf = denv.SharedObject('daos_run_io_conf.c') - daos_run_io_conf = denv.d_test_program('daos_run_io_conf', - [daos_test_tgt, daos_epoch_io, daos_test_obj, ioconf], - LIBS=libraries) + daos_run_io_conf = denv.d_test_program( + 'daos_run_io_conf', [daos_test_tgt, daos_epoch_io, daos_test_obj, ioconf], LIBS=libraries + ) denv.Install('$PREFIX/bin/', daos_gen_io_conf) denv.Install('$PREFIX/bin/', daos_run_io_conf) diff --git a/src/vos/SConscript b/src/vos/SConscript index 36e71260ec1..c78a6aa4b58 100644 --- a/src/vos/SConscript +++ b/src/vos/SConscript @@ -1,12 +1,33 @@ """Build versioned object store""" -FILES = ["evt_iter.c", "vos_common.c", "vos_iterator.c", "vos_io.c", - "vos_pool.c", "vos_aggregate.c", "vos_container.c", "vos_obj.c", - "vos_obj_cache.c", "vos_obj_index.c", "vos_tree.c", "evtree.c", - "vos_dtx.c", "vos_query.c", "vos_overhead.c", - "vos_dtx_iter.c", "vos_gc.c", "vos_ilog.c", "ilog.c", "vos_ts.c", - "lru_array.c", "vos_space.c", "sys_db.c", "vos_policy.c", - "vos_csum_recalc.c", "vos_pool_scrub.c"] +FILES = [ + "evt_iter.c", + "vos_common.c", + "vos_iterator.c", + "vos_io.c", + "vos_pool.c", + "vos_aggregate.c", + "vos_container.c", + "vos_obj.c", + "vos_obj_cache.c", + "vos_obj_index.c", + "vos_tree.c", + "evtree.c", + "vos_dtx.c", + "vos_query.c", + "vos_overhead.c", + "vos_dtx_iter.c", + "vos_gc.c", + "vos_ilog.c", + "ilog.c", + "vos_ts.c", + "lru_array.c", + "vos_space.c", + "sys_db.c", + "vos_policy.c", + "vos_csum_recalc.c", + "vos_pool_scrub.c", +] def build_vos(env, standalone): diff --git a/src/vos/storage_estimator/common/__init__.py b/src/vos/storage_estimator/common/__init__.py index 4ca82ff9ff5..da88d621ae3 100644 --- a/src/vos/storage_estimator/common/__init__.py +++ b/src/vos/storage_estimator/common/__init__.py @@ -3,10 +3,4 @@ SPDX-License-Identifier: BSD-2-Clause-Patent ''' -__all__ = [ - 'dfs_sb', - 'explorer', - 'parse_csv', - 'vos_size', - 'vos_structures', - 'util'] +__all__ = ['dfs_sb', 'explorer', 'parse_csv', 'vos_size', 'vos_structures', 'util'] diff --git a/src/vos/storage_estimator/common/dfs_sb.py b/src/vos/storage_estimator/common/dfs_sb.py index 6b9f7fa4833..9d6d730561f 100644 --- a/src/vos/storage_estimator/common/dfs_sb.py +++ b/src/vos/storage_estimator/common/dfs_sb.py @@ -85,7 +85,9 @@ def _print_akey(iod, overhead='meta'): overhead: {3} value_type: {4} values: [{5}] -'''.format(key, key, iov_buf_len, overhead, iod_type, values) +'''.format( + key, key, iov_buf_len, overhead, iod_type, values + ) return key, akey @@ -113,7 +115,9 @@ def _print_dkey(dkey, akeys): size: {1} overhead: meta akeys: {2} -'''.format(key, dkey.iov_len, _list_2_str(akeys)) +'''.format( + key, dkey.iov_len, _list_2_str(akeys) + ) return buf @@ -129,7 +133,9 @@ def _print_dfs_inode(key, size): overhead: meta value_type: array values: [{1}] -'''.format(key, values) +'''.format( + key, values + ) return buf @@ -161,14 +167,8 @@ def _create_akey(iod): iod_type = ValType.ARRAY overhead = Overhead.META - akey = AKey( - key=key.decode('utf-8'), - value_type=iod_type, - overhead=overhead) - value = VosValue( - count=int( - iod.iod_nr), size=int( - iod.iod_size)) + akey = AKey(key=key.decode('utf-8'), value_type=iod_type, overhead=overhead) + value = VosValue(count=int(iod.iod_nr), size=int(iod.iod_size)) akey.add_value(value) return akey @@ -189,23 +189,22 @@ def _parse_dfs_sb_dkey(dkey_raw, iods, akey_count): def 
_parse_dfs_akey_inode(dfs_entry_key_size, dfs_entry_size): key = 'x' * dfs_entry_key_size value = VosValue(size=dfs_entry_size) - akey = AKey( - key=key, - overhead=Overhead.META, - value_type=ValType.ARRAY) + akey = AKey(key=key, overhead=Overhead.META, value_type=ValType.ARRAY) akey.add_value(value) return akey class STR_BUFFER(ctypes.Structure): - _fields_ = [("status", ctypes.c_int), - ("str_len", ctypes.c_size_t), - ("buf_len", ctypes.c_size_t), - ("cstr", ctypes.c_char_p)] + _fields_ = [ + ("status", ctypes.c_int), + ("str_len", ctypes.c_size_t), + ("buf_len", ctypes.c_size_t), + ("cstr", ctypes.c_char_p), + ] -class BASE_CLASS(): +class BASE_CLASS: def __init__(self, lib_name): self._lib = self._load_lib(lib_name) @@ -214,14 +213,10 @@ def _load_lib(self, lib_name): try: lib_path = os.path.join(current_path, '../../..') - libdfs = ctypes.CDLL( - os.path.join(lib_path, lib_name), - mode=ctypes.DEFAULT_MODE) + libdfs = ctypes.CDLL(os.path.join(lib_path, lib_name), mode=ctypes.DEFAULT_MODE) except OSError as err: - raise Exception( - 'failed to load {0} library: {1}'.format( - lib_name, err)) + raise Exception('failed to load {0} library: {1}'.format(lib_name, err)) return libdfs @@ -238,13 +233,11 @@ def get_vos_size_str(self, alloc_overhead, vospath): """vospath - mount point of daos. Default is /mnt/daos""" print(' Reading VOS structures from current installation') ret = self._lib.get_vos_structure_sizes_yaml( - ctypes.c_int(alloc_overhead), - ctypes.byref(self._data), - bytes(vospath, encoding='utf-8')) + ctypes.c_int(alloc_overhead), ctypes.byref(self._data), bytes(vospath, encoding='utf-8') + ) if ret != 0: - raise Exception( - 'failed to retrieve the VOS structure sizes') + raise Exception('failed to retrieve the VOS structure sizes') vos_str = ctypes.string_at(self._data.cstr, self._data.str_len) @@ -279,22 +272,19 @@ def _dfs_get_sb_layout(self): ctypes.byref(self._iods), ctypes.byref(self._akey_count), ctypes.byref(self._dfs_entry_key_size), - ctypes.byref(self._dfs_entry_size)) + ctypes.byref(self._dfs_entry_size), + ) self._ready = True if ret != 0: - raise Exception( - 'failed to retrieve the DFS Super Block. RC: {0}'.format(ret)) + raise Exception('failed to retrieve the DFS Super Block. 
RC: {0}'.format(ret)) def get_dfs_str(self): if not self._ready: self._dfs_get_sb_layout() return _print_dfs( - self._dkey, - self._iods, - self._akey_count, - self._dfs_entry_key_size, - self._dfs_entry_size) + self._dkey, self._iods, self._akey_count, self._dfs_entry_key_size, self._dfs_entry_size + ) def get_dfs_sb_dkey(self): if not self._ready: @@ -304,16 +294,13 @@ def get_dfs_sb_dkey(self): def get_dfs_inode_akey(self): if not self._ready: self._dfs_get_sb_layout() - return _parse_dfs_akey_inode( - self._dfs_entry_key_size.value, - self._dfs_entry_size.value) + return _parse_dfs_akey_inode(self._dfs_entry_key_size.value, self._dfs_entry_size.value) def print_daos_version(): try: current_path = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join(current_path, '../../../daos/VERSION'), - 'r') as version_file: + with open(os.path.join(current_path, '../../../daos/VERSION'), 'r') as version_file: daos_version = version_file.read().rstrip() except OSError: daos_version = '0.0.0' @@ -327,8 +314,7 @@ def get_dfs_sb_obj(): dkey = dfs_sb.get_dfs_sb_dkey() dfs_inode = dfs_sb.get_dfs_inode_akey() except Exception as err: - raise Exception( - 'Failed to get the DFS superblock VOS object: {0}'.format(err)) + raise Exception('Failed to get the DFS superblock VOS object: {0}'.format(err)) sb_obj = VosObject() sb_obj.add_value(dkey) diff --git a/src/vos/storage_estimator/common/explorer.py b/src/vos/storage_estimator/common/explorer.py index aafc7371989..8f63d2fcd9b 100644 --- a/src/vos/storage_estimator/common/explorer.py +++ b/src/vos/storage_estimator/common/explorer.py @@ -9,16 +9,24 @@ import sys from storage_estimator.util import CommonBase, ObjectClass -from storage_estimator.vos_structures import (AKey, Container, DKey, KeyType, Overhead, ValType, - VosObject, VosValue) - - -class FileInfo(): +from storage_estimator.vos_structures import ( + AKey, + Container, + DKey, + KeyType, + Overhead, + ValType, + VosObject, + VosValue, +) + + +class FileInfo: def __init__(self, size): self.st_size = size -class Entry(): +class Entry: def __init__(self, name, path): self.path = path self.name = name @@ -40,9 +48,7 @@ def __init__(self, arg): elif isinstance(arg, ObjectClass): self._dfs = DFS(arg) else: - raise TypeError( - 'arg must be of type {0} or {1}'.format( - type(DFS), type(ObjectClass))) + raise TypeError('arg must be of type {0} or {1}'.format(type(DFS), type(ObjectClass))) self._total_symlinks = 0 self._avg_symlink_size = 0 @@ -92,8 +98,7 @@ def set_avg_dir_name_size(self, name_size): def set_avg_name_size(self, name_size): self._check_value_type(name_size, int) self._avg_name_size = name_size - self._debug( - 'using {0} average file name size'.format(self._avg_name_size)) + self._debug('using {0} average file name size'.format(self._avg_name_size)) def get_dfs(self): new_dfs = self._dfs.copy() @@ -114,20 +119,18 @@ def _calculate_average_dir(self, dfs): symlink_per_dir += (self._total_symlinks % self._total_dirs) > 0 self._debug( 'adding {} symlinks of name size {} bytes and size {} bytes per directory'.format( - symlink_per_dir, - self._avg_name_size, - self._avg_symlink_size)) - dfs.add_symlink( - oid, - avg_name, - self._avg_symlink_size, - symlink_per_dir) + symlink_per_dir, self._avg_name_size, self._avg_symlink_size + ) + ) + dfs.add_symlink(oid, avg_name, self._avg_symlink_size, symlink_per_dir) # add dirs avg_dir_name = 'x' * self._dir_name_size self._debug( 'adding 1 directory of name size {0} bytes per directory'.format( - self._dir_name_size)) + 
self._dir_name_size + ) + ) dfs.add_dummy(oid, avg_dir_name) # add files @@ -135,7 +138,9 @@ def _calculate_average_dir(self, dfs): files_per_dir += (self._total_files % self._total_dirs) > 0 self._debug( 'adding {0} files of name size {1} per directory'.format( - files_per_dir, self._avg_name_size)) + files_per_dir, self._avg_name_size + ) + ) dfs.add_dummy(oid, avg_name, files_per_dir) return dfs @@ -272,24 +277,17 @@ def show_stats(self): def _create_default_dkey0(self): akey = AKey( - key='0', - key_type=KeyType.HASHED, - overhead=Overhead.META, - value_type=ValType.ARRAY) + key='0', key_type=KeyType.HASHED, overhead=Overhead.META, value_type=ValType.ARRAY + ) value = VosValue(count=1, size=32) akey.add_value(value) - dkey = DKey( - key_type=KeyType.INTEGER, - overhead=Overhead.META, - akeys=[akey]) + dkey = DKey(key_type=KeyType.INTEGER, overhead=Overhead.META, akeys=[akey]) return dkey def _create_default_inode_akey(self, key='DFS_INODE', size=96): value = VosValue(size=size) - akey = AKey(key=key, - overhead=Overhead.META, - value_type=ValType.ARRAY) + akey = AKey(key=key, overhead=Overhead.META, value_type=ValType.ARRAY) akey.add_value(value) return akey @@ -303,10 +301,7 @@ def _create_file_akey(self, size, io_size): remainder = size % io_size akey_size = io_size - akey = AKey( - key_type=KeyType.INTEGER, - overhead=Overhead.USER, - value_type=ValType.ARRAY) + akey = AKey(key_type=KeyType.INTEGER, overhead=Overhead.USER, value_type=ValType.ARRAY) if count > 0: value = VosValue(count=count, size=akey_size) @@ -320,10 +315,7 @@ def _create_file_akey(self, size, io_size): def _create_file_dkey(self, size, io_size): akey = self._create_file_akey(size, io_size) - dkey = DKey( - key_type=KeyType.INTEGER, - overhead=Overhead.USER, - akeys=[akey]) + dkey = DKey(key_type=KeyType.INTEGER, overhead=Overhead.USER, akeys=[akey]) return dkey @@ -381,8 +373,8 @@ def _add_ec_full_chunks(self, file_object, stripe_size, chunks, parity_stats): def _add_ec_elements(self, file_object, chunks, remainder, parity_stats): """If it's an EC class, add EC specific data and return True. 
- For now, the command line ensures ec cell size is never smaller than - the io_size or chunk_size for simplicity""" + For now, the command line ensures ec cell size is never smaller than + the io_size or chunk_size for simplicity""" parity = self._oclass.get_file_parity() if parity == 0: @@ -412,9 +404,7 @@ def _add_elements(self, file_object, file_size, parity_stats): chunks = file_size // self._chunk_size remainder = file_size % self._chunk_size - self._debug( - 'adding {0} chunk(s) of size {1}'.format( - chunks, self._chunk_size)) + self._debug('adding {0} chunk(s) of size {1}'.format(chunks, self._chunk_size)) if self._add_ec_elements(file_object, chunks, remainder, parity_stats): return @@ -450,9 +440,7 @@ def create_file_obj(self, file_size, identical_files=1): return parity_stats = CellStats(self._verbose) - self._debug( - 'adding {0} file(s) of size of {1}'.format( - identical_files, file_size)) + self._debug('adding {0} file(s) of size of {1}'.format(identical_files, file_size)) file_object = VosObject() file_object.set_num_of_targets(self._oclass.get_file_targets()) file_object.set_count(identical_files) @@ -528,26 +516,14 @@ def print_stats(self): self._info('') self._info('Summary:') self._info('') - self._info( - ' directories {0} count {1}'.format( - self._count_files, - pretty_name_size)) - self._info( - ' files {0} count {1}'.format( - self._count_dir, - pretty_file_size)) - self._info( - ' symlinks {0} count {1}'.format( - self._count_sym, - pretty_sym_size)) + self._info(' directories {0} count {1}'.format(self._count_files, pretty_name_size)) + self._info(' files {0} count {1}'.format(self._count_dir, pretty_file_size)) + self._info(' symlinks {0} count {1}'.format(self._count_sym, pretty_sym_size)) self._info(' errors {0} count'.format(self._count_error)) self._info('') self._info(' total count {0}'.format(total_count)) - self._info( - ' total size {0} ({1} bytes)'.format( - pretty_total_size, - total_size)) + self._info(' total size {0} ({1} bytes)'.format(pretty_total_size, total_size)) self._info('') @@ -566,8 +542,7 @@ def _get_avg_file_name_size(self): else: avg_file_name_size = self._name_size // total_items - self._debug( - ' assuming average file name size of {0} bytes'.format(avg_file_name_size)) + self._debug(' assuming average file name size of {0} bytes'.format(avg_file_name_size)) return avg_file_name_size def get_dfs_average(self): @@ -581,8 +556,7 @@ def get_dfs_average(self): if self._count_files > 0: avg_file_size = self._file_size // self._count_files - self._debug( - ' assuming average file size of {0} bytes'.format(avg_file_size)) + self._debug(' assuming average file size of {0} bytes'.format(avg_file_size)) averageFS.add_average_file(self._count_files, avg_file_size) dfs = averageFS.get_dfs() @@ -599,7 +573,8 @@ def _process_stats(self, container): "values": 0, "dkey_size": 0, "akey_size": 0, - "value_size": 0} + "value_size": 0, + } for object in container["objects"]: obj_count = object.get("count", 11) @@ -632,8 +607,7 @@ def _process_stats(self, container): continue total_values = obj_count * dkey_count * akey_count * value_count - stats["value_size"] += value.get("size", - 0) * total_values + stats["value_size"] += value.get("size", 0) * total_values stats["values"] += total_akeys return stats @@ -654,9 +628,7 @@ def _read_directory_3(self, file_path): self._process_file(entry) count += 1 else: - self._error( - 'found unknown object (skipped): {0}'.format( - entry.name)) + self._error('found unknown object (skipped): {0}'.format(entry.name)) 
if count == 0: self._process_empty_dir() @@ -679,9 +651,7 @@ def _read_directory_2(self, file_path): elif os.path.isfile(target): self._process_file(entry) else: - print( - 'Error: found unknown object (skipped): {0}'.format( - entry.name)) + print('Error: found unknown object (skipped): {0}'.format(entry.name)) def _read_directory(self, file_path): try: @@ -701,9 +671,7 @@ def _process_empty_dir(self): self._dfs.remove_obj(self._oid) def _process_error(self, file_path): - self._debug( - 'a adding dummy entry {0} for {1}'.format( - self._oid, file_path)) + self._debug('a adding dummy entry {0} for {1}'.format(self._oid, file_path)) self._dfs.add_dummy(self._oid, 'unknown') self._count_error += 1 diff --git a/src/vos/storage_estimator/common/parse_csv.py b/src/vos/storage_estimator/common/parse_csv.py index 8820a3e3aaa..384a453af3f 100644 --- a/src/vos/storage_estimator/common/parse_csv.py +++ b/src/vos/storage_estimator/common/parse_csv.py @@ -8,8 +8,26 @@ from storage_estimator.explorer import AverageFS from storage_estimator.util import ProcessBase -FILE_SIZES = ['4k', '64k', '128k', '256k', '512k', '768k', '1m', '8m', '64m', - '128m', '1g', '10g', '100g', '250g', '500g', '1t', '10t', '100t'] +FILE_SIZES = [ + '4k', + '64k', + '128k', + '256k', + '512k', + '768k', + '1m', + '8m', + '64m', + '128m', + '1g', + '10g', + '100g', + '250g', + '500g', + '1t', + '10t', + '100t', +] class ProcessCSV(ProcessBase): @@ -36,8 +54,8 @@ def _ingest_csv(self): raise Exception( "CSV must provide one row of values that matches fields" "Number of fields is {0}" - "Number of values is {1}".format( - len(fields), len(values))) + "Number of values is {1}".format(len(fields), len(values)) + ) for name in fields: value_dict[name] = values[idx] idx += 1 @@ -55,10 +73,7 @@ def _ingest_csv(self): symlink_size = 0 total_items = count_files + count_symlink + count_dir - unknown_items = int( - value_dict.get( - "total_objects", - 0)) - total_items + unknown_items = int(value_dict.get("total_objects", 0)) - total_items self._debug("total files {0}".format(count_files)) self._debug("total directories {0}".format(count_dir)) @@ -68,12 +83,9 @@ def _ingest_csv(self): items_per_dir = total_items // count_dir dir_name_size = total_dir_size // count_dir - self._debug( - 'assuming {0} items per directory'.format(items_per_dir)) - self._debug( - 'assuming average symlink size of {0} bytes'.format(symlink_size)) - self._debug( - 'assuming average dir size of {0} bytes'.format(dir_name_size)) + self._debug('assuming {0} items per directory'.format(items_per_dir)) + self._debug('assuming average symlink size of {0} bytes'.format(symlink_size)) + self._debug('assuming average dir size of {0} bytes'.format(dir_name_size)) afs = AverageFS(self._oclass) afs.set_verbose(self._verbose) @@ -94,11 +106,11 @@ def _ingest_csv(self): total_size = int(value_dict.get("%s_size" % size, 0)) if num_files != 0: - avg_file_size = (total_size // num_files) + avg_file_size = total_size // num_files pretty_size = self._to_human(avg_file_size) self._debug( - 'found {0} files of {1} average size'.format( - num_files, pretty_size)) + 'found {0} files of {1} average size'.format(num_files, pretty_size) + ) afs.add_average_file(num_files, avg_file_size) return afs diff --git a/src/vos/storage_estimator/common/tests/storage_estimator_test.py b/src/vos/storage_estimator/common/tests/storage_estimator_test.py index 66944e75968..4375daad37b 100644 --- a/src/vos/storage_estimator/common/tests/storage_estimator_test.py +++ 
b/src/vos/storage_estimator/common/tests/storage_estimator_test.py @@ -11,13 +11,22 @@ from storage_estimator.explorer import FileSystemExplorer from storage_estimator.parse_csv import ProcessCSV from storage_estimator.util import ObjectClass -from storage_estimator.vos_structures import (AKey, Container, Containers, DKey, Overhead, ValType, - VosObject, VosValue, VosValueError) +from storage_estimator.vos_structures import ( + AKey, + Container, + Containers, + DKey, + Overhead, + ValType, + VosObject, + VosValue, + VosValueError, +) from .util import FileGenerator -class MockArgs(): +class MockArgs: def __init__(self, file_oclass="SX", csv_file=""): self.dir_oclass = "S1" self.file_oclass = file_oclass @@ -47,8 +56,7 @@ def test_invalid_parameters(self): assert "count parameter must be of type int" in str(err.value) # nosec with pytest.raises(TypeError) as err: - _ = VosValue( - size=5, count=10, aligned="rubbish") + _ = VosValue(size=5, count=10, aligned="rubbish") assert "aligned parameter must be of type" in str(err.value) # nosec @pytest.mark.ut @@ -86,12 +94,13 @@ def create_values(self): return [value1, value2] def create_default_akey( - self, - key="A-key 1", - count=1, - key_type="hashed", - value_type="single_value", - overhead="user"): + self, + key="A-key 1", + count=1, + key_type="hashed", + value_type="single_value", + overhead="user", + ): values = self.create_values() raw_values = self._dump_values(values) @@ -103,7 +112,8 @@ def create_default_akey( "type": key_type, "overhead": overhead, "value_type": value_type, - "values": raw_values} + "values": raw_values, + } if key_type == "hashed": key_size = len(key.encode("utf-8")) @@ -119,24 +129,14 @@ def _dump_values(self, values): return raw_values def create_akeys(self): - akey1 = self.create_default_akey( - key="A-key 1", value_type="single_value") + akey1 = self.create_default_akey(key="A-key 1", value_type="single_value") akey2 = self.create_default_akey(key="A-key 2", value_type="array") return [akey1, akey2] - def create_default_dkey( - self, - key="D-key 1", - count=1, - key_type="hashed", - overhead="user"): + def create_default_dkey(self, key="D-key 1", count=1, key_type="hashed", overhead="user"): akeys = self.create_akeys() - dkey = { - "count": count, - "type": key_type, - "overhead": overhead, - "akeys": akeys} + dkey = {"count": count, "type": key_type, "overhead": overhead, "akeys": akeys} if key_type == "hashed": key_size = len(key.encode("utf-8")) @@ -148,62 +148,38 @@ def create_default_object(self, key="Object 1", count=1): dkey1 = self.create_default_dkey(key="D-key 1") dkey2 = self.create_default_dkey(key="D-key 2") - vos_object = { - "targets": 0, - "count": count, - "dkeys": [ - dkey1, - dkey2]} + vos_object = {"targets": 0, "count": count, "dkeys": [dkey1, dkey2]} return vos_object - def create_default_container( - self, count=1, csum_size=0, csum_gran=16384): + def create_default_container(self, count=1, csum_size=0, csum_gran=16384): self.raw_obj1 = self.create_default_object(count=100) self.raw_obj2 = self.create_default_object(count=200) vos_container = { "count": count, "csum_size": csum_size, "csum_gran": csum_gran, - "objects": [ - self.raw_obj1, - self.raw_obj2]} + "objects": [self.raw_obj1, self.raw_obj2], + } return vos_container def _create_sb_akey(self, key, size): value = VosValue(size=size) - akey = AKey( - key=key, - overhead=Overhead.META, - value_type=ValType.SINGLE) + akey = AKey(key=key, overhead=Overhead.META, value_type=ValType.SINGLE) akey.add_value(value) return akey def 
get_mock_dfs_superblock_obj(self): - dkey_sb = DKey( - key="DFS_SB_METADATA", - overhead=Overhead.META) + dkey_sb = DKey(key="DFS_SB_METADATA", overhead=Overhead.META) dkey_sb.add_value(self._create_sb_akey(key="DFS_MAGIC", size=8)) - dkey_sb.add_value( - self._create_sb_akey( - key="DFS_SB_VERSION", size=2)) - dkey_sb.add_value( - self._create_sb_akey( - key="DFS_LAYOUT_VERSION", - size=2)) - dkey_sb.add_value( - self._create_sb_akey( - key="DFS_CHUNK_SIZE", size=8)) - dkey_sb.add_value( - self._create_sb_akey( - key="DFS_OBJ_CLASS", size=2)) + dkey_sb.add_value(self._create_sb_akey(key="DFS_SB_VERSION", size=2)) + dkey_sb.add_value(self._create_sb_akey(key="DFS_LAYOUT_VERSION", size=2)) + dkey_sb.add_value(self._create_sb_akey(key="DFS_CHUNK_SIZE", size=8)) + dkey_sb.add_value(self._create_sb_akey(key="DFS_OBJ_CLASS", size=2)) inode_value = VosValue(size=64) - akey_inode = AKey( - key="DFS_INODE", - overhead=Overhead.META, - value_type=ValType.ARRAY) + akey_inode = AKey(key="DFS_INODE", overhead=Overhead.META, value_type=ValType.ARRAY) akey_inode.add_value(inode_value) dkey_root = DKey(key="/") dkey_root.add_value(akey_inode) @@ -222,7 +198,8 @@ def process_stats(self, container): "values": 0, "dkey_size": 0, "akey_size": 0, - "value_size": 0} + "value_size": 0, + } for object in container["objects"]: obj_count = object.get("count", 11) @@ -255,8 +232,7 @@ def process_stats(self, container): continue total_values = obj_count * dkey_count * akey_count * value_count - stats["value_size"] += value.get( - "size", 0) * total_values + stats["value_size"] += value.get("size", 0) * total_values stats["values"] += total_akeys return stats @@ -277,117 +253,82 @@ def test_invalid_parameters(self): assert "value_type parameter must be of type" in str(err.value) # nosec with pytest.raises(TypeError) as err: - _ = AKey( - value_type="single_value", count="rubbish") + _ = AKey(value_type="single_value", count="rubbish") assert "count parameter must be of type int" in str(err.value) # nosec with pytest.raises(TypeError) as err: - _ = AKey( - value_type="single_value", key_type="rubbish") + _ = AKey(value_type="single_value", key_type="rubbish") assert "key_type parameter must be of type" in str(err.value) # nosec with pytest.raises(TypeError) as err: - _ = AKey( - value_type="single_value", overhead="rubbish") + _ = AKey(value_type="single_value", overhead="rubbish") assert "overhead parameter must be of type" in str(err.value) # nosec with pytest.raises(TypeError) as err: - _ = AKey( - value_type="single_value", values=["rubbish"]) + _ = AKey(value_type="single_value", values=["rubbish"]) assert "must be of type" in str(err.value) # nosec @pytest.mark.ut def test_constructor(self): values = self.test_data.create_values() - akey = AKey( - key="A-key 1", - value_type="single_value", - values=values) + akey = AKey(key="A-key 1", value_type="single_value", values=values) want = self.test_data.create_default_akey() assert want == akey.dump() # nosec - akey = AKey( - key="A-key 1", - key_type="hashed", - value_type="single_value", - values=values) + akey = AKey(key="A-key 1", key_type="hashed", value_type="single_value", values=values) want = self.test_data.create_default_akey( - key="A-key 1", key_type="hashed", value_type="single_value") + key="A-key 1", key_type="hashed", value_type="single_value" + ) assert want == akey.dump() # nosec - akey = AKey( - key_type="integer", - value_type="single_value", - values=values) - want = self.test_data.create_default_akey( - key_type="integer", 
value_type="single_value") + akey = AKey(key_type="integer", value_type="single_value", values=values) + want = self.test_data.create_default_akey(key_type="integer", value_type="single_value") assert want == akey.dump() # nosec - akey = AKey( - key_type="integer", - value_type="single_value", - count=20, - values=values) + akey = AKey(key_type="integer", value_type="single_value", count=20, values=values) want = self.test_data.create_default_akey( - key_type="integer", value_type="single_value", count=20) + key_type="integer", value_type="single_value", count=20 + ) assert want == akey.dump() # nosec - akey = AKey( - key_type="integer", - value_type="single_value", - overhead="user", - values=values) + akey = AKey(key_type="integer", value_type="single_value", overhead="user", values=values) want = self.test_data.create_default_akey( - key_type="integer", value_type="single_value", overhead="user") + key_type="integer", value_type="single_value", overhead="user" + ) assert want == akey.dump() # nosec - akey = AKey( - key_type="integer", - value_type="single_value", - overhead="meta", - values=values) + akey = AKey(key_type="integer", value_type="single_value", overhead="meta", values=values) want = self.test_data.create_default_akey( - key_type="integer", value_type="single_value", overhead="meta") + key_type="integer", value_type="single_value", overhead="meta" + ) assert want == akey.dump() # nosec - akey = AKey( - key="A-key 1", - value_type="array", - overhead="user", - values=values) + akey = AKey(key="A-key 1", value_type="array", overhead="user", values=values) want = self.test_data.create_default_akey( - key="A-key 1", value_type="array", overhead="user") + key="A-key 1", value_type="array", overhead="user" + ) assert want == akey.dump() # nosec - akey = AKey( - key="A-key 1", - value_type="array", - overhead="meta", - values=values) + akey = AKey(key="A-key 1", value_type="array", overhead="meta", values=values) want = self.test_data.create_default_akey( - key="A-key 1", value_type="array", overhead="meta") + key="A-key 1", value_type="array", overhead="meta" + ) assert want == akey.dump() # nosec - akey = AKey( - key="A-key 1", - value_type="array", - values=values) - want = self.test_data.create_default_akey( - key="A-key 1", key_type=None, value_type="array") + akey = AKey(key="A-key 1", value_type="array", values=values) + want = self.test_data.create_default_akey(key="A-key 1", key_type=None, value_type="array") assert want == akey.dump() # nosec @pytest.mark.ut def test_add_value(self): with pytest.raises(VosValueError) as err: - akey = AKey( - key="A-key 1", value_type="single_value") + akey = AKey(key="A-key 1", value_type="single_value") akey.dump() assert "list of values must not be empty" in str(err.value) # nosec with pytest.raises(TypeError) as err: - akey = AKey( - key="A-key 1", value_type="single_value") + akey = AKey(key="A-key 1", value_type="single_value") akey.add_value("rubbish") assert "must be of type" in str(err.value) # nosec @@ -424,13 +365,9 @@ def test_invalid_parameters(self): @pytest.mark.ut def test_constructor(self): akey1 = AKey( - key="A-key 1", - value_type="single_value", - values=self.test_data.create_values()) - akey2 = AKey( - key="A-key 2", - value_type="array", - values=self.test_data.create_values()) + key="A-key 1", value_type="single_value", values=self.test_data.create_values() + ) + akey2 = AKey(key="A-key 2", value_type="array", values=self.test_data.create_values()) dkey = DKey(key="D-key 1", akeys=[akey1, akey2]) want = 
self.test_data.create_default_dkey() @@ -450,15 +387,9 @@ def test_add_value(self): assert "must be of type" in str(err.value) # nosec dkey = DKey(key="D-key 1") - akey = AKey( - key="A-key 1", - value_type="single_value", - values=self.test_data.create_values()) + akey = AKey(key="A-key 1", value_type="single_value", values=self.test_data.create_values()) dkey.add_value(akey) - akey = AKey( - key="A-key 2", - value_type="array", - values=self.test_data.create_values()) + akey = AKey(key="A-key 2", value_type="array", values=self.test_data.create_values()) dkey.add_value(akey) want = self.test_data.create_default_dkey() @@ -470,13 +401,9 @@ def test_add_value(self): class ObjectTestCase(unittest.TestCase): def setUp(self): akey1 = AKey( - key="A-key 1", - value_type="single_value", - values=self.test_data.create_values()) - akey2 = AKey( - key="A-key 2", - value_type="array", - values=self.test_data.create_values()) + key="A-key 1", value_type="single_value", values=self.test_data.create_values() + ) + akey2 = AKey(key="A-key 2", value_type="array", values=self.test_data.create_values()) self.dkey1 = DKey(key="D-key 1", akeys=[akey1, akey2]) self.dkey2 = DKey(key="D-key 2", akeys=[akey1, akey2]) @@ -492,8 +419,7 @@ def test_invalid_parameters(self): @pytest.mark.ut def test_constructor(self): - vos_object = VosObject( - count=100, dkeys=[self.dkey1, self.dkey2]) + vos_object = VosObject(count=100, dkeys=[self.dkey1, self.dkey2]) want = self.test_data.create_default_object(count=100) assert want == vos_object.dump() # nosec @@ -520,20 +446,14 @@ def test_add_value(self): class ContainerTestCase(unittest.TestCase): def setUp(self): akey1 = AKey( - key="A-key 1", - value_type="single_value", - values=self.test_data.create_values()) - akey2 = AKey( - key="A-key 2", - value_type="array", - values=self.test_data.create_values()) + key="A-key 1", value_type="single_value", values=self.test_data.create_values() + ) + akey2 = AKey(key="A-key 2", value_type="array", values=self.test_data.create_values()) self.dkey1 = DKey(key="D-key 1", akeys=[akey1, akey2]) self.dkey2 = DKey(key="D-key 2", akeys=[akey1, akey2]) - self.vos_object1 = VosObject( - count=100, dkeys=[self.dkey1, self.dkey2]) - self.vos_object2 = VosObject( - count=200, dkeys=[self.dkey1, self.dkey2]) + self.vos_object1 = VosObject(count=100, dkeys=[self.dkey1, self.dkey2]) + self.vos_object2 = VosObject(count=200, dkeys=[self.dkey1, self.dkey2]) @pytest.mark.ut def test_invalid_parameters(self): @@ -555,16 +475,14 @@ def test_invalid_parameters(self): @pytest.mark.ut def test_constructor(self): - container = Container( - objects=[self.vos_object1, self.vos_object2]) + container = Container(objects=[self.vos_object1, self.vos_object2]) want = self.test_data.create_default_container() assert want == container.dump() # nosec container = Container( - count=300, csum_size=400, csum_gran=500, objects=[ - self.vos_object1, self.vos_object2]) - want = self.test_data.create_default_container( - count=300, csum_size=400, csum_gran=500) + count=300, csum_size=400, csum_gran=500, objects=[self.vos_object1, self.vos_object2] + ) + want = self.test_data.create_default_container(count=300, csum_size=400, csum_gran=500) assert want == container.dump() # nosec @pytest.mark.ut @@ -580,25 +498,17 @@ def test_add_value(self): class ContainersTestCase(unittest.TestCase): def setUp(self): akey1 = AKey( - key="A-key 1", - value_type="single_value", - values=self.test_data.create_values()) - akey2 = AKey( - key="A-key 2", - value_type="array", - 
values=self.test_data.create_values()) + key="A-key 1", value_type="single_value", values=self.test_data.create_values() + ) + akey2 = AKey(key="A-key 2", value_type="array", values=self.test_data.create_values()) self.dkey1 = DKey(key="D-key 1", akeys=[akey1, akey2]) self.dkey2 = DKey(key="D-key 2", akeys=[akey1, akey2]) - self.vos_object1 = VosObject( - count=100, dkeys=[self.dkey1, self.dkey2]) - self.vos_object2 = VosObject( - count=200, dkeys=[self.dkey1, self.dkey2]) + self.vos_object1 = VosObject(count=100, dkeys=[self.dkey1, self.dkey2]) + self.vos_object2 = VosObject(count=200, dkeys=[self.dkey1, self.dkey2]) - self.vos_container1 = Container( - csum_gran=300, objects=[self.vos_object1, self.vos_object2]) - self.vos_container2 = Container( - csum_gran=400, objects=[self.vos_object1, self.vos_object2]) + self.vos_container1 = Container(csum_gran=300, objects=[self.vos_object1, self.vos_object2]) + self.vos_container2 = Container(csum_gran=400, objects=[self.vos_object1, self.vos_object2]) @pytest.mark.ut def test_invalid_parameters(self): @@ -618,17 +528,13 @@ def test_invalid_parameters(self): @pytest.mark.ut def test_constructor(self): containers = Containers( - num_shards=200, containers=[ - self.vos_container1, self.vos_container2]) + num_shards=200, containers=[self.vos_container1, self.vos_container2] + ) raw_container1 = self.test_data.create_default_container(csum_gran=300) raw_container2 = self.test_data.create_default_container(csum_gran=400) - want = { - "num_shards": 200, - "containers": [ - raw_container1, - raw_container2]} + want = {"num_shards": 200, "containers": [raw_container1, raw_container2]} assert want == containers.dump() # nosec @pytest.mark.ut @@ -651,32 +557,24 @@ def test_add_value(self): raw_container1 = self.test_data.create_default_container(csum_gran=300) raw_container2 = self.test_data.create_default_container(csum_gran=400) - want = { - "num_shards": 500, - "containers": [ - raw_container1, - raw_container2]} + want = {"num_shards": 500, "containers": [raw_container1, raw_container2]} assert want == containers.dump() # nosec @pytest.mark.usefixtures("vos_test_data") class FSTestCase(unittest.TestCase): def setUp(self): - test_files = [{"type": "file", - "path": "data/deploy/driver.bin", - "size": 5767168}, - {"type": "symlink", - "path": "data/deploy/my_file", - "dest": "../../specs/very_importan_file.txt"}, - {"type": "file", - "path": "data/secret_plan.txt", - "size": 3670016}, - {"type": "file", - "path": "specs/readme.txt", - "size": 1572864}, - {"type": "file", - "path": "specs/very_importan_file.txt", - "size": 2621440}] + test_files = [ + {"type": "file", "path": "data/deploy/driver.bin", "size": 5767168}, + { + "type": "symlink", + "path": "data/deploy/my_file", + "dest": "../../specs/very_importan_file.txt", + }, + {"type": "file", "path": "data/secret_plan.txt", "size": 3670016}, + {"type": "file", "path": "specs/readme.txt", "size": 1572864}, + {"type": "file", "path": "specs/very_importan_file.txt", "size": 2621440}, + ] self.fg = FileGenerator() self.fg.crete_mock_fs(test_files) self.root_dir = self.fg.get_root() @@ -685,9 +583,7 @@ def setUp(self): def _create_inode_akey(self, key, size): value = VosValue(size=size) - akey = AKey(key=key, - overhead=Overhead.META, - value_type=ValType.ARRAY) + akey = AKey(key=key, overhead=Overhead.META, value_type=ValType.ARRAY) akey.add_value(value) return akey diff --git a/src/vos/storage_estimator/common/tests/util.py b/src/vos/storage_estimator/common/tests/util.py index 79eaea7a9bb..bce472f97c6 
100644 --- a/src/vos/storage_estimator/common/tests/util.py +++ b/src/vos/storage_estimator/common/tests/util.py @@ -8,7 +8,7 @@ import tempfile -class FileGenerator(): +class FileGenerator: def __init__(self, prefix=""): if prefix: temp_path = tempfile.mkdtemp(prefix=prefix) @@ -30,10 +30,7 @@ def _create_files(self, files): if file.get("type", "unknown") == "file": self.generate_file(file.get("path", ""), file.get("size", 0)) if file.get("type", "unknown") == "symlink": - self._create_symlink( - file.get( - "path", ""), file.get( - "dest", "")) + self._create_symlink(file.get("path", ""), file.get("dest", "")) def _create_symlink(self, path, dest): target_path = os.path.join(self._mock_root, path) diff --git a/src/vos/storage_estimator/common/util.py b/src/vos/storage_estimator/common/util.py index b571d984a57..448ab1c9b30 100644 --- a/src/vos/storage_estimator/common/util.py +++ b/src/vos/storage_estimator/common/util.py @@ -13,7 +13,7 @@ from storage_estimator.vos_structures import Containers -class CommonBase(): +class CommonBase: def __init__(self): self._verbose = False @@ -23,9 +23,7 @@ def set_verbose(self, verbose): def _check_value_type(self, value, values_type): if not isinstance(value, values_type): - raise TypeError( - 'item {0} must be of type {1}'.format( - value, type(values_type))) + raise TypeError('item {0} must be of type {1}'.format(value, type(values_type))) def _error(self, msg): print('Error: {0}'.format(msg)) @@ -51,7 +49,8 @@ def _get_power_labels(self): 5: 'PiB', 6: 'EiB', 7: 'ZiB', - 8: 'YiB'} + 8: 'YiB', + } def _to_human(self, size): power_labels = self._get_power_labels() @@ -95,8 +94,7 @@ def _remove_suffix(self, string, suffix, pedantic=True): def _check_positive_number(self, number): self._check_value_type(number, int) if number < 1: - raise ValueError( - '{0} must be a positive not zero value'.format(number)) + raise ValueError('{0} must be a positive not zero value'.format(number)) def _from_human(self, human_number): self._check_value_type(human_number, str) @@ -123,12 +121,9 @@ def __init__(self, args): def print_pretty_status(self): self._debug( '{0:<13}{1:<10}{2:<10}{3:<9}{4:<9}{5:<11}'.format( - 'FS Object', - 'OClass', - '# Targets', - '# Stripe', - '# Parity', - '# Replicas')) + 'FS Object', 'OClass', '# Targets', '# Stripe', '# Parity', '# Replicas' + ) + ) self._get_pretty_status('File', self._file_oclass) self._get_pretty_status('Directory', self._dir_oclass) @@ -168,48 +163,37 @@ def is_ec_enabled(self): return False def get_dir_targets(self): - return self._get_oclass_parameter( - self._dir_oclass, 'number_of_targets') + return self._get_oclass_parameter(self._dir_oclass, 'number_of_targets') def get_dir_stripe(self): - return self._get_oclass_parameter( - self._dir_oclass, 'number_of_stripe_cells') + return self._get_oclass_parameter(self._dir_oclass, 'number_of_stripe_cells') def get_dir_parity(self): - return self._get_oclass_parameter( - self._dir_oclass, 'number_of_parity_cells') + return self._get_oclass_parameter(self._dir_oclass, 'number_of_parity_cells') def get_dir_replicas(self): - return self._get_oclass_parameter( - self._dir_oclass, 'number_of_replicas') + return self._get_oclass_parameter(self._dir_oclass, 'number_of_replicas') def get_file_targets(self): - return self._get_oclass_parameter( - self._file_oclass, 'number_of_targets') + return self._get_oclass_parameter(self._file_oclass, 'number_of_targets') def get_file_stripe(self): - return self._get_oclass_parameter( - self._file_oclass, 'number_of_stripe_cells') + return 
self._get_oclass_parameter(self._file_oclass, 'number_of_stripe_cells') def get_file_parity(self): - return self._get_oclass_parameter( - self._file_oclass, 'number_of_parity_cells') + return self._get_oclass_parameter(self._file_oclass, 'number_of_parity_cells') def get_file_replicas(self): - return self._get_oclass_parameter( - self._file_oclass, 'number_of_replicas') + return self._get_oclass_parameter(self._file_oclass, 'number_of_replicas') def get_supported_oclass(self): return list(self._get_oclass_definitions().keys()) def _get_min_shards_required(self, oclass_type): - parity = self._get_oclass_parameter( - oclass_type, 'number_of_parity_cells') - stripe = self._get_oclass_parameter( - oclass_type, 'number_of_stripe_cells') + parity = self._get_oclass_parameter(oclass_type, 'number_of_parity_cells') + stripe = self._get_oclass_parameter(oclass_type, 'number_of_stripe_cells') targets = self._get_oclass_parameter(oclass_type, 'number_of_targets') - replicas = self._get_oclass_parameter( - oclass_type, 'number_of_replicas') + replicas = self._get_oclass_parameter(oclass_type, 'number_of_replicas') return max(stripe + parity, targets, replicas) @@ -219,15 +203,15 @@ def _get_pretty_status(self, label, oclass_type): if targets == 0: targets = 'all' - cells = self._get_oclass_parameter( - oclass_type, 'number_of_stripe_cells') - parity = self._get_oclass_parameter( - oclass_type, 'number_of_parity_cells') - replicas = self._get_oclass_parameter( - oclass_type, 'number_of_replicas') + cells = self._get_oclass_parameter(oclass_type, 'number_of_stripe_cells') + parity = self._get_oclass_parameter(oclass_type, 'number_of_parity_cells') + replicas = self._get_oclass_parameter(oclass_type, 'number_of_replicas') - self._debug('{0:<13}{1:<10}{2:<10}{3:<9}{4:<9}{5:<11}'.format( - label, oclass_type, targets, cells, parity, replicas)) + self._debug( + '{0:<13}{1:<10}{2:<10}{3:<9}{4:<9}{5:<11}'.format( + label, oclass_type, targets, cells, parity, replicas + ) + ) def _update_oclass(self, args, key_value, default_value): op = vars(args) @@ -239,7 +223,9 @@ def _update_oclass(self, args, key_value, default_value): if value not in supported_oclasses: raise ValueError( 'unknown object class "{0}", the supported objects are {1}:'.format( - value, self.get_supported_oclass())) + value, self.get_supported_oclass() + ) + ) return value @@ -248,10 +234,9 @@ def _get_oclass_parameter(self, oclass_type, parameter_id): 'number_of_targets': 0, 'number_of_stripe_cells': 1, 'number_of_parity_cells': 2, - 'number_of_replicas': 3 + 'number_of_replicas': 3, } - return self._get_oclass_definitions( - )[oclass_type][ec_parameters[parameter_id]] + return self._get_oclass_definitions()[oclass_type][ec_parameters[parameter_id]] def _get_oclass_definitions(self): return { @@ -273,7 +258,7 @@ def _get_oclass_definitions(self): 'EC_8P2GX': (0, 8, 2, 1), # 16+2 Erasure Coded object, it spreads across all targets within # the pool - 'EC_16P2GX': (0, 16, 2, 1) + 'EC_16P2GX': (0, 16, 2, 1), } @@ -296,9 +281,7 @@ def _create_file(self, file_name, buf): f.write(buf) except OSError as err: - raise Exception( - 'Failed to open file {0} {1}'.format( - file_name, err)) + raise Exception('Failed to open file {0} {1}'.format(file_name, err)) def _get_vos_meta(self): self._meta_str = self._create_vos_meta() @@ -323,9 +306,7 @@ def _load_yaml_from_file(self, file_name): try: data = yaml.safe_load(open(file_name, 'r')) except OSError as err: - raise Exception( - 'Failed to open file {0} {1}'.format( - file_name, err)) + raise 
Exception('Failed to open file {0} {1}'.format(file_name, err)) return data @@ -337,13 +318,12 @@ def _process_yaml(self, config_yaml): if 'containers' not in config_yaml: raise Exception( - 'No "containers" key in {0}. Nothing to do'.format( - self._args.config[0])) + 'No "containers" key in {0}. Nothing to do'.format(self._args.config[0]) + ) self._debug('starting analysis') if 'average' in self._args and not self._args.average: - self._debug( - 'for massive file systems, consider using the average "-x" option') + self._debug('for massive file systems, consider using the average "-x" option') self._debug('working...') for container in config_yaml.get('containers'): @@ -391,8 +371,8 @@ def _get_num_shards(self, args): shards_required = self._oclass.validate_number_of_shards(num_shards) if shards_required > 0: raise ValueError( - 'Insufficient shards. Wanted {0} given {1}'.format( - shards_required, num_shards)) + 'Insufficient shards. Wanted {0} given {1}'.format(shards_required, num_shards) + ) return num_shards @@ -421,13 +401,15 @@ def _process_checksum(self): csum_name = self._args.checksum if csum_name not in csummers: - raise ValueError(f"unknown checksum algorithm: '{csum_name}', the supported " - + f"checksum algorithms are: '{list(csummers.keys())}'") + raise ValueError( + f"unknown checksum algorithm: '{csum_name}', the supported " + + f"checksum algorithms are: '{list(csummers.keys())}'" + ) csum_size = csummers[csum_name] self._debug( - 'using checksum "{0}" algorithm of size {1} bytes'.format( - csum_name, csum_size)) + 'using checksum "{0}" algorithm of size {1} bytes'.format(csum_name, csum_size) + ) self._csum_size = csum_size def _process_block_values(self): diff --git a/src/vos/storage_estimator/common/vos_size.py b/src/vos/storage_estimator/common/vos_size.py index 872fc76207a..c8cad3630fd 100644 --- a/src/vos/storage_estimator/common/vos_size.py +++ b/src/vos/storage_estimator/common/vos_size.py @@ -19,8 +19,7 @@ def convert(stat): def print_total(name, stat, total): "Pretty print" - print("\t%-20s: %s (%5.2f%%)" % (name, convert(stat), - 100 * float(stat) / total)) + print("\t%-20s: %s (%5.2f%%)" % (name, convert(stat), 100 * float(stat) / total)) def check_key_type(spec): @@ -33,7 +32,7 @@ def check_key_type(spec): raise RuntimeError("Size required for hashed key %s" % spec) -class Stats(): +class Stats: """Class for calculating and storing stats""" def __init__(self): @@ -50,7 +49,7 @@ def __init__(self): "user_meta": 0, "total_meta": 0, "nvme_total": 0, - "total": 0 + "total": 0, } def mult(self, multiplier): @@ -92,8 +91,7 @@ def print_stat(self, name): def pretty_print(self): """Pretty print statistics""" print("Metadata breakdown:") - self.stats["scm_total"] = self.stats["total"] - \ - self.stats["nvme_total"] + self.stats["scm_total"] = self.stats["total"] - self.stats["nvme_total"] self.print_stat("pool") self.print_stat("container") self.print_stat("object") @@ -113,10 +111,11 @@ def pretty_print(self): pretty_total = convert(self.stats["total"]) print("Total storage required: {0}".format(pretty_total)) + # pylint: disable=too-many-instance-attributes -class MetaOverhead(): +class MetaOverhead: """Class for calculating overheads""" def __init__(self, args, num_pools, meta_yaml): @@ -126,8 +125,7 @@ def __init__(self, args, num_pools, meta_yaml): self.num_pools = num_pools self.pools = [] for _index in range(0, self.num_pools): - self.pools.append({"trees": [], "dup": 1, "key": "container", - "count": 0}) + self.pools.append({"trees": [], "dup": 1, "key": 
"container", "count": 0}) self.next_cont = 1 self.next_object = 1 self._scm_cutoff = meta_yaml.get("scm_cutoff", 4096) @@ -144,11 +142,14 @@ def init_container(self, cont_spec): for pool in self.pools: pool["count"] += int(cont_spec.get("count", 1)) - cont = {"dup": int(cont_spec.get("count", 1)), "key": "object", - "count": 0, - "csum_size": int(cont_spec.get("csum_size", 0)), - "csum_gran": int(cont_spec.get("csum_gran", 1048576)), - "trees": []} + cont = { + "dup": int(cont_spec.get("count", 1)), + "key": "object", + "count": 0, + "csum_size": int(cont_spec.get("csum_size", 0)), + "csum_gran": int(cont_spec.get("csum_gran", 1048576)), + "trees": [], + } pool["trees"].append(cont) for obj_spec in cont_spec.get("objects"): @@ -189,18 +190,28 @@ def init_dkeys(self, oid, obj_spec, num_of_targets): pool = self.pools[pool_idx] cont = pool["trees"][-1] if cont["trees"] == [] or cont["trees"][-1]["oid"] != oid: - obj = {"dup": int(obj_spec.get("count", 1)), "key": "dkey", - "count": 0, "trees": [], "oid": oid} + obj = { + "dup": int(obj_spec.get("count", 1)), + "key": "dkey", + "count": 0, + "trees": [], + "oid": oid, + } cont["trees"].append(obj) cont["count"] += int(obj_spec.get("count", 1)) dup = full_count if partial_count > idx: dup += 1 obj = cont["trees"][-1] - dkey = {"dup": dup, "key": "akey", "count": 0, "trees": [], - "type": dkey_spec.get("type", "hashed"), - "size": int(dkey_spec.get("size", 0)), - "overhead": dkey_spec.get("overhead", "user")} + dkey = { + "dup": dup, + "key": "akey", + "count": 0, + "trees": [], + "type": dkey_spec.get("type", "hashed"), + "size": int(dkey_spec.get("size", 0)), + "overhead": dkey_spec.get("overhead", "user"), + } obj["trees"].append(dkey) obj["count"] += dup for akey_spec in dkey_spec.get("akeys"): @@ -213,12 +224,17 @@ def init_akey(self, cont, dkey, akey_spec): raise RuntimeError("No values in akey spec %s" % akey_spec) if "value_type" not in akey_spec: raise RuntimeError("No value_type in akey spec %s" % akey_spec) - akey = {"dup": int(akey_spec.get("count", 1)), - "key": akey_spec.get("value_type"), "count": 0, - "type": akey_spec.get("type", "hashed"), - "size": int(akey_spec.get("size", 0)), - "overhead": akey_spec.get("overhead", "user"), - "value_size": 0, "meta_size": 0, "nvme_size": 0} + akey = { + "dup": int(akey_spec.get("count", 1)), + "key": akey_spec.get("value_type"), + "count": 0, + "type": akey_spec.get("type", "hashed"), + "size": int(akey_spec.get("size", 0)), + "overhead": akey_spec.get("overhead", "user"), + "value_size": 0, + "meta_size": 0, + "nvme_size": 0, + } dkey["trees"].append(akey) dkey["count"] += int(akey_spec.get("count", 1)) for value_spec in akey_spec.get("values"): @@ -235,14 +251,11 @@ def init_value(self, cont, akey, value_spec): akey["count"] += value_spec.get("count", 1) # Number of values if value_spec.get("overhead", "user") == "user": - akey["value_size"] += size * \ - value_spec.get("count", 1) # total size + akey["value_size"] += size * value_spec.get("count", 1) # total size else: - akey["meta_size"] += size * \ - value_spec.get("count", 1) # total size + akey["meta_size"] += size * value_spec.get("count", 1) # total size if nvme: - akey["nvme_size"] += size * \ - value_spec.get("count", 1) # total size + akey["nvme_size"] += size * value_spec.get("count", 1) # total size # Add checksum overhead @@ -250,8 +263,7 @@ def init_value(self, cont, akey, value_spec): if akey["key"] == "array": csum_size = int(math.ceil(size / cont["csum_gran"]) * csum_size) - akey["meta_size"] += csum_size * \ - 
value_spec.get("count", 1) + akey["meta_size"] += csum_size * value_spec.get("count", 1) def load_container(self, cont_spec): """calculate metadata for update(s)""" diff --git a/src/vos/storage_estimator/common/vos_structures.py b/src/vos/storage_estimator/common/vos_structures.py index d877dc73fc0..91931b3ca57 100644 --- a/src/vos/storage_estimator/common/vos_structures.py +++ b/src/vos/storage_estimator/common/vos_structures.py @@ -25,7 +25,7 @@ class StrBool(Enum): NO = "No" -class VosBase(): +class VosBase: def __init__(self, count): self._payload = dict() self.set_count(count) @@ -66,20 +66,13 @@ def _set_aligned(self, aligned): if aligned is None: aligned = StrBool.YES.value elif aligned is not StrBool.YES.value and aligned is not StrBool.NO.value: - raise TypeError( - "aligned parameter must be of type {0}".format( - type(StrBool))) + raise TypeError("aligned parameter must be of type {0}".format(type(StrBool))) self._payload["aligned"] = aligned class VosItems(VosBase): - def __init__( - self, - count=None, - values=None, - values_label=None, - values_type=None): + def __init__(self, count=None, values=None, values_label=None, values_type=None): super().__init__(count) self._values_label = values_label self._values_type = values_type @@ -88,9 +81,7 @@ def __init__( def dump(self): if not bool(self._payload[self._values_label]): - raise VosValueError( - "list of {0} must not be empty".format( - self._values_label)) + raise VosValueError("list of {0} must not be empty".format(self._values_label)) return self._payload def add_value(self, value): @@ -104,22 +95,20 @@ def _add_values(self, values): def _check_value_type(self, value): if not isinstance(value, self._values_type): - raise TypeError( - "item {0} must be of type {1}".format( - value, type( - self._values_type))) + raise TypeError("item {0} must be of type {1}".format(value, type(self._values_type))) class VosKey(VosItems): def __init__( - self, - key=None, - count=None, - key_type=None, - overhead=None, - values=None, - values_label=None, - values_type=None): + self, + key=None, + count=None, + key_type=None, + overhead=None, + values=None, + values_label=None, + values_type=None, + ): super().__init__(count, values, values_label, values_type) self._set_type(key, key_type) self._set_overhead(overhead) @@ -131,9 +120,7 @@ def _set_overhead(self, overhead): elif overhead == Overhead.META.value: self._payload["overhead"] = Overhead.META.value else: - raise TypeError( - "overhead parameter must be of type {0}".format( - type(Overhead))) + raise TypeError("overhead parameter must be of type {0}".format(type(Overhead))) def _add_key_size(self, key): if key: @@ -150,20 +137,11 @@ def _set_type(self, key, key_type): elif key_type == KeyType.INTEGER.value: self._payload["type"] = KeyType.INTEGER.value else: - raise TypeError( - "key_type parameter must be of type {0}".format( - type(KeyType))) + raise TypeError("key_type parameter must be of type {0}".format(type(KeyType))) class AKey(VosKey): - def __init__( - self, - key=None, - count=1, - key_type=None, - overhead=None, - value_type=None, - values=[]): + def __init__(self, key=None, count=1, key_type=None, overhead=None, value_type=None, values=[]): super().__init__( key=key, count=count, @@ -171,7 +149,8 @@ def __init__( overhead=overhead, values=values, values_label="values", - values_type=VosValue) + values_type=VosValue, + ) self._set_value_type(value_type) def _set_value_type(self, value_type): @@ -181,27 +160,12 @@ def _set_value_type(self, value_type): elif value_type == 
ValType.ARRAY.value or value_type == ValType.SINGLE.value: self._payload["value_type"] = value_type else: - raise TypeError( - "value_type parameter must be of type {0}".format( - type(ValType))) + raise TypeError("value_type parameter must be of type {0}".format(type(ValType))) class DKey(VosKey): - def __init__( - self, - key=None, - count=1, - key_type=None, - overhead=None, - akeys=[]): - super().__init__( - key, - count, - key_type, - overhead, - akeys, - "akeys", - AKey) + def __init__(self, key=None, count=1, key_type=None, overhead=None, akeys=[]): + super().__init__(key, count, key_type, overhead, akeys, "akeys", AKey) class VosObject(VosItems): @@ -235,10 +199,8 @@ def set_csum_gran(self, csum_gran): class Containers(VosItems): def __init__(self, num_shards=1000, containers=[]): super().__init__( - count=None, - values=containers, - values_label="containers", - values_type=Container) + count=None, values=containers, values_label="containers", values_type=Container + ) self.set_num_shards(num_shards) def dump(self): diff --git a/src/vos/storage_estimator/daos_storage_estimator.py b/src/vos/storage_estimator/daos_storage_estimator.py index 6e68df36f63..e6ebda604a8 100755 --- a/src/vos/storage_estimator/daos_storage_estimator.py +++ b/src/vos/storage_estimator/daos_storage_estimator.py @@ -108,15 +108,14 @@ def process_csv(args): sys.exit(-1) -class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, - argparse.RawTextHelpFormatter): +class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter): """Just to get proper help output""" + pass # create the top-level parser -parser = argparse.ArgumentParser(description=tool_description, - formatter_class=MyFormatter) +parser = argparse.ArgumentParser(description=tool_description, formatter_class=MyFormatter) subparsers = parser.add_subparsers(description='valid subcommands') example_description = ''' @@ -130,269 +129,189 @@ class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, 'create_example', help='Create a YAML example of the DFS layout', description=example_description, - formatter_class=MyFormatter) -example.add_argument('-a', '--alloc_overhead', type=int, - help='Vos alloc overhead', default=0) + formatter_class=MyFormatter, +) +example.add_argument('-a', '--alloc_overhead', type=int, help='Vos alloc overhead', default=0) example.add_argument( '-f', '--dfs_file_name', type=str, help='Output file name of the DFS example', - default='vos_dfs_sample.yaml') + default='vos_dfs_sample.yaml', +) example.add_argument( '-m', '--meta_out', type=str, help='Output file name of the Vos Metadata', - default='vos_size.yaml') -example.add_argument( - '-v', - '--verbose', - action='store_true', - help='Explain what is being done') + default='vos_size.yaml', +) +example.add_argument('-v', '--verbose', action='store_true', help='Explain what is being done') example.add_argument( - '-S', - '--storage', - dest='vospath', - type=str, - help='DAOS storage path', - default=vos_path_default) + '-S', '--storage', dest='vospath', type=str, help='DAOS storage path', default=vos_path_default +) example.set_defaults(func=create_dfs_example) # read the file system explore = subparsers.add_parser( - 'explore_fs', help='Estimate the VOS overhead from a given tree directory', - formatter_class=MyFormatter) -explore.add_argument( - 'path', - type=str, - nargs=1, - help='Path to the target directory', - default=None) -explore.add_argument( - '-v', - '--verbose', - action='store_true', - help='Explain what is being done') + 'explore_fs', 
+ help='Estimate the VOS overhead from a given tree directory', + formatter_class=MyFormatter, +) +explore.add_argument('path', type=str, nargs=1, help='Path to the target directory', default=None) +explore.add_argument('-v', '--verbose', action='store_true', help='Explain what is being done') explore.add_argument( '-t', '--dir_oclass', type=str, help='Predefined object classes. It describes schema of data distribution & protection ' - + 'for directories.', - default='S1') + + 'for directories.', + default='S1', +) explore.add_argument( '-r', '--file_oclass', type=str, help='Predefined object classes. It describes schema of data distribution & protection for ' - + 'files.', - default='SX') + + 'files.', + default='SX', +) explore.add_argument( - '-x', - '--average', - action='store_true', - help='Use average file size for estimation. (Faster)') -explore.add_argument( - '-i', - '--io_size', - type=str, - help='I/O size.', - default='1MiB') + '-x', '--average', action='store_true', help='Use average file size for estimation. (Faster)' +) +explore.add_argument('-i', '--io_size', type=str, help='I/O size.', default='1MiB') explore.add_argument( '-c', '--chunk_size', type=str, help='Array chunk size/stripe size for regular files.', - default='1MiB') -explore.add_argument( - '-e', - '--ec_cell_size', - type=str, - help='EC cell size', - default='64KiB') + default='1MiB', +) +explore.add_argument('-e', '--ec_cell_size', type=str, help='EC cell size', default='64KiB') explore.add_argument( - '-A', - '--assume_aggregation', - action='store_true', - help='Assume aggregation', - default=False) + '-A', '--assume_aggregation', action='store_true', help='Assume aggregation', default=False +) explore.add_argument( '-s', '--scm_cutoff', type=str, help='SCM threshold in bytes, optional suffixes KiB, MiB, ..., YiB', - default='4KiB') -explore.add_argument( - '-n', - '--num_shards', - type=int, - help='Number of VOS Pools', - default=1000) -explore.add_argument('-a', '--alloc_overhead', type=int, - help='Vos alloc overhead', default=0) + default='4KiB', +) +explore.add_argument('-n', '--num_shards', type=int, help='Number of VOS Pools', default=1000) +explore.add_argument('-a', '--alloc_overhead', type=int, help='Vos alloc overhead', default=0) explore.add_argument( '-k', '--checksum', type=str, help='[optional] Checksum algorithm to be used crc16, crc32, crc64, sha1, sha256, sha512', - default=None) + default=None, +) explore.add_argument( - '-m', - '--meta', - metavar='META', - help='[optional] Input metadata file', - default=None) + '-m', '--meta', metavar='META', help='[optional] Input metadata file', default=None +) explore.add_argument( - '-o', - '--output', - dest='output', - type=str, - help='Output file name', - default=None) + '-o', '--output', dest='output', type=str, help='Output file name', default=None +) explore.add_argument( - '-S', - '--storage', - dest='vospath', - type=str, - help='DAOS storage path', - default=vos_path_default) + '-S', '--storage', dest='vospath', type=str, help='DAOS storage path', default=vos_path_default +) explore.set_defaults(func=process_fs) # parse a yaml file yaml_file = subparsers.add_parser( - 'read_yaml', help='Estimate the VOS overhead from a given YAML file', - formatter_class=MyFormatter) + 'read_yaml', + help='Estimate the VOS overhead from a given YAML file', + formatter_class=MyFormatter, +) +yaml_file.add_argument('-v', '--verbose', action='store_true', help='Explain what is being done') yaml_file.add_argument( - '-v', - '--verbose', - 
action='store_true', - help='Explain what is being done') -yaml_file.add_argument('config', metavar='CONFIG', type=str, nargs=1, - help='Path to the input yaml configuration file') -yaml_file.add_argument('-a', '--alloc_overhead', type=int, - help='Vos alloc overhead', default=0) + 'config', metavar='CONFIG', type=str, nargs=1, help='Path to the input yaml configuration file' +) +yaml_file.add_argument('-a', '--alloc_overhead', type=int, help='Vos alloc overhead', default=0) yaml_file.add_argument( '-s', '--scm_cutoff', type=str, help='SCM threshold in bytes, optional suffixes KiB, MiB, ..., YiB', - default='4KiB') + default='4KiB', +) yaml_file.add_argument( - '-m', - '--meta', - metavar='META', - help='[optional] Input metadata file', - default=None) + '-m', '--meta', metavar='META', help='[optional] Input metadata file', default=None +) yaml_file.add_argument( - '-S', - '--storage', - dest='vospath', - type=str, - help='DAOS storage path', - default=vos_path_default) + '-S', '--storage', dest='vospath', type=str, help='DAOS storage path', default=vos_path_default +) yaml_file.set_defaults(func=process_yaml) # parse a csv file csv_file = subparsers.add_parser( - 'read_csv', help='Estimate the VOS overhead from a given CSV file', - formatter_class=MyFormatter) + 'read_csv', help='Estimate the VOS overhead from a given CSV file', formatter_class=MyFormatter +) csv_file.add_argument( - 'csv', - metavar='CSV', - type=str, - nargs=1, - help='Input CSV file (assumes Argonne format)') -csv_file.add_argument( - '--file_name_size', - type=int, - dest='file_name_size', - help='Average file name length', - default=32) + 'csv', metavar='CSV', type=str, nargs=1, help='Input CSV file (assumes Argonne format)' +) csv_file.add_argument( - '-i', - '--io_size', - type=str, - help='I/O size.', - default='1MiB') + '--file_name_size', type=int, dest='file_name_size', help='Average file name length', default=32 +) +csv_file.add_argument('-i', '--io_size', type=str, help='I/O size.', default='1MiB') csv_file.add_argument( '--chunk_size', dest='chunk_size', type=str, help='Array chunk size/stripe size for regular files. 
Must be multiple of I/O size', - default='1MiB') + default='1MiB', +) +csv_file.add_argument('-e', '--ec_cell_size', type=str, help='EC cell size', default='64KiB') csv_file.add_argument( - '-e', - '--ec_cell_size', - type=str, - help='EC cell size', - default='64KiB') -csv_file.add_argument( - '-A', - '--assume_aggregation', - action='store_true', - help='Assume aggregation', - default=False) + '-A', '--assume_aggregation', action='store_true', help='Assume aggregation', default=False +) csv_file.add_argument( '-s', '--scm_cutoff', type=str, help='SCM threshold in bytes, optional suffixes KiB, MiB, ..., YiB', - default='4KiB') + default='4KiB', +) csv_file.add_argument( - '--num_shards', - dest='num_shards', - type=int, - help='Number of vos pools', - default=1000) + '--num_shards', dest='num_shards', type=int, help='Number of vos pools', default=1000 +) csv_file.add_argument( '-k', '--checksum', type=str, help='[optional] Checksum algorithm to be used crc16, crc32, crc64, sha1, sha256, sha512', - default=None) -csv_file.add_argument( - '-v', - '--verbose', - action='store_true', - help='Explain what is being done') -csv_file.add_argument('-a', '--alloc_overhead', type=int, - help='Vos alloc overhead', default=0) + default=None, +) +csv_file.add_argument('-v', '--verbose', action='store_true', help='Explain what is being done') +csv_file.add_argument('-a', '--alloc_overhead', type=int, help='Vos alloc overhead', default=0) csv_file.add_argument( '-t', '--dir_oclass', type=str, help='Predefined object classes. It describes schema of data distribution & protection for ' - + 'directories.', - default='S1') + + 'directories.', + default='S1', +) csv_file.add_argument( '-r', '--file_oclass', type=str, help='Predefined object classes. It describes schema of data distribution & protection for ' - + 'files.', - default='SX') + + 'files.', + default='SX', +) csv_file.add_argument( - '-m', - '--meta', - metavar='META', - help='[optional] Input metadata file', - default=None) + '-m', '--meta', metavar='META', help='[optional] Input metadata file', default=None +) csv_file.add_argument( - '-o', '--output', - dest='output', - type=str, - help='Output file name', - default=None) + '-o', '--output', dest='output', type=str, help='Output file name', default=None +) csv_file.add_argument( - '-S', - '--storage', - dest='vospath', - type=str, - help='DAOS storage path', - default=vos_path_default) + '-S', '--storage', dest='vospath', type=str, help='DAOS storage path', default=vos_path_default +) csv_file.set_defaults(func=process_csv) # parse the args and call whatever function was selected diff --git a/src/vos/tests/SConscript b/src/vos/tests/SConscript index 23612518322..22d1c9dbd6a 100644 --- a/src/vos/tests/SConscript +++ b/src/vos/tests/SConscript @@ -5,9 +5,20 @@ def scons(): """Execute build""" Import('denv', 'utest_utils', 'conf_dir', 'cmd_parser') - libraries = ['vos', 'bio', 'abt', 'pthread', 'daos_common_pmem', - 'daos_tests', 'gurt', 'uuid', 'pthread', - 'pmemobj', 'cmocka', 'gomp'] + libraries = [ + 'vos', + 'bio', + 'abt', + 'pthread', + 'daos_common_pmem', + 'daos_tests', + 'gurt', + 'uuid', + 'pthread', + 'pmemobj', + 'cmocka', + 'gomp', + ] tenv = denv.Clone() tenv.Append(CPPPATH=[Dir('..').srcnode()]) @@ -18,11 +29,26 @@ def scons(): tenv.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) tenv.Append(OBJPREFIX="b_") - vos_test_src = ['vos_tests.c', 'vts_io.c', 'vts_pool.c', 'vts_container.c', - tenv.Object("vts_common.c"), 'vts_aggregate.c', 'vts_dtx.c', - 'vts_gc.c', 'vts_checksum.c', 
'vts_ilog.c', 'vts_array.c', - 'vts_pm.c', 'vts_ts.c', 'vts_mvcc.c', 'vos_cmd.c', 'vts_wal.c', - '../../object/srv_csum.c', '../../object/srv_io_map.c'] + vos_test_src = [ + 'vos_tests.c', + 'vts_io.c', + 'vts_pool.c', + 'vts_container.c', + tenv.Object("vts_common.c"), + 'vts_aggregate.c', + 'vts_dtx.c', + 'vts_gc.c', + 'vts_checksum.c', + 'vts_ilog.c', + 'vts_array.c', + 'vts_pm.c', + 'vts_ts.c', + 'vts_mvcc.c', + 'vos_cmd.c', + 'vts_wal.c', + '../../object/srv_csum.c', + '../../object/srv_io_map.c', + ] vos_tests = tenv.d_program('vos_tests', vos_test_src, LIBS=libraries) tenv.AppendUnique(CPPPATH=[Dir('../../common/tests').srcnode()]) evt_ctl = tenv.d_program('evt_ctl', ['evt_ctl.c', utest_utils, cmd_parser], LIBS=libraries) @@ -33,11 +59,23 @@ def scons(): unit_env = tenv.Clone() unit_env.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) - libraries = ['daos_common_pmem', 'daos_tests', 'gurt', 'cart', 'cmocka', - 'vos', 'uuid', 'pmem', 'pmemobj', 'bio', 'pthread', 'abt'] - unit_env.d_test_program('pool_scrubbing_tests', ['pool_scrubbing_tests.c', - '../vos_pool_scrub.c'], - LIBS=libraries) + libraries = [ + 'daos_common_pmem', + 'daos_tests', + 'gurt', + 'cart', + 'cmocka', + 'vos', + 'uuid', + 'pmem', + 'pmemobj', + 'bio', + 'pthread', + 'abt', + ] + unit_env.d_test_program( + 'pool_scrubbing_tests', ['pool_scrubbing_tests.c', '../vos_pool_scrub.c'], LIBS=libraries + ) tenv = denv.Clone() tenv.AppendUnique(RPATH_FULL=['$PREFIX/lib64/daos_srv']) diff --git a/src/vos/tests/evt_stress.py b/src/vos/tests/evt_stress.py index ed05f61cc3f..adc0ed22595 100755 --- a/src/vos/tests/evt_stress.py +++ b/src/vos/tests/evt_stress.py @@ -6,8 +6,9 @@ from os.path import join -class EVTStress(): +class EVTStress: """Helper class for running the test""" + def __init__(self): parser = argparse.ArgumentParser(description='Run evt_ctl with pattern from DAOS-11894') parser.add_argument('--algo', default='dist', choices=['dist', 'dist_even', 'soff']) diff --git a/utils/ansible/ftest/library/daos_hugepages.py b/utils/ansible/ftest/library/daos_hugepages.py index 35d192b3035..529abd1c4f2 100644 --- a/utils/ansible/ftest/library/daos_hugepages.py +++ b/utils/ansible/ftest/library/daos_hugepages.py @@ -23,11 +23,7 @@ """ -ANSIBLE_METADATA = { - 'metadata_version': '0.1', - 'status': ['preview'], - 'supported_by': 'Intel' -} +ANSIBLE_METADATA = {'metadata_version': '0.1', 'status': ['preview'], 'supported_by': 'Intel'} DOCUMENTATION = ''' @@ -97,7 +93,8 @@ def main(): if not os.path.isfile(r"/sys/kernel/mm/transparent_hugepage/enabled"): module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg="Huge Pages not activated in kernel") + msg="Huge Pages not activated in kernel", + ) if not is_huge_pages_enabled(): try: @@ -106,19 +103,26 @@ def main(): except Exception as error: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Huge Pages could not be enabled: {error}") + msg=f"Huge Pages could not be enabled: {error}", + ) if not is_huge_pages_enabled(): module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg="Huge Pages could not be enabled") + msg="Huge Pages could not be enabled", + ) result = subprocess.run( [r'sysctl', r'vm.nr_hugepages'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=3, check=False) + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + timeout=3, + check=False, + ) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Size of Huge Pages 
could not be read: {result.stderr.decode('ascii')}") + msg=f"Size of Huge Pages could not be read: {result.stderr.decode('ascii')}", + ) hugepages_current_size = 0 stdout_str = result.stdout.decode('ascii') @@ -126,23 +130,30 @@ def main(): if match is None: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Invalid size of huge pages from sysctl: {stdout_str}") + msg=f"Invalid size of huge pages from sysctl: {stdout_str}", + ) hugepages_current_size = int(match.groupdict()['size']) if hugepages_size != hugepages_current_size: if check_mode: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Invalid size of huge pages: {hugepages_current_size}") + msg=f"Invalid size of huge pages: {hugepages_current_size}", + ) result = subprocess.run( [r'sysctl', f"vm.nr_hugepages={hugepages_size}"], - stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, timeout=3, check=False) + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + timeout=3, + check=False, + ) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, msg="Size of Huge Pages could not be dynamically set: " - f"{result.stderr.decode('ascii')}") + f"{result.stderr.decode('ascii')}", + ) try: with open(r"/etc/sysctl.d/50-hugepages.conf", "w", encoding="utf8") as fd: @@ -150,19 +161,19 @@ def main(): except Exception as error: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Setup of Huge Pages size at boot could not be defined: {error}") + msg=f"Setup of Huge Pages size at boot could not be defined: {error}", + ) result = subprocess.run([r'sysctl', '-p'], stderr=subprocess.PIPE, timeout=3, check=False) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, msg="Setup of Huge Pages size at boot could not be applied: " - f"{result.stderr.decode('ascii')}") + f"{result.stderr.decode('ascii')}", + ) - module.exit_json(changed=True, - elapsed=(datetime.datetime.utcnow() - start_time).seconds) + module.exit_json(changed=True, elapsed=(datetime.datetime.utcnow() - start_time).seconds) - module.exit_json(changed=False, - elapsed=(datetime.datetime.utcnow() - start_time).seconds) + module.exit_json(changed=False, elapsed=(datetime.datetime.utcnow() - start_time).seconds) if __name__ == '__main__': diff --git a/utils/certs/SConscript b/utils/certs/SConscript index 446a2059a5a..4e9eeb1863c 100644 --- a/utils/certs/SConscript +++ b/utils/certs/SConscript @@ -5,10 +5,10 @@ def scons(): """Execute build""" Import('env') - env.Install("$PREFIX/lib64/daos/certgen", ['admin.cnf', - 'agent.cnf', - 'server.cnf', - 'gen_certificates.sh']) + env.Install( + "$PREFIX/lib64/daos/certgen", + ['admin.cnf', 'agent.cnf', 'server.cnf', 'gen_certificates.sh'], + ) if __name__ == "SCons.Script": diff --git a/utils/cq/d_logging_check.py b/utils/cq/d_logging_check.py index 134e204cb7c..1dc6852c93a 100755 --- a/utils/cq/d_logging_check.py +++ b/utils/cq/d_logging_check.py @@ -21,7 +21,7 @@ ARGS = None -class FileLine(): +class FileLine: """One line from a file""" def __init__(self, file_object, line, lineno): @@ -124,15 +124,47 @@ def __next__(self): # Logging macros where the new-line is added if missing. 
-PREFIXES = ['D_ERROR', 'D_WARN', 'D_INFO', 'D_NOTE', 'D_ALERT', 'D_CRIT', 'D_FATAT', 'D_EMIT', - 'D_TRACE_INFO', 'D_TRACE_NOTE', 'D_TRACE_WARN', 'D_TRACE_ERROR', 'D_TRACE_ALERT', - 'D_TRACE_CRIT', 'D_TRACE_FATAL', 'D_TRACE_EMIT', 'RPC_TRACE', 'RPC_ERROR', - 'VOS_TX_LOG_FAIL', 'VOS_TX_TRACE_FAIL', 'D_DEBUG', 'D_CDEBUG', 'IV_DBG'] +PREFIXES = [ + 'D_ERROR', + 'D_WARN', + 'D_INFO', + 'D_NOTE', + 'D_ALERT', + 'D_CRIT', + 'D_FATAT', + 'D_EMIT', + 'D_TRACE_INFO', + 'D_TRACE_NOTE', + 'D_TRACE_WARN', + 'D_TRACE_ERROR', + 'D_TRACE_ALERT', + 'D_TRACE_CRIT', + 'D_TRACE_FATAL', + 'D_TRACE_EMIT', + 'RPC_TRACE', + 'RPC_ERROR', + 'VOS_TX_LOG_FAIL', + 'VOS_TX_TRACE_FAIL', + 'D_DEBUG', + 'D_CDEBUG', + 'IV_DBG', +] # Logging macros where a new-line is always added. -PREFIXES_NNL = ['DFUSE_LOG_WARNING', 'DFUSE_LOG_ERROR', 'DFUSE_LOG_DEBUG', 'DFUSE_LOG_INFO', - 'DFUSE_TRA_WARNING', 'DFUSE_TRA_ERROR', 'DFUSE_TRA_DEBUG', 'DFUSE_TRA_INFO', - 'DH_PERROR_SYS', 'DH_PERROR_DER', 'DL_CDEBUG', 'PRINT_ERROR'] +PREFIXES_NNL = [ + 'DFUSE_LOG_WARNING', + 'DFUSE_LOG_ERROR', + 'DFUSE_LOG_DEBUG', + 'DFUSE_LOG_INFO', + 'DFUSE_TRA_WARNING', + 'DFUSE_TRA_ERROR', + 'DFUSE_TRA_DEBUG', + 'DFUSE_TRA_INFO', + 'DH_PERROR_SYS', + 'DH_PERROR_DER', + 'DL_CDEBUG', + 'PRINT_ERROR', +] for prefix in ['DL', 'DHL', 'DS', 'DHS']: for suffix in ['ERROR', 'WARN', 'INFO']: @@ -142,7 +174,7 @@ def __next__(self): PREFIXES_ALL.extend(PREFIXES_NNL) -class AllChecks(): +class AllChecks: """All the checks in one class""" def __init__(self, file_object): @@ -319,7 +351,7 @@ def check_df_rc(self, line): if any(map(msg.endswith, [' ', '=', '.', ',', ':', ';'])): msg = msg[:-1] if msg.endswith(var_name): - msg = msg[:-len(var_name)] + msg = msg[: -len(var_name)] if msg.endswith('rc'): msg = msg[:-2] diff --git a/utils/cq/daos_pylint.py b/utils/cq/daos_pylint.py index 22e1fbc997c..05308e805f7 100755 --- a/utils/cq/daos_pylint.py +++ b/utils/cq/daos_pylint.py @@ -18,11 +18,15 @@ from pylint.lint import Run from pylint.reporters.collecting_reporter import CollectingReporter except ImportError: - if os.path.exists('venv'): - sys.path.append(os.path.join('venv', 'lib', - f'python{sys.version_info.major}.{sys.version_info.minor}', - 'site-packages')) + sys.path.append( + os.path.join( + 'venv', + 'lib', + f'python{sys.version_info.major}.{sys.version_info.minor}', + 'site-packages', + ) + ) try: from pylint.constants import full_version from pylint.lint import Run @@ -63,7 +67,7 @@ # also be enabled shortly however we have a number to correct or resolve before enabling. -class WrapScript(): +class WrapScript: """Create a wrapper for a scons file and maintain a line mapping An update here is needed as files in site_scons/*.py do not automatically import SCons but @@ -71,7 +75,6 @@ class WrapScript(): """ def __init__(self, fname, from_stdin): - self.line_map = {} # pylint: disable-next=consider-using-with self._outfile = tempfile.NamedTemporaryFile(mode='w+', prefix='daos_pylint_') @@ -159,7 +162,8 @@ def write_variables(outfile, prefix, variables): if variable.upper() == 'PREREQS': newlines += 1 outfile.write( - f'{prefix}{variable} = PreReqComponent(DefaultEnvironment(), Variables())\n') + f'{prefix}{variable} = PreReqComponent(DefaultEnvironment(), Variables())\n' + ) elif "ENV" in variable.upper(): newlines += 1 outfile.write(f'{prefix}{variable} = DefaultEnvironment()\n') @@ -184,11 +188,13 @@ def write_header(outfile): # Always import PreReqComponent here, but it'll only be used in some cases. 
This causes # errors in the toplevel SConstruct which are suppressed, the alternative would be to do # two passes and only add the include if needed later. - outfile.write("""# pylint: disable-next=unused-wildcard-import,wildcard-import + outfile.write( + """# pylint: disable-next=unused-wildcard-import,wildcard-import from SCons.Script import * # pylint: disable=import-outside-toplevel # pylint: disable-next=import-outside-toplevel,unused-wildcard-import,wildcard-import from SCons.Variables import * -from prereq_tools import PreReqComponent # pylint: disable=unused-import\n""") +from prereq_tools import PreReqComponent # pylint: disable=unused-import\n""" + ) return 5 def convert_line(self, line): @@ -196,7 +202,7 @@ def convert_line(self, line): return self.line_map[line] -class FileTypeList(): +class FileTypeList: """Class for sorting files Consumes a list of file/module names and sorts them into categories so that later on each @@ -213,8 +219,9 @@ def __init__(self): def file_count(self): """Return the number of files to be checked""" - return len(self.ftest_files) + len(self.scons_files) \ - + len(self.files) + len(self.fake_scons) + return ( + len(self.ftest_files) + len(self.scons_files) + len(self.files) + len(self.fake_scons) + ) def add(self, file, force=False): """Add a filename to the correct list""" @@ -340,13 +347,15 @@ def word_is_allowed(word, code): def parse_msg(msg): # Convert from a pylint message into a dict that can be using for printing. - vals = {'category': msg.category, - 'column': msg.column, - 'message-id': msg.msg_id, - 'message': msg.msg, - 'symbol': msg.symbol, - 'msg': msg.msg, - 'msg_id': msg.msg_id} + vals = { + 'category': msg.category, + 'column': msg.column, + 'message-id': msg.msg_id, + 'message': msg.msg, + 'symbol': msg.symbol, + 'msg': msg.msg, + 'msg_id': msg.msg_id, + } if wrapper: vals['path'] = target_file @@ -358,8 +367,9 @@ def parse_msg(msg): def msg_to_github(vals): # pylint: disable-next=consider-using-f-string - print('::{category} file={path},line={line},col={column},::{symbol}, {msg}'.format( - **vals)) + print( + '::{category} file={path},line={line},col={column},::{symbol}, {msg}'.format(**vals) + ) failed = False rep = CollectingReporter() @@ -437,14 +447,16 @@ def msg_to_github(vals): symbols[msg.symbol] += 1 if args.output_format == 'json': - report = {'type': vals['category'], - 'path': msg.path, - 'module': msg.module, - 'line': vals['line'], - 'column': vals['column'], - 'symbol': vals['symbol'], - 'message': vals['message'], - 'message-id': vals['message-id']} + report = { + 'type': vals['category'], + 'path': msg.path, + 'module': msg.module, + 'line': vals['line'], + 'column': vals['column'], + 'symbol': vals['symbol'], + 'message': vals['message'], + 'message-id': vals['message-id'], + } if msg.obj: report['obj'] = msg.obj @@ -474,10 +486,10 @@ def msg_to_github(vals): if not types or args.reports == 'n': return failed - for (mtype, count) in types.most_common(): + for mtype, count in types.most_common(): print(f'{mtype}:{count}') - for (mtype, count) in symbols.most_common(): + for mtype, count in symbols.most_common(): print(f'{mtype}:{count}') return failed @@ -519,8 +531,9 @@ def main(): rcfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pylintrc') - parser.add_argument('--msg-template', - default='{path}:{line}:{column}: {message-id}: {message} ({symbol})') + parser.add_argument( + '--msg-template', default='{path}:{line}:{column}: {message-id}: {message} ({symbol})' + ) parser.add_argument('--reports', 
choices=['y', 'n'], default='y') parser.add_argument('--output-format', choices=['text', 'json', 'github'], default='text') parser.add_argument('--rcfile', default=rcfile) diff --git a/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py b/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py index 34a30c55c6e..0d9ffba3d27 100755 --- a/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py +++ b/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py @@ -12,10 +12,9 @@ def rebasing(): """Determines if the current operation is a rebase""" - with subprocess.Popen(["git", "branch"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) as process: - + with subprocess.Popen( + ["git", "branch"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) as process: stdout = process.communicate()[0].decode() return stdout.split('\n', maxsplit=1)[0].startswith("* (no branch, rebasing") @@ -35,8 +34,10 @@ def submodule_check(modname, msg_file): with open(msg_file, 'r', encoding='utf-8') as file: lines = file.readlines() - message = f'# WARNING *** This patch modifies the {modname} reference. ' \ - 'Are you sure this is intended? *** WARNING' + message = ( + f'# WARNING *** This patch modifies the {modname} reference. ' + 'Are you sure this is intended? *** WARNING' + ) if lines[0] != message: lines = [message, "\n", "\n"] + lines @@ -47,12 +48,12 @@ def submodule_check(modname, msg_file): def main(msg_file): """main""" - for line in subprocess.check_output(['git', 'submodule', - 'status']).decode().rstrip().split('\n'): + for line in ( + subprocess.check_output(['git', 'submodule', 'status']).decode().rstrip().split('\n') + ): if line: submodule_check(line[1:].split(' ')[1], msg_file) if __name__ == '__main__': - main(sys.argv[1]) diff --git a/utils/node_local_test.py b/utils/node_local_test.py index 770c30dc464..2d15ae5820c 100755 --- a/utils/node_local_test.py +++ b/utils/node_local_test.py @@ -79,11 +79,10 @@ def umount(path, background=False): return ret.returncode -class NLTConf(): +class NLTConf: """Helper class for configuration""" def __init__(self, json_file, args): - with open(json_file, 'r') as ofh: self._bc = json.load(ofh) self.agent_dir = None @@ -93,8 +92,7 @@ def __init__(self, json_file, args): self.valgrind_errors = False self.log_timer = CulmTimer() self.compress_timer = CulmTimer() - self.dfuse_parent_dir = tempfile.mkdtemp(dir=args.dfuse_dir, - prefix='dnt_dfuse_') + self.dfuse_parent_dir = tempfile.mkdtemp(dir=args.dfuse_dir, prefix='dnt_dfuse_') self.tmp_dir = None if args.class_name: self.tmp_dir = join('nlt_logs', args.class_name) @@ -123,10 +121,10 @@ def set_args(self, args): size = args.max_log_size if size.endswith('MiB'): size = int(size[:-3]) - size *= (1024 * 1024) + size *= 1024 * 1024 elif size.endswith('GiB'): size = int(size[:-3]) - size *= (1024 * 1024 * 1024) + size *= 1024 * 1024 * 1024 self.max_log_size = int(size) def __getitem__(self, key): @@ -151,7 +149,7 @@ def flush_bz2(self): self.compress_timer.stop() -class CulmTimer(): +class CulmTimer: """Class to keep track of elapsed time so we know where to focus performance tuning""" def __init__(self): @@ -167,7 +165,7 @@ def stop(self): self.total += time.perf_counter() - self._start -class BoolRatchet(): +class BoolRatchet: """Used for saving test results""" # Any call to fail() of add_result with a True value will result @@ -186,7 +184,7 @@ def add_result(self, result): self.fail() -class WarningsFactory(): +class WarningsFactory: """Class to parse warnings, and save to 
JSON output file Take a list of failures, and output the data in a way that is best @@ -196,13 +194,9 @@ class WarningsFactory(): # Error levels supported by the reporting are LOW, NORMAL, HIGH, ERROR. - def __init__(self, - filename, - junit=False, - class_id=None, - post=False, - post_error=False, - check=None): + def __init__( + self, filename, junit=False, class_id=None, post=False, post_error=False, check=None + ): # pylint: disable=consider-using-with self._fd = open(filename, 'w') self.filename = filename @@ -226,8 +220,9 @@ def __init__(self, tc_startup = junit_xml.TestCase('Startup', classname=self._class_name('core')) tc_sanity = junit_xml.TestCase('Sanity', classname=self._class_name('core')) tc_sanity.add_error_info('NLT exited abnormally') - self.test_suite = junit_xml.TestSuite('Node Local Testing', - test_cases=[tc_startup, tc_sanity]) + self.test_suite = junit_xml.TestSuite( + 'Node Local Testing', test_cases=[tc_startup, tc_sanity] + ) self._write_test_file() else: self.test_suite = None @@ -259,8 +254,16 @@ def __del__(self): self.test_suite = None self.close() - def add_test_case(self, name, failure=None, test_class='core', output=None, duration=None, - stdout=None, stderr=None): + def add_test_case( + self, + name, + failure=None, + test_class='core', + output=None, + duration=None, + stdout=None, + stderr=None, + ): """Add a test case to the results class and other metadata will be set automatically, @@ -270,8 +273,13 @@ class and other metadata will be set automatically, if not self.test_suite: return - test_case = junit_xml.TestCase(name, classname=self._class_name(test_class), - elapsed_sec=duration, stdout=stdout, stderr=stderr) + test_case = junit_xml.TestCase( + name, + classname=self._class_name(test_class), + elapsed_sec=duration, + stdout=stdout, + stderr=stderr, + ) if failure: test_case.add_failure_info(failure, output=output) self.test_suite.test_cases.append(test_case) @@ -304,7 +312,7 @@ def explain(self, line, log_file, esignal): if count == 0: return - for (sline, smessage) in self.pending: + for sline, smessage in self.pending: locs.add(f'{sline.filename}:{sline.lineno}') symptoms.add(smessage) @@ -428,7 +436,7 @@ def get_base_env(clean=False): return env -class DaosPool(): +class DaosPool: """Class to store data about daos pools""" def __init__(self, server, pool_uuid, label): @@ -471,7 +479,7 @@ def fetch_containers(self): return containers -class DaosCont(): +class DaosCont: """Class to store data about daos containers""" def __init__(self, cont_uuid, label, pool): @@ -514,15 +522,17 @@ def destroy(self, valgrind=True, log_check=True): Raises: NLTestFail: If Pool was not provided when object created. 
""" - destroy_container(self.pool.conf, self.pool.id(), self.id(), - valgrind=valgrind, log_check=log_check) + destroy_container( + self.pool.conf, self.pool.id(), self.id(), valgrind=valgrind, log_check=log_check + ) -class DaosServer(): +class DaosServer: """Manage a DAOS server instance""" - def __init__(self, conf, test_class=None, valgrind=False, wf=None, fatal_errors=None, - enable_fi=False): + def __init__( + self, conf, test_class=None, valgrind=False, wf=None, fatal_errors=None, enable_fi=False + ): self.running = False self._file = __file__.lstrip('./') self._sp = None @@ -543,25 +553,23 @@ def __init__(self, conf, test_class=None, valgrind=False, wf=None, fatal_errors= self.engines = conf.args.engine_count self.sys_ram_rsvd = conf.args.system_ram_reserved # pylint: disable=consider-using-with - self.control_log = tempfile.NamedTemporaryFile(prefix='dnt_control_', - suffix='.log', - dir=conf.tmp_dir, - delete=False) - self.helper_log = tempfile.NamedTemporaryFile(prefix='dnt_helper_', - suffix='.log', - dir=conf.tmp_dir, - delete=False) - self.agent_log = tempfile.NamedTemporaryFile(prefix='dnt_agent_', - suffix='.log', - dir=conf.tmp_dir, - delete=False) + self.control_log = tempfile.NamedTemporaryFile( + prefix='dnt_control_', suffix='.log', dir=conf.tmp_dir, delete=False + ) + self.helper_log = tempfile.NamedTemporaryFile( + prefix='dnt_helper_', suffix='.log', dir=conf.tmp_dir, delete=False + ) + self.agent_log = tempfile.NamedTemporaryFile( + prefix='dnt_agent_', suffix='.log', dir=conf.tmp_dir, delete=False + ) self.server_logs = [] for engine in range(self.engines): prefix = f'dnt_server_{self._test_class}_{engine}_' - self.server_logs.append(tempfile.NamedTemporaryFile(prefix=prefix, - suffix='.log', - dir=conf.tmp_dir, - delete=False)) + self.server_logs.append( + tempfile.NamedTemporaryFile( + prefix=prefix, suffix='.log', dir=conf.tmp_dir, delete=False + ) + ) self.__process_name = 'daos_engine' if self.valgrind: self.__process_name = 'memcheck-amd64-' @@ -627,8 +635,9 @@ def _add_test_case(self, name, failure=None, duration=None): if not self._test_class: return - self.conf.wf.add_test_case(name, failure=failure, duration=duration, - test_class=self._test_class) + self.conf.wf.add_test_case( + name, failure=failure, duration=duration, test_class=self._test_class + ) def _check_timing(self, name, start, max_time): elapsed = time.perf_counter() - start @@ -671,13 +680,15 @@ def start(self): plain_env = os.environ.copy() if self.valgrind: - valgrind_args = ['--fair-sched=yes', - '--gen-suppressions=all', - '--xml=yes', - '--xml-file=dnt.server.%p.memcheck.xml', - '--num-callers=10', - '--track-origins=yes', - '--leak-check=full'] + valgrind_args = [ + '--fair-sched=yes', + '--gen-suppressions=all', + '--xml=yes', + '--xml-file=dnt.server.%p.memcheck.xml', + '--num-callers=10', + '--track-origins=yes', + '--leak-check=full', + ] suppression_file = join('src', 'cart', 'utils', 'memcheck-cart.supp') if not os.path.exists(suppression_file): suppression_file = join(self.conf['PREFIX'], 'etc', 'memcheck-cart.supp') @@ -691,8 +702,7 @@ def start(self): fd.write(f"export PATH={join(self.conf['PREFIX'], 'bin')}:$PATH\n") fd.write(f'exec valgrind {" ".join(valgrind_args)} daos_engine "$@"\n') - os.chmod(join(self._io_server_dir.name, 'daos_engine'), - stat.S_IXUSR | stat.S_IRUSR) + os.chmod(join(self._io_server_dir.name, 'daos_engine'), stat.S_IXUSR | stat.S_IRUSR) plain_env['PATH'] = f'{self._io_server_dir.name}:{plain_env["PATH"]}' self.max_start_time = 300 @@ -719,9 +729,7 @@ 
def start(self): if self._fi: # Set D_ALLOC to fail, but do not enable it. This can be changed later via # the set_fi() method. - faults = {'fault_config': [{'id': 0, - 'probability_x': 0, - 'probability_y': 100}]} + faults = {'fault_config': [{'id': 0, 'probability_x': 0, 'probability_y': 100}]} self._fi_file = tempfile.NamedTemporaryFile(prefix='fi_', suffix='.yaml') @@ -729,7 +737,7 @@ def start(self): self._fi_file.flush() server_env['D_FI_CONFIG'] = self._fi_file.name - for (key, value) in server_env.items(): + for key, value in server_env.items(): # If server log is set via server_debug then do not also set env settings. if self.conf.args.server_debug and key in ('DD_MASK', 'DD_SUBSYS', 'D_LOG_MASK'): continue @@ -772,11 +780,16 @@ def start(self): agent_bin = join(self.conf['PREFIX'], 'bin', 'daos_agent') - agent_cmd = [agent_bin, - '--config-path', agent_config, - '--insecure', - '--runtime_dir', self.agent_dir, - '--logfile', self.agent_log.name] + agent_cmd = [ + agent_bin, + '--config-path', + agent_config, + '--insecure', + '--runtime_dir', + self.agent_dir, + '--logfile', + self.agent_log.name, + ] if not self.conf.args.server_debug and not self.conf.args.client_debug: agent_cmd.append('--debug') @@ -947,10 +960,7 @@ def run_dmg(self, cmd): exe_cmd.extend(cmd) print(f'running {exe_cmd}') - return subprocess.run(exe_cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False) + return subprocess.run(exe_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) def run_dmg_json(self, cmd): """Run the specified dmg command in json mode @@ -1026,17 +1036,17 @@ def run_daos_client_cmd(self, cmd): cmd_env = get_base_env() - with tempfile.NamedTemporaryFile(prefix=f'dnt_cmd_{get_inc_id()}_', - suffix='.log', - dir=self.conf.tmp_dir, - delete=False) as log_file: + with tempfile.NamedTemporaryFile( + prefix=f'dnt_cmd_{get_inc_id()}_', suffix='.log', dir=self.conf.tmp_dir, delete=False + ) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name cmd_env['DAOS_AGENT_DRPC_DIR'] = self.conf.agent_dir - rc = subprocess.run(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=cmd_env, check=False) + rc = subprocess.run( + exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env, check=False + ) if rc.stderr != b'': print('Stderr from command') @@ -1083,10 +1093,12 @@ def run_daos_client_cmd_pil4dfs(self, cmd, check=True, container=None, report=Tr cmd_env = get_base_env() - with tempfile.NamedTemporaryFile(prefix=f'dnt_pil4dfs_{cmd[0]}_{get_inc_id()}_', - suffix='.log', - dir=self.conf.tmp_dir, - delete=False) as log_file: + with tempfile.NamedTemporaryFile( + prefix=f'dnt_pil4dfs_{cmd[0]}_{get_inc_id()}_', + suffix='.log', + dir=self.conf.tmp_dir, + delete=False, + ) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name @@ -1111,8 +1123,9 @@ def run_daos_client_cmd_pil4dfs(self, cmd, check=True, container=None, report=Tr print('Run command: ') print(cmd) - rc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, - env=cmd_env, check=False) + rc = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=cmd_env, check=False + ) print(rc) if rc.stderr != b'': @@ -1164,53 +1177,51 @@ def set_fi(self, probability=0): agent_bin = join(self.conf['PREFIX'], 'bin', 'daos_agent') - with tempfile.TemporaryDirectory(prefix='dnt_addr_',) as addr_dir: - + with tempfile.TemporaryDirectory( + prefix='dnt_addr_', + ) as addr_dir: addr_file = join(addr_dir, 
f'{system_name}.attach_info_tmp') - agent_cmd = [agent_bin, - '-i', - '-s', - self.agent_dir, - 'dump-attachinfo', - '-o', - addr_file] + agent_cmd = [agent_bin, '-i', '-s', self.agent_dir, 'dump-attachinfo', '-o', addr_file] rc = subprocess.run(agent_cmd, env=cmd_env, check=True) print(rc) # options here are: fault_id,max_faults,probability,err_code[,argument] - cmd = ['set_fi_attr', - '--cfg_path', - addr_dir, - '--group-name', - 'daos_server', - '--rank', - '0', - '--attr', - f'0,0,{probability},0,0'] + cmd = [ + 'set_fi_attr', + '--cfg_path', + addr_dir, + '--group-name', + 'daos_server', + '--rank', + '0', + '--attr', + f'0,0,{probability},0,0', + ] exec_cmd.append(join(self.conf['PREFIX'], 'bin', 'cart_ctl')) exec_cmd.extend(cmd) - with tempfile.NamedTemporaryFile(prefix=f'dnt_crt_ctl_{get_inc_id()}_', - suffix='.log', - delete=False) as log_file: - + with tempfile.NamedTemporaryFile( + prefix=f'dnt_crt_ctl_{get_inc_id()}_', suffix='.log', delete=False + ) as log_file: cmd_env['D_LOG_FILE'] = log_file.name cmd_env['DAOS_AGENT_DRPC_DIR'] = self.agent_dir - rc = subprocess.run(exec_cmd, - env=cmd_env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False) + rc = subprocess.run( + exec_cmd, + env=cmd_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + ) print(rc) valgrind_hdl.convert_xml() log_test(self.conf, log_file.name, show_memleaks=False) -class ValgrindHelper(): +class ValgrindHelper: """Class for running valgrind commands This helps setup the command line required, and @@ -1219,7 +1230,6 @@ class ValgrindHelper(): """ def __init__(self, conf, logid=None): - # Set this to False to disable valgrind, which will run faster. self.conf = conf self.use_valgrind = True @@ -1238,16 +1248,19 @@ def get_cmd_prefix(self): if not self._logid: self._logid = get_inc_id() - with tempfile.NamedTemporaryFile(prefix=f'dnt.{self._logid}.', dir='.', - suffix='.memcheck', delete=False) as log_file: + with tempfile.NamedTemporaryFile( + prefix=f'dnt.{self._logid}.', dir='.', suffix='.memcheck', delete=False + ) as log_file: self._xml_file = log_file.name - cmd = ['valgrind', - f'--xml-file={self._xml_file}', - '--xml=yes', - '--fair-sched=yes', - '--gen-suppressions=all', - '--error-exitcode=42'] + cmd = [ + 'valgrind', + f'--xml-file={self._xml_file}', + '--xml=yes', + '--fair-sched=yes', + '--gen-suppressions=all', + '--error-exitcode=42', + ] if self.full_check: cmd.extend(['--leak-check=full', '--show-leak-kinds=all']) @@ -1276,13 +1289,23 @@ def convert_xml(self): os.unlink(self._xml_file) -class DFuse(): +class DFuse: """Manage a dfuse instance""" instance_num = 0 - def __init__(self, daos, conf, pool=None, container=None, mount_path=None, uns_path=None, - caching=True, wbcache=True, multi_user=False): + def __init__( + self, + daos, + conf, + pool=None, + container=None, + mount_path=None, + uns_path=None, + caching=True, + wbcache=True, + multi_user=False, + ): if mount_path: self.dir = mount_path else: @@ -1312,7 +1335,6 @@ def __init__(self, daos, conf, pool=None, container=None, mount_path=None, uns_p os.mkdir(self.dir) def __str__(self): - if self._sp: running = 'running' else: @@ -1513,8 +1535,13 @@ def il_cmd(self, cmd, check_read=True, check_write=True, check_fstat=True): check_fstat = False try: - log_test(self.conf, log_name, check_read=check_read, check_write=check_write, - check_fstat=check_fstat) + log_test( + self.conf, + log_name, + check_read=check_read, + check_write=check_write, + check_fstat=check_fstat, + ) assert ret.returncode == 0 except 
NLTestNoFunction as error: command = ' '.join(cmd) @@ -1526,8 +1553,13 @@ def il_cmd(self, cmd, check_read=True, check_write=True, check_fstat=True): def run_query(self, use_json=False, quiet=False): """Run filesystem query""" - rc = run_daos_cmd(self.conf, ['filesystem', 'query', self.dir], - use_json=use_json, log_check=quiet, valgrind=quiet) + rc = run_daos_cmd( + self.conf, + ['filesystem', 'query', self.dir], + use_json=use_json, + log_check=quiet, + valgrind=quiet, + ) print(rc) return rc @@ -1612,7 +1644,7 @@ def import_daos(server, conf): return daos -class DaosCmdReturn(): +class DaosCmdReturn: """Class to enable pretty printing of daos output""" def __init__(self): @@ -1643,14 +1675,16 @@ def __str__(self): return output -def run_daos_cmd(conf, - cmd, - show_stdout=False, - valgrind=True, - log_check=True, - ignore_busy=False, - use_json=False, - cwd=None): +def run_daos_cmd( + conf, + cmd, + show_stdout=False, + valgrind=True, + log_check=True, + ignore_busy=False, + use_json=False, + cwd=None, +): """Run a DAOS command Run a command, returning what subprocess.run() would. @@ -1685,17 +1719,17 @@ def run_daos_cmd(conf, del cmd_env['DD_SUBSYS'] del cmd_env['D_LOG_MASK'] - with tempfile.NamedTemporaryFile(prefix=f'dnt_cmd_{get_inc_id()}_', - suffix='.log', - dir=conf.tmp_dir, - delete=False) as log_file: + with tempfile.NamedTemporaryFile( + prefix=f'dnt_cmd_{get_inc_id()}_', suffix='.log', dir=conf.tmp_dir, delete=False + ) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name cmd_env['DAOS_AGENT_DRPC_DIR'] = conf.agent_dir - rc = subprocess.run(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=cmd_env, check=False, cwd=cwd) + rc = subprocess.run( + exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env, check=False, cwd=cwd + ) if rc.stderr != b'': print('Stderr from command') @@ -1730,8 +1764,20 @@ def run_daos_cmd(conf, # pylint: disable-next=too-many-arguments -def create_cont(conf, pool=None, ctype=None, label=None, path=None, oclass=None, dir_oclass=None, - file_oclass=None, hints=None, valgrind=False, log_check=True, cwd=None): +def create_cont( + conf, + pool=None, + ctype=None, + label=None, + path=None, + oclass=None, + dir_oclass=None, + file_oclass=None, + hints=None, + valgrind=False, + log_check=True, + cwd=None, +): """Use 'daos' command to create a new container. Args: @@ -1784,16 +1830,17 @@ def create_cont(conf, pool=None, ctype=None, label=None, path=None, oclass=None, def _create_cont(): """Helper function for create_cont""" - rc = run_daos_cmd(conf, cmd, use_json=True, log_check=log_check, valgrind=valgrind, - cwd=cwd) + rc = run_daos_cmd(conf, cmd, use_json=True, log_check=log_check, valgrind=valgrind, cwd=cwd) print(rc) return rc rc = _create_cont() - if rc.returncode == 1 and \ - rc.json['error'] == 'failed to create container: DER_EXIST(-1004): Entity already exists': - + if ( + rc.returncode == 1 + and rc.json['error'] + == 'failed to create container: DER_EXIST(-1004): Entity already exists' + ): # If a path is set DER_EXIST may refer to the path, not a container so do not attempt to # remove and retry in this case. if path is None: @@ -1821,9 +1868,11 @@ def destroy_container(conf, pool, container, valgrind=True, log_check=True): # This shouldn't happen but can on unclean shutdown, file it as a test failure so it does # not get lost, however destroy the container and attempt to continue. 
# DAOS-8860 - conf.wf.add_test_case(f'destroy_container_{pool}/{container}', - failure='Failed to destroy container', - output=rc) + conf.wf.add_test_case( + f'destroy_container_{pool}/{container}', + failure='Failed to destroy container', + output=rc, + ) cmd = ['container', 'destroy', '--force', pool, container] rc = run_daos_cmd(conf, cmd, valgrind=valgrind, use_json=True) print(rc) @@ -1850,6 +1899,7 @@ def needs_dfuse(method): Runs every test twice, once with caching enabled, and once with caching disabled. """ + @functools.wraps(method) def _helper(self): if self.call_index == 0: @@ -1859,10 +1909,7 @@ def _helper(self): else: caching = False - self.dfuse = DFuse(self.server, - self.conf, - caching=caching, - container=self.container) + self.dfuse = DFuse(self.server, self.conf, caching=caching, container=self.container) self.dfuse.start(v_hint=self.test_name) try: rc = method(self) @@ -1875,7 +1922,7 @@ def _helper(self): # pylint: disable-next=invalid-name -class needs_dfuse_with_opt(): +class needs_dfuse_with_opt: """Decorator class for starting dfuse under posix_tests class By default runs the method twice, once with caching and once without, however can be @@ -1892,9 +1939,9 @@ def __init__(self, caching=None, wbcache=True, single_threaded=False): def __call__(self, method): """Wrapper function""" + @functools.wraps(method) def _helper(obj): - caching = self.caching if caching is None: if obj.call_index == 0: @@ -1904,11 +1951,9 @@ def _helper(obj): else: caching = False - obj.dfuse = DFuse(obj.server, - obj.conf, - caching=caching, - wbcache=self.wbcache, - container=obj.container) + obj.dfuse = DFuse( + obj.server, obj.conf, caching=caching, wbcache=self.wbcache, container=obj.container + ) obj.dfuse.start(v_hint=method.__name__, single_threaded=self.single_threaded) try: rc = method(obj) @@ -1916,10 +1961,11 @@ def _helper(obj): if obj.dfuse.stop(): obj.fatal_errors = True return rc + return _helper -class PrintStat(): +class PrintStat: """Class for nicely showing file 'stat' data, similar to ls -l""" headers = ['uid', 'gid', 'size', 'mode', 'filename'] @@ -1946,11 +1992,9 @@ def add(self, filename, attr=None, show_dir=False): if attr is None: attr = os.stat(filename) - self._stats.append([attr.st_uid, - attr.st_gid, - attr.st_size, - stat.filemode(attr.st_mode), - filename]) + self._stats.append( + [attr.st_uid, attr.st_gid, attr.st_size, stat.filemode(attr.st_mode), filename] + ) self.count += 1 if show_dir: @@ -1966,7 +2010,7 @@ def __eq__(self, other): # This is test code where methods are tests, so we want to have lots of them. 
-class PosixTests(): +class PosixTests: """Class for adding standalone unit tests""" # pylint: disable=too-many-public-methods @@ -2025,12 +2069,18 @@ def test_cont_list(self): @needs_dfuse_with_opt(caching=False) def test_oclass(self): """Test container object class options""" - container = create_cont(self.conf, self.pool, ctype="POSIX", label='oclass_test', - oclass='S1', dir_oclass='S2', file_oclass='S4') - run_daos_cmd(self.conf, - ['container', 'query', - self.pool.id(), container.id()], - show_stdout=True) + container = create_cont( + self.conf, + self.pool, + ctype="POSIX", + label='oclass_test', + oclass='S1', + dir_oclass='S2', + file_oclass='S4', + ) + run_daos_cmd( + self.conf, ['container', 'query', self.pool.id(), container.id()], show_stdout=True + ) dfuse = DFuse(self.server, self.conf, container=container) dfuse.use_valgrind = False @@ -2065,21 +2115,23 @@ def test_oclass(self): def test_cache(self): """Test with caching enabled""" - run_daos_cmd(self.conf, - ['container', 'query', - self.pool.id(), self.container.id()], - show_stdout=True) - - cont_attrs = {'dfuse-attr-time': 2, - 'dfuse-dentry-time': '100s', - 'dfuse-dentry-dir-time': '100s', - 'dfuse-ndentry-time': '100s'} + run_daos_cmd( + self.conf, ['container', 'query', self.pool.id(), self.container.id()], show_stdout=True + ) + + cont_attrs = { + 'dfuse-attr-time': 2, + 'dfuse-dentry-time': '100s', + 'dfuse-dentry-dir-time': '100s', + 'dfuse-ndentry-time': '100s', + } self.container.set_attrs(cont_attrs) - run_daos_cmd(self.conf, - ['container', 'get-attr', - self.pool.id(), self.container.id()], - show_stdout=True) + run_daos_cmd( + self.conf, + ['container', 'get-attr', self.pool.id(), self.container.id()], + show_stdout=True, + ) dfuse = DFuse(self.server, self.conf, container=self.container) dfuse.start() @@ -2108,14 +2160,12 @@ def test_cont_info(self): """Check that daos container info and fs get-attr works on container roots""" def _check_cmd(check_path): - rc = run_daos_cmd(self.conf, - ['container', 'query', '--path', check_path], - use_json=True) + rc = run_daos_cmd( + self.conf, ['container', 'query', '--path', check_path], use_json=True + ) print(rc) assert rc.returncode == 0, rc - rc = run_daos_cmd(self.conf, - ['fs', 'get-attr', '--path', check_path], - use_json=True) + rc = run_daos_cmd(self.conf, ['fs', 'get-attr', '--path', check_path], use_json=True) print(rc) assert rc.returncode == 0, rc @@ -2221,16 +2271,10 @@ def test_pre_read(self): def test_two_mounts(self): """Create two mounts, and check that a file created in one can be read from the other""" - dfuse0 = DFuse(self.server, - self.conf, - caching=False, - container=self.container) + dfuse0 = DFuse(self.server, self.conf, caching=False, container=self.container) dfuse0.start(v_hint='two_0') - dfuse1 = DFuse(self.server, - self.conf, - caching=True, - container=self.container) + dfuse1 = DFuse(self.server, self.conf, caching=True, container=self.container) dfuse1.start(v_hint='two_1') file0 = join(dfuse0.dir, 'file') @@ -2268,23 +2312,20 @@ def test_cache_expire(self): """ cache_time = 20 - cont_attrs = {'dfuse-data-cache': False, - 'dfuse-attr-time': cache_time, - 'dfuse-dentry-time': cache_time, - 'dfuse-ndentry-time': cache_time} + cont_attrs = { + 'dfuse-data-cache': False, + 'dfuse-attr-time': cache_time, + 'dfuse-dentry-time': cache_time, + 'dfuse-ndentry-time': cache_time, + } self.container.set_attrs(cont_attrs) - dfuse0 = DFuse(self.server, - self.conf, - caching=True, - wbcache=False, - container=self.container) + dfuse0 = DFuse( + 
self.server, self.conf, caching=True, wbcache=False, container=self.container + ) dfuse0.start(v_hint='expire_0') - dfuse1 = DFuse(self.server, - self.conf, - caching=False, - container=self.container) + dfuse1 = DFuse(self.server, self.conf, caching=False, container=self.container) dfuse1.start(v_hint='expire_1') # Create ten files. @@ -2667,22 +2708,29 @@ def test_il(self): # Copy something into a container self.dfuse.il_cmd(['cp', '/bin/bash', sub_cont_dir], check_read=False) # Read it from within a container - self.dfuse.il_cmd(['md5sum', join(sub_cont_dir, 'bash')], - check_read=False, check_write=False, check_fstat=False) - self.dfuse.il_cmd(['dd', - f'if={join(sub_cont_dir, "bash")}', - f'of={join(sub_cont_dir, "bash_copy")}', - 'iflag=direct', - 'oflag=direct', - 'bs=128k'], - check_fstat=False) + self.dfuse.il_cmd( + ['md5sum', join(sub_cont_dir, 'bash')], + check_read=False, + check_write=False, + check_fstat=False, + ) + self.dfuse.il_cmd( + [ + 'dd', + f'if={join(sub_cont_dir, "bash")}', + f'of={join(sub_cont_dir, "bash_copy")}', + 'iflag=direct', + 'oflag=direct', + 'bs=128k', + ], + check_fstat=False, + ) @needs_dfuse def test_xattr(self): """Perform basic tests with extended attributes""" new_file = join(self.dfuse.dir, 'attr_file') with open(new_file, 'w') as fd: - xattr.set(fd, 'user.mine', 'init_value') # This should fail as a security test. try: @@ -2698,7 +2746,7 @@ def test_xattr(self): pass xattr.set(fd, 'user.Xfuse.ids', b'other_value') - for (key, value) in xattr.get_all(fd): + for key, value in xattr.get_all(fd): print(f'xattr is {key}:{value}') @needs_dfuse @@ -2731,7 +2779,7 @@ def test_list_xattr(self): xattr.set(self.dfuse.dir, 'user.dummy', 'short string') - for (key, value) in xattr.get_all(self.dfuse.dir): + for key, value in xattr.get_all(self.dfuse.dir): expected_keys.remove(key) print(f'xattr is {key}:{value}') @@ -2763,8 +2811,7 @@ def test_chmod(self): with open(fname, 'w'): pass - modes = [stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, - stat.S_IRUSR] + modes = [stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, stat.S_IRUSR] for mode in modes: os.chmod(fname, mode) @@ -2873,10 +2920,7 @@ def test_rename_clobber(self): fd.write('test') # Start another dfuse instance to move the files around without the kernel knowing. - dfuse = DFuse(self.server, - self.conf, - container=self.container, - caching=False) + dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) dfuse.start(v_hint='rename_other') print(os.listdir(self.dfuse.dir)) @@ -3033,10 +3077,7 @@ def test_complex_unlink(self): fds.append(fd) # Start another dfuse instance to move the files around without the kernel knowing. - dfuse = DFuse(self.server, - self.conf, - container=self.container, - caching=False) + dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) dfuse.start(v_hint='unlink') print(os.listdir(self.dfuse.dir)) @@ -3061,10 +3102,7 @@ def test_complex_unlink(self): def test_cont_rw(self): """Test write access to another users container""" - dfuse = DFuse(self.server, - self.conf, - container=self.container, - caching=False) + dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) dfuse.start(v_hint='cont_rw_1') @@ -3089,30 +3127,37 @@ def test_cont_rw(self): self.fatal_errors = True # Update container ACLs so current user has rw permissions only, the minimum required. 
- rc = run_daos_cmd(self.conf, ['container', - 'update-acl', - self.pool.id(), - self.container.id(), - '--entry', - f'A::{os.getlogin()}@:rwta']) + rc = run_daos_cmd( + self.conf, + [ + 'container', + 'update-acl', + self.pool.id(), + self.container.id(), + '--entry', + f'A::{os.getlogin()}@:rwta', + ], + ) print(rc) # Assign the container to someone else. - rc = run_daos_cmd(self.conf, ['container', - 'set-owner', - self.pool.id(), - self.container.id(), - '--user', - 'root@', - '--group', - 'root@']) + rc = run_daos_cmd( + self.conf, + [ + 'container', + 'set-owner', + self.pool.id(), + self.container.id(), + '--user', + 'root@', + '--group', + 'root@', + ], + ) print(rc) # Now start dfuse and access the container, see who the file is owned by. - dfuse = DFuse(self.server, - self.conf, - container=self.container, - caching=False) + dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) dfuse.start(v_hint='cont_rw_2') stat_log = PrintStat() @@ -3166,10 +3211,7 @@ def test_complex_rename(self): with open(fname, 'w') as ofd: print(os.fstat(ofd.fileno())) - dfuse = DFuse(self.server, - self.conf, - container=self.container, - caching=False) + dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) dfuse.start(v_hint='rename') os.mkdir(join(dfuse.dir, 'step_dir')) @@ -3183,8 +3225,10 @@ def test_complex_rename(self): except FileNotFoundError: print('Failed to fstat() replaced file') - os.rename(join(self.dfuse.dir, 'step_dir', 'file-new'), - join(self.dfuse.dir, 'new_dir', 'my-file')) + os.rename( + join(self.dfuse.dir, 'step_dir', 'file-new'), + join(self.dfuse.dir, 'new_dir', 'my-file'), + ) print(os.fstat(ofd.fileno())) @@ -3194,31 +3238,32 @@ def test_complex_rename(self): def test_cont_ro(self): """Test access to a read-only container""" # Update container ACLs so current user has 'rta' permissions only, the minimum required. - rc = run_daos_cmd(self.conf, ['container', - 'update-acl', - self.pool.id(), - self.container.id(), - '--entry', - f'A::{os.getlogin()}@:rta']) + rc = run_daos_cmd( + self.conf, + [ + 'container', + 'update-acl', + self.pool.id(), + self.container.id(), + '--entry', + f'A::{os.getlogin()}@:rta', + ], + ) print(rc) assert rc.returncode == 0 # Assign the container to someone else. - rc = run_daos_cmd(self.conf, ['container', - 'set-owner', - self.pool.id(), - self.container.id(), - '--user', - 'root@']) + rc = run_daos_cmd( + self.conf, + ['container', 'set-owner', self.pool.id(), self.container.id(), '--user', 'root@'], + ) print(rc) assert rc.returncode == 0 # Now start dfuse and access the container, this should require read-only opening. - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='cont_ro') print(os.listdir(dfuse.dir)) @@ -3264,10 +3309,7 @@ def test_with_path(self): cont_path = join(tmp_dir, 'my-cont') create_cont(self.conf, self.pool, path=cont_path) - dfuse = DFuse(self.server, - self.conf, - caching=True, - uns_path=cont_path) + dfuse = DFuse(self.server, self.conf, caching=True, uns_path=cont_path) dfuse.start(v_hint='with_path') # Simply write a file. 
This will fail if dfuse isn't backed via @@ -3364,10 +3406,7 @@ def test_uns_basic(self): def test_dfuse_dio_off(self): """Test for dfuse with no caching options, but direct-io disabled""" self.container.set_attrs({'dfuse-direct-io-disable': 'on'}) - dfuse = DFuse(self.server, - self.conf, - caching=True, - container=self.container) + dfuse = DFuse(self.server, self.conf, caching=True, container=self.container) dfuse.start(v_hint='dio_off') @@ -3448,8 +3487,16 @@ def test_daos_fs_tool(self): assert check_dfs_tool_output(output, 'S1', '1048576') # run same command using pool, container, dfs-path, and dfs-prefix - cmd = ['fs', 'get-attr', pool, uns_container.id(), '--dfs-path', dir1, - '--dfs-prefix', uns_path] + cmd = [ + 'fs', + 'get-attr', + pool, + uns_container.id(), + '--dfs-path', + dir1, + '--dfs-prefix', + uns_path, + ] print('get-attr of d1') rc = run_daos_cmd(conf, cmd) assert rc.returncode == 0 @@ -3476,16 +3523,14 @@ def test_daos_fs_tool(self): assert check_dfs_tool_output(output, None, '1048576') # Run a command to change attr of dir1 - cmd = ['fs', 'set-attr', '--path', dir1, '--oclass', 'S2', - '--chunk-size', '16'] + cmd = ['fs', 'set-attr', '--path', dir1, '--oclass', 'S2', '--chunk-size', '16'] print('set-attr of d1') rc = run_daos_cmd(conf, cmd) assert rc.returncode == 0 print(f'rc is {rc}') # Run a command to change attr of file1, should fail - cmd = ['fs', 'set-attr', '--path', file1, '--oclass', 'S2', - '--chunk-size', '16'] + cmd = ['fs', 'set-attr', '--path', file1, '--oclass', 'S2', '--chunk-size', '16'] print('set-attr of f1') rc = run_daos_cmd(conf, cmd) print(f'rc is {rc}') @@ -3523,16 +3568,20 @@ def test_cont_copy(self): # Create a temporary directory, with one file into it and copy it into # the container. Check the return-code only, do not verify the data. # tempfile() will remove the directory on completion. - src_dir = tempfile.TemporaryDirectory(prefix='copy_src_',) + src_dir = tempfile.TemporaryDirectory( + prefix='copy_src_', + ) with open(join(src_dir.name, 'file'), 'w') as ofd: ofd.write('hello') os.symlink('file', join(src_dir.name, 'file_s')) - cmd = ['filesystem', - 'copy', - '--src', - src_dir.name, - '--dst', - f'daos://{self.pool.uuid}/{self.container}'] + cmd = [ + 'filesystem', + 'copy', + '--src', + src_dir.name, + '--dst', + f'daos://{self.pool.uuid}/{self.container}', + ] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3554,16 +3603,20 @@ def test_cont_clone(self): # Create a temporary directory, with one file into it and copy it into # the container. Check the return code only, do not verify the data. # tempfile() will remove the directory on completion. - src_dir = tempfile.TemporaryDirectory(prefix='copy_src_',) + src_dir = tempfile.TemporaryDirectory( + prefix='copy_src_', + ) with open(join(src_dir.name, 'file'), 'w') as ofd: ofd.write('hello') - cmd = ['filesystem', - 'copy', - '--src', - src_dir.name, - '--dst', - f'daos://{self.pool.uuid}/{self.container.id()}'] + cmd = [ + 'filesystem', + 'copy', + '--src', + src_dir.name, + '--dst', + f'daos://{self.pool.uuid}/{self.container.id()}', + ] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3574,12 +3627,14 @@ def test_cont_clone(self): # Now create a container uuid and do an object based copy. # The daos command will create the target container on demand. 
- cmd = ['container', - 'clone', - '--src', - f'daos://{self.pool.uuid}/{self.container.id()}', - '--dst', - f'daos://{self.pool.uuid}/'] + cmd = [ + 'container', + 'clone', + '--src', + f'daos://{self.pool.uuid}/{self.container.id()}', + '--dst', + f'daos://{self.pool.uuid}/', + ] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3594,10 +3649,12 @@ def test_dfuse_perms(self): """Test permissions caching for DAOS-12577""" cache_time = 10 - cont_attrs = {'dfuse-data-cache': False, - 'dfuse-attr-time': cache_time, - 'dfuse-dentry-time': cache_time, - 'dfuse-ndentry-time': cache_time} + cont_attrs = { + 'dfuse-data-cache': False, + 'dfuse-attr-time': cache_time, + 'dfuse-dentry-time': cache_time, + 'dfuse-ndentry-time': cache_time, + } self.container.set_attrs(cont_attrs) dfuse = DFuse(self.server, self.conf, container=self.container, wbcache=False) @@ -3671,11 +3728,9 @@ def test_daos_fs_check(self): """Test DAOS FS Checker""" # pylint: disable=too-many-branches # pylint: disable=too-many-statements - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='fs_check_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -3775,8 +3830,16 @@ def test_daos_fs_check(self): self.server.run_daos_client_cmd(cmd) # run the checker while dfuse is still mounted (should fail - EX open) - cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print', '--dir-name', - 'lf1'] + cmd = [ + 'fs', + 'check', + self.pool.id(), + self.container.id(), + '--flags', + 'print', + '--dir-name', + 'lf1', + ] rc = run_daos_cmd(self.conf, cmd, ignore_busy=True) print(rc) assert rc.returncode != 0 @@ -3791,8 +3854,16 @@ def test_daos_fs_check(self): # fs check with relink should find the 2 leaked directories. # Everything under them should be relinked but not reported as leaked. - cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print,relink', - '--dir-name', 'lf1'] + cmd = [ + 'fs', + 'check', + self.pool.id(), + self.container.id(), + '--flags', + 'print,relink', + '--dir-name', + 'lf1', + ] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -3812,11 +3883,9 @@ def test_daos_fs_check(self): raise NLTestFail('Wrong number of Leaked OIDs') # remount dfuse - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='fs_check_test') path = dfuse.dir @@ -3856,8 +3925,16 @@ def test_daos_fs_check(self): # fs check with relink should find 3 leaked dirs and 1 leaked file that were directly under # test_dir2. Everything under those leaked dirs are relinked but not reported as leaked. 
- cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print,relink', - '--dir-name', 'lf2'] + cmd = [ + 'fs', + 'check', + self.pool.id(), + self.container.id(), + '--flags', + 'print,relink', + '--dir-name', + 'lf2', + ] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -3877,11 +3954,9 @@ def test_daos_fs_check(self): raise NLTestFail('Wrong number of Leaked OIDs') # remount dfuse - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='fs_check_test') path = dfuse.dir @@ -3910,11 +3985,9 @@ def test_daos_fs_check(self): def test_daos_fs_fix(self): """Test DAOS FS Fix Tool""" - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='fs_fix_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -3969,8 +4042,17 @@ def test_daos_fs_fix(self): assert error.errno == errno.EINVAL # fix corrupted entries while dfuse is running - should fail - cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', '/test_dir/f1', - '--type', '--chunk-size', '1048576'] + cmd = [ + 'fs', + 'fix-entry', + self.pool.id(), + self.container.id(), + '--dfs-path', + '/test_dir/f1', + '--type', + '--chunk-size', + '1048576', + ] rc = run_daos_cmd(self.conf, cmd, ignore_busy=True) print(rc) assert rc.returncode != 0 @@ -3984,8 +4066,17 @@ def test_daos_fs_fix(self): self.fatal_errors = True # fix corrupted entries - cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', '/test_dir/f1', - '--type', '--chunk-size', '1048576'] + cmd = [ + 'fs', + 'fix-entry', + self.pool.id(), + self.container.id(), + '--dfs-path', + '/test_dir/f1', + '--type', + '--chunk-size', + '1048576', + ] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -3994,8 +4085,17 @@ def test_daos_fs_fix(self): if line[-1] != 'Adjusting chunk size of /test_dir/f1 to 1048576': raise NLTestFail('daos fs fix-entry /test_dir/f1') - cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', - '/test_dir/1d1/f3', '--type', '--chunk-size', '1048576'] + cmd = [ + 'fs', + 'fix-entry', + self.pool.id(), + self.container.id(), + '--dfs-path', + '/test_dir/1d1/f3', + '--type', + '--chunk-size', + '1048576', + ] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -4004,8 +4104,15 @@ def test_daos_fs_fix(self): if line[-1] != 'Adjusting chunk size of /test_dir/1d1/f3 to 1048576': raise NLTestFail('daos fs fix-entry /test_dir/1d1/f3') - cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', - '/test_dir/1d2', '--type'] + cmd = [ + 'fs', + 'fix-entry', + self.pool.id(), + self.container.id(), + '--dfs-path', + '/test_dir/1d2', + '--type', + ] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -4015,11 +4122,9 @@ def test_daos_fs_fix(self): raise NLTestFail('daos fs fix-entry /test_dir/1d2') # remount dfuse - dfuse = DFuse(self.server, - self.conf, - pool=self.pool.id(), - container=self.container, - caching=False) + dfuse = DFuse( + self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False + ) dfuse.start(v_hint='fs_fix_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -4118,15 +4223,16 @@ 
def test_pil4dfs(self): # dd to write a file file5 = join(path, 'newfile') - self.server.run_daos_client_cmd_pil4dfs(['dd', 'if=/dev/zero', f'of={file5}', 'bs=1', - 'count=1']) + self.server.run_daos_client_cmd_pil4dfs( + ['dd', 'if=/dev/zero', f'of={file5}', 'bs=1', 'count=1'] + ) # cp "/usr/bin/mkdir" to DFS and call "/usr/bin/file" to analyze the binary file file6 file6 = join(path, 'elffile') self.server.run_daos_client_cmd_pil4dfs(['cp', '/usr/bin/mkdir', file6]) self.server.run_daos_client_cmd_pil4dfs(['file', file6]) -class NltStdoutWrapper(): +class NltStdoutWrapper: """Class for capturing stdout from threads""" def __init__(self): @@ -4167,7 +4273,7 @@ def __del__(self): sys.stdout = self._stdout -class NltStderrWrapper(): +class NltStderrWrapper: """Class for capturing stderr from threads""" def __init__(self): @@ -4223,12 +4329,9 @@ def _run_test(ptl=None, function=None, test_cb=None): # performance impact. There are other tests that run with valgrind enabled so this # should not reduce coverage. try: - ptl.container = create_cont(conf, - pool, - ctype="POSIX", - valgrind=False, - log_check=False, - label=function) + ptl.container = create_cont( + conf, pool, ctype="POSIX", valgrind=False, log_check=False, label=function + ) ptl.container_label = function test_cb() ptl.container.destroy(valgrind=False, log_check=False) @@ -4237,21 +4340,25 @@ def _run_test(ptl=None, function=None, test_cb=None): trace = ''.join(traceback.format_tb(inst.__traceback__)) duration = time.perf_counter() - start out_wrapper.sprint(f'{ptl.test_name} Failed') - conf.wf.add_test_case(ptl.test_name, - repr(inst), - stdout=out_wrapper.get_thread_output(), - stderr=err_wrapper.get_thread_err(), - output=trace, - test_class='test', - duration=duration) + conf.wf.add_test_case( + ptl.test_name, + repr(inst), + stdout=out_wrapper.get_thread_output(), + stderr=err_wrapper.get_thread_err(), + output=trace, + test_class='test', + duration=duration, + ) raise duration = time.perf_counter() - start out_wrapper.sprint(f'Test {ptl.test_name} took {duration:.1f} seconds') - conf.wf.add_test_case(ptl.test_name, - stdout=out_wrapper.get_thread_output(), - stderr=err_wrapper.get_thread_err(), - test_class='test', - duration=duration) + conf.wf.add_test_case( + ptl.test_name, + stdout=out_wrapper.get_thread_output(), + stderr=err_wrapper.get_thread_err(), + test_class='test', + duration=duration, + ) if not ptl.needs_more: break ptl.call_index = ptl.call_index + 1 @@ -4271,7 +4378,6 @@ def _run_test(ptl=None, function=None, test_cb=None): _run_test(ptl=pto, test_cb=obj, function=function) else: - threads = [] slow_tests = ['test_readdir_25', 'test_uns_basic', 'test_daos_fs_tool'] @@ -4288,11 +4394,13 @@ def _run_test(ptl=None, function=None, test_cb=None): if not callable(obj): continue - thread = threading.Thread(None, - target=_run_test, - name=f'test {function}', - kwargs={'ptl': ptl, 'test_cb': obj, 'function': function}, - daemon=True) + thread = threading.Thread( + None, + target=_run_test, + name=f'test {function}', + kwargs={'ptl': ptl, 'test_cb': obj, 'function': function}, + daemon=True, + ) thread.start() threads.append(thread) @@ -4316,9 +4424,11 @@ def _run_test(ptl=None, function=None, test_cb=None): # the tests are running in parallel. We could revise this so there's a dfuse method on # posix_tests class itself if required. 
for fuse in server.fuse_procs: - conf.wf.add_test_case('fuse leak in tests', - f'Test leaked dfuse instance at {fuse}', - test_class='test',) + conf.wf.add_test_case( + 'fuse leak in tests', + f'Test leaked dfuse instance at {fuse}', + test_class='test', + ) out_wrapper = None err_wrapper = None @@ -4333,9 +4443,10 @@ def run_tests(dfuse): fname = join(path, 'test_file3') - rc = subprocess.run(['dd', 'if=/dev/zero', 'bs=16k', 'count=64', # nosec - f'of={join(path, "dd_file")}'], - check=True) + rc = subprocess.run( + ['dd', 'if=/dev/zero', 'bs=16k', 'count=64', f'of={join(path, "dd_file")}'], # nosec + check=True, + ) print(rc) ofd = open(fname, 'w') ofd.write('hello') @@ -4464,17 +4575,19 @@ def log_timer_wrapper(*args, **kwargs): @log_timer -def log_test(conf, - filename, - show_memleaks=True, - quiet=False, - skip_fi=False, - leak_wf=None, - ignore_einval=False, - ignore_busy=False, - check_read=False, - check_write=False, - check_fstat=False): +def log_test( + conf, + filename, + show_memleaks=True, + quiet=False, + skip_fi=False, + leak_wf=None, + ignore_einval=False, + ignore_busy=False, + check_read=False, + check_write=False, + check_fstat=False, +): """Run the log checker on filename, logging to stdout""" # pylint: disable=too-many-arguments @@ -4520,9 +4633,7 @@ def sizeof_fmt(num, suffix='B'): lto.skip_suffixes.append(" DER_BUSY(-1012): 'Device or resource busy'") try: - lto.check_log_file(abort_on_warning=True, - show_memleaks=show_memleaks, - leak_wf=leak_wf) + lto.check_log_file(abort_on_warning=True, show_memleaks=show_memleaks, leak_wf=leak_wf) except nlt_lt.LogCheckError: pass @@ -4546,8 +4657,9 @@ def sizeof_fmt(num, suffix='B'): raise NLTestNoFunction('dfuse___fxstat') if conf.max_log_size and fstat.st_size > conf.max_log_size: - message = (f'Max log size exceeded, {sizeof_fmt(fstat.st_size)} > ' - + sizeof_fmt(conf.max_log_size)) + message = f'Max log size exceeded, {sizeof_fmt(fstat.st_size)} > ' + sizeof_fmt( + conf.max_log_size + ) conf.wf.add_test_case('logfile_size', failure=message) return lto.fi_location @@ -4687,20 +4799,19 @@ def run_in_fg(server, conf, args): # Only set the container cache attributes when the container is initially created so they # can be modified later. 
- cont_attrs = {'dfuse-data-cache': False, - 'dfuse-attr-time': 60, - 'dfuse-dentry-time': 60, - 'dfuse-ndentry-time': 60, - 'dfuse-direct-io-disable': False} + cont_attrs = { + 'dfuse-data-cache': False, + 'dfuse-attr-time': 60, + 'dfuse-dentry-time': 60, + 'dfuse-ndentry-time': 60, + 'dfuse-direct-io-disable': False, + } container.set_attrs(cont_attrs) container = container.uuid - dfuse = DFuse(server, - conf, - pool=pool.uuid, - caching=True, - wbcache=False, - multi_user=args.multi_user) + dfuse = DFuse( + server, conf, pool=pool.uuid, caching=True, wbcache=False, multi_user=args.multi_user + ) dfuse.log_flush = True dfuse.start() @@ -4817,11 +4928,9 @@ def print_results(): all_start = time.perf_counter() while True: - row = [count] row.extend(create_times) - dfuse = DFuse(server, conf, pool=pool, container=container, - caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) dir_dir = join(dfuse.dir, f'dirs.{count}') file_dir = join(dfuse.dir, f'files.{count}') dfuse.start() @@ -4831,35 +4940,29 @@ def print_results(): print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, - caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', file_dir], stdout=subprocess.PIPE, - check=True) + subprocess.run(['/bin/ls', file_dir], stdout=subprocess.PIPE, check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, - caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', dir_dir], stdout=subprocess.PIPE, - check=True) + subprocess.run(['/bin/ls', '-t', dir_dir], stdout=subprocess.PIPE, check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, - caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) dfuse.start() start = time.perf_counter() # Use sort by time here so ls calls stat, if you run ls -l then it will # also call getxattr twice which skews the figures. - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, - check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) @@ -4868,21 +4971,15 @@ def print_results(): # Test with caching enabled. Check the file directory, and do it twice # without restarting, to see the effect of populating the cache, and # reading from the cache. 
- dfuse = DFuse(server, - conf, - pool=pool, - container=container, - caching=True) + dfuse = DFuse(server, conf, pool=pool, container=container, caching=True) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, - check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, - check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) @@ -4898,10 +4995,7 @@ def print_results(): create_times = make_dirs(dfuse.dir, count) dfuse.stop() - run_daos_cmd(conf, ['container', - 'destroy', - pool, - container]) + run_daos_cmd(conf, ['container', 'destroy', pool, container]) print_results() @@ -4909,9 +5003,7 @@ def test_pydaos_kv(server, conf): """Test the KV interface""" # pylint: disable=consider-using-with - pydaos_log_file = tempfile.NamedTemporaryFile(prefix='dnt_pydaos_', - suffix='.log', - delete=False) + pydaos_log_file = tempfile.NamedTemporaryFile(prefix='dnt_pydaos_', suffix='.log', delete=False) os.environ['D_LOG_FILE'] = pydaos_log_file.name daos = import_daos(server, conf) @@ -4972,9 +5064,9 @@ def test_pydaos_kv(server, conf): def test_pydaos_kv_obj_class(server, conf): """Test the predefined object class works with KV""" - with tempfile.NamedTemporaryFile(prefix='kv_objclass_pydaos_', - suffix='.log', - delete=False) as tmp_file: + with tempfile.NamedTemporaryFile( + prefix='kv_objclass_pydaos_', suffix='.log', delete=False + ) as tmp_file: log_name = tmp_file.name os.environ['D_LOG_FILE'] = log_name @@ -5041,6 +5133,7 @@ def test_pydaos_kv_obj_class(server, conf): daos._cleanup() log_test(conf, log_name) + # Fault injection testing. 
# # This runs two different commands under fault injection, although it allows @@ -5061,11 +5154,10 @@ def test_pydaos_kv_obj_class(server, conf): # -class AllocFailTestRun(): +class AllocFailTestRun: """Class to run a fault injection command with a single fault""" def __init__(self, aft, cmd, env, loc, cwd): - # The return from subprocess.poll self.ret = None self.fault_injected = None @@ -5095,10 +5187,9 @@ def __init__(self, aft, cmd, env, loc, cwd): prefix = f'dnt_{loc:04d}_' else: prefix = 'dnt_reference_' - with tempfile.NamedTemporaryFile(prefix=prefix, - suffix='.log', - dir=self._aft.log_dir, - delete=False) as log_file: + with tempfile.NamedTemporaryFile( + prefix=prefix, suffix='.log', dir=self._aft.log_dir, delete=False + ) as log_file: self.log_file = log_file.name self._env['D_LOG_FILE'] = self.log_file @@ -5124,16 +5215,18 @@ def start(self): """Start the command""" faults = {} - faults['fault_config'] = [{'id': 100, - 'probability_x': 1, - 'probability_y': 1}] + faults['fault_config'] = [{'id': 100, 'probability_x': 1, 'probability_y': 1}] if self.loc: - faults['fault_config'].append({'id': 0, - 'probability_x': 1, - 'probability_y': 1, - 'interval': self.loc, - 'max_faults': 1}) + faults['fault_config'].append( + { + 'id': 0, + 'probability_x': 1, + 'probability_y': 1, + 'interval': self.loc, + 'max_faults': 1, + } + ) if self._aft.skip_daos_init: faults['fault_config'].append({'id': 101, 'probability_x': 1}) @@ -5152,12 +5245,14 @@ def start(self): else: exec_cmd = self._cmd - self._sp = subprocess.Popen(exec_cmd, - env=self._env, - cwd=self._cwd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + self._sp = subprocess.Popen( + exec_cmd, + env=self._env, + cwd=self._cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) def has_finished(self): """Check if the command has completed""" @@ -5182,8 +5277,8 @@ def _post(self, rc): This is where all the checks are performed. """ - def _explain(): + def _explain(): if self._aft.conf.tmp_dir: log_dir = self._aft.conf.tmp_dir else: @@ -5192,10 +5287,11 @@ def _explain(): short_log_file = self.log_file if short_log_file.startswith(self.log_file): - short_log_file = short_log_file[len(log_dir) + 1:] + short_log_file = short_log_file[len(log_dir) + 1 :] self._aft.wf.explain(self._fi_loc, short_log_file, fi_signal) self._aft.conf.wf.explain(self._fi_loc, short_log_file, fi_signal) + # Put in a new-line. print() self.returncode = rc @@ -5224,13 +5320,15 @@ def _explain(): else: wf = None - self._fi_loc = log_test(self._aft.conf, - self.log_file, - show_memleaks=show_memleaks, - ignore_busy=self._aft.ignore_busy, - quiet=True, - skip_fi=True, - leak_wf=wf) + self._fi_loc = log_test( + self._aft.conf, + self.log_file, + show_memleaks=show_memleaks, + ignore_busy=self._aft.ignore_busy, + quiet=True, + skip_fi=True, + leak_wf=wf, + ) self.fault_injected = True assert self._fi_loc except NLTestNoFi: @@ -5256,22 +5354,22 @@ def _explain(): # These checks will report an error against the line of code that introduced the "leak" # which may well only have a loose correlation to where the error was reported. if self._aft.check_daos_stderr: - # The go code will report a stacktrace in some cases on segfault or double-free # and these will obviously not be the expected output but are obviously an error, # to avoid filling the results with lots of warnings about stderr just include one # to say the check is disabled. 
if rc in (-6, -11): - self._aft.wf.add(self._fi_loc, - 'NORMAL', - f"Unable to check stderr because of exit code '{rc}'", - mtype='Crash preventing check') + self._aft.wf.add( + self._fi_loc, + 'NORMAL', + f"Unable to check stderr because of exit code '{rc}'", + mtype='Crash preventing check', + ) _explain() return stderr = self._stderr.decode('utf-8').rstrip() for line in stderr.splitlines(): - # This is what the go code uses. if line.endswith(': DER_NOMEM(-1009): Out of memory'): continue @@ -5292,45 +5390,53 @@ def _explain(): continue if 'DER_UNKNOWN' in line: - self._aft.wf.add(self._fi_loc, - 'HIGH', - f"Incorrect stderr '{line}'", - mtype='Invalid error code used') + self._aft.wf.add( + self._fi_loc, + 'HIGH', + f"Incorrect stderr '{line}'", + mtype='Invalid error code used', + ) continue - self._aft.wf.add(self._fi_loc, - 'NORMAL', - f"Malformed stderr '{line}'", - mtype='Malformed stderr') + self._aft.wf.add( + self._fi_loc, 'NORMAL', f"Malformed stderr '{line}'", mtype='Malformed stderr' + ) _explain() return if self.returncode == 0 and self._aft.check_post_stdout: if self.stdout != self._aft.expected_stdout: - self._aft.wf.add(self._fi_loc, - 'NORMAL', - f"Incorrect stdout '{self.stdout}'", - mtype='Out of memory caused zero exit code with incorrect output') + self._aft.wf.add( + self._fi_loc, + 'NORMAL', + f"Incorrect stdout '{self.stdout}'", + mtype='Out of memory caused zero exit code with incorrect output', + ) if self._aft.check_stderr: stderr = self._stderr.decode('utf-8').rstrip() - if stderr != '' and not stderr.endswith('(-1009): Out of memory') and \ - not stderr.endswith(': errno 12 (Cannot allocate memory)') and \ - 'error parsing command line arguments' not in stderr and \ - self.stdout != self._aft.expected_stdout: + if ( + stderr != '' + and not stderr.endswith('(-1009): Out of memory') + and not stderr.endswith(': errno 12 (Cannot allocate memory)') + and 'error parsing command line arguments' not in stderr + and self.stdout != self._aft.expected_stdout + ): if self.stdout != b'': print(self._aft.expected_stdout) print() print(self.stdout) print() - self._aft.wf.add(self._fi_loc, - 'NORMAL', - f"Incorrect stderr '{stderr}'", - mtype='Out of memory not reported correctly via stderr') + self._aft.wf.add( + self._fi_loc, + 'NORMAL', + f"Incorrect stderr '{stderr}'", + mtype='Out of memory not reported correctly via stderr', + ) _explain() -class AllocFailTest(): +class AllocFailTest: # pylint: disable=too-few-public-methods """Class to describe fault injection command""" @@ -5411,7 +5517,6 @@ def _prep(self): # finish. After each repetition completes then check for re-launch new processes # to keep the pipeline full. 
while not finished or active: - if not finished: while len(active) < max_child: active.append(self._run_cmd(fid)) @@ -5515,9 +5620,17 @@ def test_dfuse_start(server, conf, wf): os.mkdir(mount_point) - cmd = [join(conf['PREFIX'], 'bin', 'dfuse'), - '--mountpoint', mount_point, - '--pool', pool.id(), '--cont', container.id(), '--foreground', '--singlethread'] + cmd = [ + join(conf['PREFIX'], 'bin', 'dfuse'), + '--mountpoint', + mount_point, + '--pool', + pool.id(), + '--cont', + container.id(), + '--foreground', + '--singlethread', + ] test_cmd = AllocFailTest(conf, 'dfuse', cmd) test_cmd.wf = wf @@ -5542,16 +5655,20 @@ def test_alloc_fail_copy(server, conf, wf): """ def get_cmd(cont_id): - return ['daos', - 'filesystem', - 'copy', - '--src', - f'daos://{pool.id()}/aft_base', - '--dst', - f'daos://{pool.id()}/container_{cont_id}'] + return [ + 'daos', + 'filesystem', + 'copy', + '--src', + f'daos://{pool.id()}/aft_base', + '--dst', + f'daos://{pool.id()}/container_{cont_id}', + ] pool = server.get_test_pool_obj() - with tempfile.TemporaryDirectory(prefix='copy_src_',) as src_dir: + with tempfile.TemporaryDirectory( + prefix='copy_src_', + ) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5562,8 +5679,9 @@ def get_cmd(cont_id): os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', sub_dir, - '--dst', f'daos://{pool.id()}/aft_base']) + rc = run_daos_cmd( + conf, ['filesystem', 'copy', '--src', sub_dir, '--dst', f'daos://{pool.id()}/aft_base'] + ) assert rc.returncode == 0, rc test_cmd = AllocFailTest(conf, 'filesystem-copy', get_cmd) @@ -5589,8 +5707,15 @@ def test_alloc_fail_copy_trunc(server, conf, wf): files_needed = 4000 def get_cmd(_): - cmd = ['daos', 'filesystem', 'copy', '--src', src_file.name, - '--dst', f'daos://{pool.id()}/aftc/new_dir/file.{get_cmd.idx}'] + cmd = [ + 'daos', + 'filesystem', + 'copy', + '--src', + src_file.name, + '--dst', + f'daos://{pool.id()}/aftc/new_dir/file.{get_cmd.idx}', + ] get_cmd.idx += 1 assert get_cmd.idx <= files_needed return cmd @@ -5598,7 +5723,9 @@ def get_cmd(_): get_cmd.idx = 0 # pylint: disable=invalid-name pool = server.get_test_pool_obj() - with tempfile.TemporaryDirectory(prefix='copy_src_',) as src_dir: + with tempfile.TemporaryDirectory( + prefix='copy_src_', + ) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5606,12 +5733,12 @@ def get_cmd(_): with open(join(sub_dir, f'file.{idx}'), 'w') as ofd: ofd.write('hello') - rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', sub_dir, - '--dst', f'daos://{pool.id()}/aftc']) + rc = run_daos_cmd( + conf, ['filesystem', 'copy', '--src', sub_dir, '--dst', f'daos://{pool.id()}/aftc'] + ) assert rc.returncode == 0, rc with tempfile.NamedTemporaryFile() as src_file: - test_cmd = AllocFailTest(conf, 'filesystem-copy-trunc', get_cmd) test_cmd.wf = wf test_cmd.check_daos_stderr = True @@ -5632,7 +5759,9 @@ def test_alloc_pil4dfs_ls(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX', label='pil4dfs_fi') - with tempfile.TemporaryDirectory(prefix='pil4_src_',) as src_dir: + with tempfile.TemporaryDirectory( + prefix='pil4_src_', + ) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5644,8 +5773,17 @@ def test_alloc_pil4dfs_ls(server, conf, wf): os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', f'{src_dir}/new_dir', - '--dst', 
f'daos://{pool.id()}/{container.id()}']) + rc = run_daos_cmd( + conf, + [ + 'filesystem', + 'copy', + '--src', + f'{src_dir}/new_dir', + '--dst', + f'daos://{pool.id()}/{container.id()}', + ], + ) print(rc) assert rc.returncode == 0, rc @@ -5667,12 +5805,14 @@ def test_alloc_cont_create(server, conf, wf): pool = server.get_test_pool_obj() def get_cmd(cont_id): - return ['daos', - 'container', - 'create', - pool.id(), - '--properties', - f'srv_cksum:on,label:{cont_id}'] + return [ + 'daos', + 'container', + 'create', + pool.id(), + '--properties', + f'srv_cksum:on,label:{cont_id}', + ] test_cmd = AllocFailTest(conf, 'cont-create', get_cmd) test_cmd.wf = wf @@ -5691,13 +5831,15 @@ def test_alloc_fail_cont_create(server, conf): dfuse.start() def get_cmd(cont_id): - return ['daos', - 'container', - 'create', - '--type', - 'POSIX', - '--path', - join(dfuse.dir, f'container_{cont_id}')] + return [ + 'daos', + 'container', + 'create', + '--type', + 'POSIX', + '--path', + join(dfuse.dir, f'container_{cont_id}'), + ] test_cmd = AllocFailTest(conf, 'cont-create', get_cmd) test_cmd.check_post_stdout = False @@ -5779,14 +5921,9 @@ def test_fi_list_attr(server, conf, wf): container = create_cont(conf, pool) - container.set_attrs({'my-test-attr-1': 'some-value', - 'my-test-attr-2': 'some-other-value'}) + container.set_attrs({'my-test-attr-1': 'some-value', 'my-test-attr-2': 'some-other-value'}) - cmd = ['daos', - 'container', - 'list-attrs', - pool.id(), - container.id()] + cmd = ['daos', 'container', 'list-attrs', pool.id(), container.id()] test_cmd = AllocFailTest(conf, 'cont-list-attr', cmd) test_cmd.wf = wf @@ -5802,11 +5939,7 @@ def test_fi_get_prop(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX') - cmd = ['daos', - 'container', - 'get-prop', - pool.id(), - container.id()] + cmd = ['daos', 'container', 'get-prop', pool.id(), container.id()] test_cmd = AllocFailTest(conf, 'cont-get-prop', cmd) test_cmd.wf = wf @@ -5827,12 +5960,7 @@ def test_fi_get_attr(server, conf, wf): container.set_attrs({attr_name: 'value'}) - cmd = ['daos', - 'container', - 'get-attr', - pool.id(), - container.id(), - attr_name] + cmd = ['daos', 'container', 'get-attr', pool.id(), container.id(), attr_name] test_cmd = AllocFailTest(conf, 'cont-get-attr', cmd) test_cmd.wf = wf @@ -5851,11 +5979,7 @@ def test_fi_cont_query(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX') - cmd = ['daos', - 'container', - 'query', - pool.id(), - container.id()] + cmd = ['daos', 'container', 'query', pool.id(), container.id()] test_cmd = AllocFailTest(conf, 'cont-query', cmd) test_cmd.wf = wf @@ -5874,11 +5998,7 @@ def test_fi_cont_check(server, conf, wf): container = create_cont(conf, pool) - cmd = ['daos', - 'container', - 'check', - pool.id(), - container.id()] + cmd = ['daos', 'container', 'check', pool.id(), container.id()] test_cmd = AllocFailTest(conf, 'cont-check', cmd) test_cmd.wf = wf @@ -5895,10 +6015,7 @@ def test_alloc_fail(server, conf): """Run 'daos' client binary with fault injection""" pool = server.get_test_pool_obj() - cmd = ['daos', - 'cont', - 'list', - pool.id()] + cmd = ['daos', 'cont', 'list', pool.id()] test_cmd = AllocFailTest(conf, 'pool-list-containers', cmd) # Create at least one container, and record what the output should be when @@ -5920,7 +6037,9 @@ def test_dfs_check(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX', label='fsck') - with tempfile.TemporaryDirectory(prefix='fsck_src_',) as src_dir: + with tempfile.TemporaryDirectory( + prefix='fsck_src_', 
+ ) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5932,13 +6051,23 @@ def test_dfs_check(server, conf, wf): # os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', f'{src_dir}/new_dir', - '--dst', f'daos://{pool.id()}/{container.id()}']) + rc = run_daos_cmd( + conf, + [ + 'filesystem', + 'copy', + '--src', + f'{src_dir}/new_dir', + '--dst', + f'daos://{pool.id()}/{container.id()}', + ], + ) print(rc) assert rc.returncode == 0, rc test_cmd = AllocFailTest( - conf, 'fs-check', ['daos', 'filesystem', 'check', pool.id(), container.id()]) + conf, 'fs-check', ['daos', 'filesystem', 'check', pool.id(), container.id()] + ) test_cmd.wf = wf test_cmd.single_process = True test_cmd.check_daos_stderr = True @@ -5971,7 +6100,6 @@ def server_fi(args): setup_log_test(conf) with DaosServer(conf, wf=wf, test_class='server-fi', enable_fi=True) as server: - pool = server.get_test_pool_obj() cont = create_cont(conf, pool=pool, ctype='POSIX', label='server_test') @@ -5980,12 +6108,17 @@ def server_fi(args): for idx in range(100): server.run_daos_client_cmd_pil4dfs( - ['touch', f'file.{idx}'], container=cont, check=False, report=False) + ['touch', f'file.{idx}'], container=cont, check=False, report=False + ) server.run_daos_client_cmd_pil4dfs( ['dd', 'if=/dev/zero', f'of=file.{idx}', 'bs=1', 'count=1024'], - container=cont, check=False, report=False) + container=cont, + check=False, + report=False, + ) server.run_daos_client_cmd_pil4dfs( - ['rm', '-f', f'file.{idx}'], container=cont, check=False, report=False) + ['rm', '-f', f'file.{idx}'], container=cont, check=False, report=False + ) # Turn off fault injection again to assist in server shutdown. server.set_fi(probability=0) @@ -6011,8 +6144,9 @@ def run(wf, args): if args.mode == 'fi': fi_test = True else: - with DaosServer(conf, test_class='first', wf=wf_server, - fatal_errors=fatal_errors) as server: + with DaosServer( + conf, test_class='first', wf=wf_server, fatal_errors=fatal_errors + ) as server: if args.mode == 'launch': run_in_fg(server, conf, args) elif args.mode == 'overlay': @@ -6037,16 +6171,18 @@ def run(wf, args): fatal_errors.add_result(server.set_fi()) if args.mode == 'all': - with DaosServer(conf, test_class='restart', wf=wf_server, - fatal_errors=fatal_errors) as server: + with DaosServer( + conf, test_class='restart', wf=wf_server, fatal_errors=fatal_errors + ) as server: pass # If running all tests then restart the server under valgrind. # This is really, really slow so just do cont list, then # exit again. if args.server_valgrind: - with DaosServer(conf, test_class='valgrind', wf=wf_server, valgrind=True, - fatal_errors=fatal_errors) as server: + with DaosServer( + conf, test_class='valgrind', wf=wf_server, valgrind=True, fatal_errors=fatal_errors + ) as server: pools = server.fetch_pools() for pool in pools: cmd = ['pool', 'query', pool.id()] @@ -6064,8 +6200,9 @@ def run(wf, args): args.server_debug = 'INFO' args.memcheck = 'no' args.dfuse_debug = 'WARN' - with DaosServer(conf, test_class='no-debug', wf=wf_server, - fatal_errors=fatal_errors) as server: + with DaosServer( + conf, test_class='no-debug', wf=wf_server, fatal_errors=fatal_errors + ) as server: if fi_test: # Most of the fault injection tests go here, they are then run on docker containers # so can be performed in parallel. 
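The hunks above only re-wrap the fault-injection harness, but the pattern they implement is worth keeping in mind while reviewing them: each AllocFailTestRun arms exactly one fault (id 0, probability 1, interval N, max_faults 1), the harness keeps a bounded pool of such children in flight, and the sweep ends once a run completes without its fault firing. A minimal, self-contained sketch of that sweep is below; it is illustrative only — run_one() is a hypothetical stand-in for AllocFailTestRun, and the D_FI_CONFIG variable name and JSON config format are assumptions, not something this patch defines.

    import json
    import os
    import subprocess
    import tempfile


    def run_one(cmd, loc):
        """Run cmd with a single fault armed at allocation site number loc.

        Returns True if the fault fired.  The fault_config layout mirrors the dictionaries
        built in AllocFailTestRun.start(); treating a non-zero exit code as "fault fired"
        is a simplification - the real harness inspects the client log instead.
        """
        faults = {
            'fault_config': [
                {'id': 0, 'probability_x': 1, 'probability_y': 1, 'interval': loc, 'max_faults': 1}
            ]
        }
        with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as cfg:
            json.dump(faults, cfg)
        env = dict(os.environ, D_FI_CONFIG=cfg.name)  # variable name is an assumption
        ret = subprocess.run(cmd, env=env, check=False)
        os.unlink(cfg.name)
        return ret.returncode != 0


    def sweep(cmd):
        """Walk the fault location forward until a run completes with no fault injected."""
        loc = 0
        while True:
            loc += 1
            if not run_one(cmd, loc):
                # loc now exceeds the number of faultable allocations the command makes,
                # so every site has been exercised once.
                return loc

As in the harness above, real usage would launch several locations concurrently (up to max_child) and post-process each child's log and stderr rather than relying on its exit code.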
@@ -6193,11 +6330,13 @@ def main(): print(f"Tests are: {','.join(sorted(tests))}") sys.exit(1) - wf = WarningsFactory('nlt-errors.json', - post_error=True, - check='Log file errors', - class_id=args.class_name, - junit=True) + wf = WarningsFactory( + 'nlt-errors.json', + post_error=True, + check='Log file errors', + class_id=args.class_name, + junit=True, + ) try: fatal_errors = run(wf, args) diff --git a/utils/run_utest.py b/utils/run_utest.py index 4b0d7f941e1..6d059274e95 100755 --- a/utils/run_utest.py +++ b/utils/run_utest.py @@ -8,6 +8,7 @@ """ import argparse import json + # pylint: disable=broad-except import os import re @@ -24,7 +25,7 @@ def check_version(): """Ensure python version is compatible""" if sys.version_info < (3, 6): - print("Python version 3.6 or greater is required""") + print("Python version 3.6 or greater is required" "") sys.exit(-1) @@ -50,8 +51,9 @@ def setup_junit(memcheck): return (suite, test) -class BaseResults(): +class BaseResults: """Keep track of test results""" + def __init__(self): """Initializes the values""" self.results = {"tests": 0, "errors": 0, "failures": 0, "fail_msg": "", "error_msg": ""} @@ -87,6 +89,7 @@ def add_error(self, error_str): class Results(BaseResults): """Keep track of test results to produce final report""" + def __init__(self, memcheck): """Class to keep track of results""" super().__init__() @@ -99,19 +102,23 @@ def create_junit(self): if os.environ.get("CMOCKA_XML_FILE", None) is None: return if self.results["failures"]: - self.test.add_failure_info(message=f"{self.results['failures']} of " - + f"{self.results['tests']} failed", - output=self.results["fail_msg"]) + self.test.add_failure_info( + message=f"{self.results['failures']} of " + f"{self.results['tests']} failed", + output=self.results["fail_msg"], + ) if self.results["errors"]: - self.test.add_error_info(message=f"{self.results['errors']} of " - + f"{self.results['tests']} failed", - output=self.results["error_msg"]) + self.test.add_error_info( + message=f"{self.results['errors']} of " + f"{self.results['tests']} failed", + output=self.results["error_msg"], + ) write_xml_result(self.name, self.suite) def print_results(self): """Print the output""" - print(f"Ran {self.results['tests']} tests, {self.results['failures']} tests failed, " - + f"{self.results['errors']} tests had errors") + print( + f"Ran {self.results['tests']} tests, {self.results['failures']} tests failed, " + + f"{self.results['errors']} tests had errors" + ) if self.results["failures"]: print("FAILURES:") print(self.results["fail_msg"]) @@ -120,8 +127,9 @@ def print_results(self): print(self.results["error_msg"]) -class ValgrindHelper(): +class ValgrindHelper: """Helper class to setup xml command""" + @staticmethod def get_xml_name(name): """Get the xml file name""" @@ -135,11 +143,19 @@ def get_supp(base): @staticmethod def setup_cmd(base, cmd, name): """Return a new command using valgrind""" - cmd_prefix = ["valgrind", "--leak-check=full", "--show-reachable=yes", "--num-callers=20", - "--error-limit=no", "--fair-sched=try", - f"--suppressions={ValgrindHelper.get_supp(base)}", - "--gen-suppressions=all", "--error-exitcode=42", "--xml=yes", - f"--xml-file={ValgrindHelper.get_xml_name(name)}"] + cmd_prefix = [ + "valgrind", + "--leak-check=full", + "--show-reachable=yes", + "--num-callers=20", + "--error-limit=no", + "--fair-sched=try", + f"--suppressions={ValgrindHelper.get_supp(base)}", + "--gen-suppressions=all", + "--error-exitcode=42", + "--xml=yes", + 
f"--xml-file={ValgrindHelper.get_xml_name(name)}", + ] return cmd_prefix + cmd @@ -149,8 +165,7 @@ def run_cmd(cmd, output_log=None, env=None): if output_log: with open(output_log, "w", encoding="UTF-8") as output: print(f"RUNNING COMMAND {' '.join(cmd)}\n Log: {output_log}") - ret = subprocess.run(cmd, check=False, env=env, stdout=output, - stderr=subprocess.STDOUT) + ret = subprocess.run(cmd, check=False, env=env, stdout=output, stderr=subprocess.STDOUT) else: print(f"RUNNING COMMAND {' '.join(cmd)}") ret = subprocess.run(cmd, check=False, env=env) @@ -204,8 +219,10 @@ def process_cmocka(fname, suite_name): else: match = re.search("^(.*case )name(.*$)", line) if match: - outfile.write(f"{match.group(1)}classname=\"UTEST_{suite_name}.{suite}\"" - + f" name{match.group(2)}\n") + outfile.write( + f"{match.group(1)}classname=\"UTEST_{suite_name}.{suite}\"" + + f" name{match.group(2)}\n" + ) continue match = re.search("^(.*case classname=\")(.*$)", line) if match: @@ -225,8 +242,9 @@ def for_each_file(path, operate, arg, ext=None): operate(full_path, arg) -class AIO(): +class AIO: """Handle AIO specific setup and teardown""" + def __init__(self, mount, device=None): """Initialize an AIO device""" self.config_name = os.path.join(mount, "daos_nvme.conf") @@ -307,8 +325,18 @@ def prepare_test(self, name="AIO_1", min_size=4): if self.device is None: run_cmd(["dd", "if=/dev/zero", f"of={self.fname}", "bs=1G", f"count={min_size}"]) else: - run_cmd(["sudo", "-E", "dd", "if=/dev/zero", f"of={self.fname}", "bs=4K", "count=1", - "conv=notrunc"]) + run_cmd( + [ + "sudo", + "-E", + "dd", + "if=/dev/zero", + f"of={self.fname}", + "bs=4K", + "count=1", + "conv=notrunc", + ] + ) self.create_config(name) def finalize_test(self): @@ -327,7 +355,7 @@ def finalize(self): os.unlink(self.config_name) -class Test(): +class Test: """Define a test""" test_num = 1 @@ -341,8 +369,7 @@ def __init__(self, config, path_info, args): if env_vars: self.env.update(env_vars) self.warn_if_missing = config.get("warn_if_missing", None) - self.aio = {"aio": config.get("aio", None), - "size": config.get("size", 4)} + self.aio = {"aio": config.get("aio", None), "size": config.get("size", 4)} if self.filter(args.test_filter): print(f"Filtered test {' '.join(self.cmd)}") raise TestSkipped() @@ -429,8 +456,12 @@ def run(self, base, memcheck, sudo): cmd = [os.path.join(base, self.cmd[0])] + self.cmd[1:] if memcheck: if os.path.splitext(cmd[0])[-1] in [".sh", ".py"]: - self.env.update({"USE_VALGRIND": "memcheck", - "VALGRIND_SUPP": ValgrindHelper.get_supp(self.root_dir())}) + self.env.update( + { + "USE_VALGRIND": "memcheck", + "VALGRIND_SUPP": ValgrindHelper.get_supp(self.root_dir()), + } + ) else: cmd = ValgrindHelper.setup_cmd(self.root_dir(), cmd, self.name) if sudo: @@ -469,8 +500,9 @@ def remove_empty_files(self, log_dir): print(f" produced {fname}") -class Suite(): +class Suite: """Define a suite""" + def __init__(self, path_info, config, args): """Initialize a test suite""" self.name = config["name"] @@ -561,8 +593,10 @@ def run_suite(self, args, aio): try: ret = test.run(self.base, args.memcheck, self.sudo) if ret != 0: - results.add_failure(f"{' '.join(test.get_last())} failed: ret={ret} " - + f"logs={test.log_dir()}") + results.add_failure( + f"{' '.join(test.get_last())} failed: ret={ret} " + + f"logs={test.log_dir()}" + ) except Exception: results.add_error(f"{traceback.format_exc()}") ret = 1 # prevent reporting errors on teardown too @@ -603,18 +637,25 @@ def get_args(): """Parse the arguments""" parser = 
argparse.ArgumentParser(description='Run DAOS unit tests') parser.add_argument('--memcheck', action='store_true', help='Run tests with Valgrind memcheck') - parser.add_argument('--test_filter', default=None, - help='Regular expression to select tests to run') - parser.add_argument('--suite_filter', default=None, - help='Regular expression to select suites to run') - parser.add_argument('--no-fail-on-error', action='store_true', - help='Disable non-zero return code on failure') - parser.add_argument('--sudo', choices=['yes', 'only', 'no'], default='yes', - help='How to handle tests requiring sudo') - parser.add_argument('--bdev', default=None, - help="Device to use for AIO, will create file by default") - parser.add_argument('--log_dir', default="/tmp/daos_utest", - help="Path to store test logs") + parser.add_argument( + '--test_filter', default=None, help='Regular expression to select tests to run' + ) + parser.add_argument( + '--suite_filter', default=None, help='Regular expression to select suites to run' + ) + parser.add_argument( + '--no-fail-on-error', action='store_true', help='Disable non-zero return code on failure' + ) + parser.add_argument( + '--sudo', + choices=['yes', 'only', 'no'], + default='yes', + help='How to handle tests requiring sudo', + ) + parser.add_argument( + '--bdev', default=None, help="Device to use for AIO, will create file by default" + ) + parser.add_argument('--log_dir', default="/tmp/daos_utest", help="Path to store test logs") return parser.parse_args() @@ -623,10 +664,12 @@ def get_path_info(args): script_dir = os.path.dirname(os.path.realpath(__file__)) daos_base = os.path.realpath(os.path.join(script_dir, '..')) build_vars_file = os.path.join(daos_base, '.build_vars.json') - path_info = {"DAOS_BASE": daos_base, - "UTEST_YAML": os.path.join(daos_base, "utils", "utest.yaml"), - "MOUNT_DIR": "/mnt/daos", - "LOG_DIR": args.log_dir} + path_info = { + "DAOS_BASE": daos_base, + "UTEST_YAML": os.path.join(daos_base, "utils", "utest.yaml"), + "MOUNT_DIR": "/mnt/daos", + "LOG_DIR": args.log_dir, + } try: with open(build_vars_file, "r", encoding="UTF-8") as vars_file: build_vars = json.load(vars_file) diff --git a/utils/sl/fake_scons/SCons/Action/__init__.py b/utils/sl/fake_scons/SCons/Action/__init__.py index 752d4fcbf60..4eb40d5f805 100644 --- a/utils/sl/fake_scons/SCons/Action/__init__.py +++ b/utils/sl/fake_scons/SCons/Action/__init__.py @@ -20,7 +20,7 @@ """Fake Action class""" -class Action(): +class Action: """Fake Action""" def __init__(self, *args, **kw): diff --git a/utils/sl/fake_scons/SCons/Builder/__init__.py b/utils/sl/fake_scons/SCons/Builder/__init__.py index 7af09adbff3..4b5e9417240 100644 --- a/utils/sl/fake_scons/SCons/Builder/__init__.py +++ b/utils/sl/fake_scons/SCons/Builder/__init__.py @@ -27,5 +27,4 @@ class Builder: ARGUMENTS = {} -__all__ = ['Builder', - 'ARGUMENTS'] +__all__ = ['Builder', 'ARGUMENTS'] diff --git a/utils/sl/fake_scons/SCons/Errors/__init__.py b/utils/sl/fake_scons/SCons/Errors/__init__.py index b2f2091852d..6826cc4c901 100644 --- a/utils/sl/fake_scons/SCons/Errors/__init__.py +++ b/utils/sl/fake_scons/SCons/Errors/__init__.py @@ -34,5 +34,4 @@ class UserError(Exception): ARGUMENTS = {} -__all__ = ['StopError', - 'ARGUMENTS'] +__all__ = ['StopError', 'ARGUMENTS'] diff --git a/utils/sl/fake_scons/SCons/Script/__init__.py b/utils/sl/fake_scons/SCons/Script/__init__.py index f490ba78cbe..eca2116ea25 100644 --- a/utils/sl/fake_scons/SCons/Script/__init__.py +++ b/utils/sl/fake_scons/SCons/Script/__init__.py @@ -28,14 +28,14 
@@ # pylint: disable=unused-argument -class SConscript(): +class SConscript: """Fake SConscript""" def __init__(self, *_args, **_kw): """Init function""" -class DefaultEnvironment(): +class DefaultEnvironment: """Default environment""" def __init__(self, *_args, **_kwargs): @@ -196,7 +196,7 @@ def Alias(self, *_args, **_kw): def __getitem__(self, x): """Fake __getitem__""" - class myItem(): + class myItem: """Fake class for Env variables""" def __index__(self): @@ -339,7 +339,7 @@ def require(self, env, *kw, headers_only=False): return -class Variables(): +class Variables: """Fake variables""" def __init__(self, *_args, **_kw): @@ -361,7 +361,7 @@ def Save(self, *_args, **_kw): """Fake Save""" -class Configure(): +class Configure: """Fake Configure""" def __init__(self, *_args, **_kw): @@ -412,14 +412,14 @@ def Finish(self): """Fake finish""" -class Literal(): +class Literal: """Fake Literal""" def __init__(self, *_args, **_kw): """Constructor""" -class Dir(): +class Dir: """Fake Dir""" def __init__(self, *_args, **_kw): @@ -431,11 +431,11 @@ def srcnode(self): return self -class Scanner(): +class Scanner: """Fake Scanner""" -class File(): +class File: """Fake File""" @@ -458,7 +458,7 @@ def SetOption(*_args, **_kw): return True -class Help(): +class Help: """Fake Help""" def __init__(self, *_args, **_kw): @@ -541,29 +541,31 @@ def Depends(*_args, **_kw): Environment = DefaultEnvironment -__all__ = ['DefaultEnvironment', - 'Variables', - 'Configure', - 'GetOption', - 'SetOption', - 'Execute', - 'Depends', - 'Platform', - 'Literal', - 'Dir', - 'Help', - 'Glob', - 'Split', - 'Exit', - 'Import', - 'Export', - 'SConscript', - 'Default', - 'AlwaysBuild', - 'Command', - 'Builder', - 'AddOption', - 'VariantDir', - 'COMMAND_LINE_TARGETS', - 'BUILD_TARGETS', - 'DEFAULT_TARGETS'] +__all__ = [ + 'DefaultEnvironment', + 'Variables', + 'Configure', + 'GetOption', + 'SetOption', + 'Execute', + 'Depends', + 'Platform', + 'Literal', + 'Dir', + 'Help', + 'Glob', + 'Split', + 'Exit', + 'Import', + 'Export', + 'SConscript', + 'Default', + 'AlwaysBuild', + 'Command', + 'Builder', + 'AddOption', + 'VariantDir', + 'COMMAND_LINE_TARGETS', + 'BUILD_TARGETS', + 'DEFAULT_TARGETS', +] diff --git a/utils/sl/fake_scons/SCons/Subst/__init__.py b/utils/sl/fake_scons/SCons/Subst/__init__.py index 8b96484345b..de6bee1f282 100644 --- a/utils/sl/fake_scons/SCons/Subst/__init__.py +++ b/utils/sl/fake_scons/SCons/Subst/__init__.py @@ -20,7 +20,7 @@ """Fake scons environment shutting up pylint on SCons files""" -class Literal(): +class Literal: """Fake Literal""" def __init__(self, lstr): diff --git a/utils/sl/fake_scons/SCons/Variables/__init__.py b/utils/sl/fake_scons/SCons/Variables/__init__.py index cd229c87f68..23d6d8a6679 100644 --- a/utils/sl/fake_scons/SCons/Variables/__init__.py +++ b/utils/sl/fake_scons/SCons/Variables/__init__.py @@ -20,8 +20,9 @@ """Fake Variables class""" -class PathVariable(): +class PathVariable: """Fake PathVariable""" + PathIsDirCreate = 1 PathIsDir = 2 PathAccept = 3 @@ -30,21 +31,21 @@ def __init__(self, *args, **kw): pass -class ListVariable(): +class ListVariable: """Fake ListVariable""" def __init__(self, *args, **kw): pass -class BoolVariable(): +class BoolVariable: """Fake BoolVariable""" def __init__(self, *args, **kw): pass -class EnumVariable(): +class EnumVariable: """Fake EnumVariable""" def __init__(self, *args, **kw): @@ -53,8 +54,4 @@ def __init__(self, *args, **kw): ARGUMENTS = {} -__all__ = ['PathVariable', - 'ListVariable', - 'BoolVariable', - 'EnumVariable', - 'ARGUMENTS'] 
+__all__ = ['PathVariable', 'ListVariable', 'BoolVariable', 'EnumVariable', 'ARGUMENTS'] diff --git a/utils/sl/fake_scons/SCons/Warnings/__init__.py b/utils/sl/fake_scons/SCons/Warnings/__init__.py index 67d7158341d..6192650469b 100644 --- a/utils/sl/fake_scons/SCons/Warnings/__init__.py +++ b/utils/sl/fake_scons/SCons/Warnings/__init__.py @@ -34,5 +34,5 @@ class MissingSConscriptWarning(Exception): # pylint: disable-next=redefined-builtin,too-few-public-methods -class Warning(): +class Warning: """Fake Warning""" diff --git a/utils/sl/fake_scons/SCons/__init__.py b/utils/sl/fake_scons/SCons/__init__.py index a1b958e9f1c..58c1daaa79d 100644 --- a/utils/sl/fake_scons/SCons/__init__.py +++ b/utils/sl/fake_scons/SCons/__init__.py @@ -19,10 +19,4 @@ # SOFTWARE. """Fake SCons""" -__all__ = ['Script', - 'Action', - 'Variables', - 'Builder', - 'Errors', - 'Subst', - 'Warnings'] +__all__ = ['Script', 'Action', 'Variables', 'Builder', 'Errors', 'Subst', 'Warnings'] From 2a4a5dc96142232ce38fae3a8c2f1eae8eb7660c Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:22:25 +0000 Subject: [PATCH 04/26] Change more files. Required-githooks: true Signed-off-by: Ashley Pittman --- SConstruct | 288 +++++++++------ site_scons/components/__init__.py | 360 +++++++++++-------- site_scons/env_modules.py | 28 +- site_scons/prereq_tools/base.py | 264 +++++++++----- site_scons/site_tools/compiler_setup.py | 43 ++- site_scons/site_tools/daos_builder.py | 5 +- site_scons/site_tools/doneapi.py | 30 +- site_scons/site_tools/go_builder.py | 21 +- site_scons/site_tools/protoc/__init__.py | 26 +- site_scons/site_tools/stack_analyzer.py | 52 ++- src/tests/ftest/launch.py | 2 + src/tests/ftest/util/agent_utils.py | 8 +- src/tests/ftest/util/apricot/apricot/test.py | 15 +- src/tests/ftest/util/command_utils.py | 24 +- src/tests/ftest/util/data_mover_test_base.py | 12 +- src/tests/ftest/util/dmg_utils.py | 1 + src/tests/ftest/util/job_manager_utils.py | 10 +- src/tests/ftest/util/server_utils.py | 10 +- src/tests/ftest/util/soak_test_base.py | 33 +- src/tests/ftest/util/soak_utils.py | 13 +- src/tests/ftest/util/test_utils_pool.py | 1 + 21 files changed, 780 insertions(+), 466 deletions(-) diff --git a/SConstruct b/SConstruct index 9dbcfaf2255..9d8c0d9833e 100644 --- a/SConstruct +++ b/SConstruct @@ -9,11 +9,13 @@ import SCons.Warnings from prereq_tools import PreReqComponent # pylint: disable=reimported if sys.version_info.major < 3: - print(""""Python 2.7 is no longer supported in the DAOS build. + print( + """"Python 2.7 is no longer supported in the DAOS build. Install python3 version of SCons. 
On some platforms this package does not install the scons binary so your command may need to use scons-3 instead of scons or you will need to create an alias or script by the same name to -wrap scons-3.""") +wrap scons-3.""" + ) Exit(1) SCons.Warnings.warningAsException() @@ -22,63 +24,77 @@ SCons.Warnings.warningAsException() def add_command_line_options(): """Add command line options""" - AddOption('--preprocess', - dest='preprocess', - action='store_true', - default=False, - help='Preprocess selected files for profiling') - AddOption('--no-rpath', - dest='no_rpath', - action='store_true', - default=False, - help='Disable rpath') - AddOption('--analyze-stack', - dest='analyze_stack', - metavar='ARGSTRING', - default=None, - help='Gather stack usage statistics after build') + AddOption( + '--preprocess', + dest='preprocess', + action='store_true', + default=False, + help='Preprocess selected files for profiling', + ) + AddOption( + '--no-rpath', dest='no_rpath', action='store_true', default=False, help='Disable rpath' + ) + AddOption( + '--analyze-stack', + dest='analyze_stack', + metavar='ARGSTRING', + default=None, + help='Gather stack usage statistics after build', + ) # We need to sometimes use alternate tools for building and need to add them to the PATH in the # environment. - AddOption('--prepend-path', - dest='prepend_path', - default=None, - help="String to prepend to PATH environment variable.") + AddOption( + '--prepend-path', + dest='prepend_path', + default=None, + help="String to prepend to PATH environment variable.", + ) # Allow specifying the locale to be used. Default "en_US.UTF8" - AddOption('--locale-name', - dest='locale_name', - default='en_US.UTF8', - help='locale to use for building. [%default]') - - AddOption('--require-optional', - dest='require_optional', - action='store_true', - default=False, - help='Fail the build if check_component fails') - - AddOption('--build-deps', - dest='build_deps', - type='choice', - choices=['yes', 'no', 'only', 'build-only'], - default='no', - help="Automatically download and build sources. (yes|no|only|build-only) [no]") + AddOption( + '--locale-name', + dest='locale_name', + default='en_US.UTF8', + help='locale to use for building. [%default]', + ) + + AddOption( + '--require-optional', + dest='require_optional', + action='store_true', + default=False, + help='Fail the build if check_component fails', + ) + + AddOption( + '--build-deps', + dest='build_deps', + type='choice', + choices=['yes', 'no', 'only', 'build-only'], + default='no', + help="Automatically download and build sources. (yes|no|only|build-only) [no]", + ) # We want to be able to check what dependencies are needed without # doing a build, similar to --dry-run. We can not use --dry-run # on the command line because it disables running the tests for the # the dependencies. So we need a new option - AddOption('--check-only', - dest='check_only', - action='store_true', - default=False, - help="Check dependencies only, do not download or build.") + AddOption( + '--check-only', + dest='check_only', + action='store_true', + default=False, + help="Check dependencies only, do not download or build.", + ) # Need to be able to look for an alternate build.config file. - AddOption('--build-config', - dest='build_config', - default=os.path.join(Dir('#').abspath, 'utils', 'build.config'), - help='build config file to use. 
[%default]') + AddOption( + '--build-config', + dest='build_config', + default=os.path.join(Dir('#').abspath, 'utils', 'build.config'), + help='build config file to use. [%default]', + ) def parse_and_save_conf(env, opts_file): @@ -89,14 +105,26 @@ def parse_and_save_conf(env, opts_file): opts = Variables(opts_file) - opts.Add(EnumVariable('SCONS_ENV', "Default SCons environment inheritance", - 'minimal', ['minimal', 'full'], ignorecase=2)) + opts.Add( + EnumVariable( + 'SCONS_ENV', + "Default SCons environment inheritance", + 'minimal', + ['minimal', 'full'], + ignorecase=2, + ) + ) opts.Add('GO_BIN', 'Full path to go binary', None) - opts.Add(PathVariable('ENV_SCRIPT', "Location of environment script", - os.path.expanduser('~/.scons_localrc'), - PathVariable.PathAccept)) + opts.Add( + PathVariable( + 'ENV_SCRIPT', + "Location of environment script", + os.path.expanduser('~/.scons_localrc'), + PathVariable.PathAccept, + ) + ) # Finally parse the command line options and save to file if required. opts.Update(env) @@ -118,7 +146,7 @@ def build_misc(build_prefix): def update_rpm_version(version, tag): - """ Update the version (and release) in the RPM spec file """ + """Update the version (and release) in the RPM spec file""" # pylint: disable=consider-using-f-string spec = open("utils/rpms/daos.spec", "r").readlines() # pylint: disable=consider-using-with @@ -126,22 +154,22 @@ def update_rpm_version(version, tag): release = 0 for line_num, line in enumerate(spec): if line.startswith("Version:"): - current_version = line[line.rfind(' ') + 1:].rstrip() + current_version = line[line.rfind(' ') + 1 :].rstrip() if version < current_version: - print("You cannot create a new version ({}) lower than the RPM " - "spec file has currently ({})".format(version, - current_version)) + print( + "You cannot create a new version ({}) lower than the RPM " + "spec file has currently ({})".format(version, current_version) + ) return False if version > current_version: spec[line_num] = "Version: {}\n".format(version) if line.startswith("Release:"): if version == current_version: - current_release = int(line[line.rfind(' ') + 1:line.find('%')]) + current_release = int(line[line.rfind(' ') + 1 : line.find('%')]) release = current_release + 1 else: release = 1 - spec[line_num] = "Release: {}%{{?relval}}%{{?dist}}\n".\ - format(release) + spec[line_num] = "Release: {}%{{?relval}}%{{?dist}}\n".format(release) if line == "%changelog\n": cmd = 'rpmdev-packager' try: @@ -149,22 +177,21 @@ def update_rpm_version(version, tag): pkg_st = subprocess.Popen(cmd, stdout=subprocess.PIPE) # nosec packager = pkg_st.communicate()[0].strip().decode('UTF-8') except OSError: - print("You need to have the rpmdev-packager tool (from the " - "rpmdevtools RPM on EL7) in order to make releases.\n\n" - "Additionally, you should define %packager in " - "~/.rpmmacros as such:\n" - "%packager John A. Doe " - "so that package changelog entries are well defined") + print( + "You need to have the rpmdev-packager tool (from the " + "rpmdevtools RPM on EL7) in order to make releases.\n\n" + "Additionally, you should define %packager in " + "~/.rpmmacros as such:\n" + "%packager John A. 
Doe " + "so that package changelog entries are well defined" + ) return False date_str = time.strftime('%a %b %d %Y', time.gmtime()) spec.insert(line_num + 1, "\n") - spec.insert(line_num + 1, - "- Version bump up to {}\n".format(tag)) - spec.insert(line_num + 1, - '* {} {} - {}-{}\n'.format(date_str, - packager, - version, - release)) + spec.insert(line_num + 1, "- Version bump up to {}\n".format(tag)) + spec.insert( + line_num + 1, '* {} {} - {}-{}\n'.format(date_str, packager, version, release) + ) break open("utils/rpms/daos.spec", "w").writelines(spec) # pylint: disable=consider-using-with @@ -187,10 +214,8 @@ def check_for_release_target(): # pylint: disable=too-many-locals variables = Variables() variables.Add('RELEASE', 'Set to the release version to make', None) - variables.Add('RELEASE_BASE', 'Set to the release version to make', - 'master') - variables.Add('ORG_NAME', 'The GitHub project to do the release on.', - 'daos-stack') + variables.Add('RELEASE_BASE', 'Set to the release version to make', 'master') + variables.Add('ORG_NAME', 'The GitHub project to do the release on.', 'daos-stack') variables.Add('REMOTE_NAME', 'The remoten name release on.', 'origin') env = Environment(variables=variables) @@ -209,8 +234,10 @@ def check_for_release_target(): # pylint: disable=too-many-locals if dash > 0: version = tag[0:dash] else: - print("** Final releases should be made on GitHub directly " - "using a previous pre-release such as a release candidate.\n") + print( + "** Final releases should be made on GitHub directly " + "using a previous pre-release such as a release candidate.\n" + ) question = "Are you sure you want to continue? (y/N): " answer = None while answer not in ["y", "n", ""]: @@ -222,14 +249,16 @@ def check_for_release_target(): # pylint: disable=too-many-locals try: # pylint: disable=consider-using-with - token = yaml.safe_load(open(os.path.join(os.path.expanduser("~"), - ".config", "hub"), 'r') - )['github.com'][0]['oauth_token'] + token = yaml.safe_load( + open(os.path.join(os.path.expanduser("~"), ".config", "hub"), 'r') + )['github.com'][0]['oauth_token'] except IOError as excpn: if excpn.errno == errno.ENOENT: - print("You need to install hub (from the hub RPM on EL7) to " - "and run it at least once to create an authorization " - "token in order to create releases") + print( + "You need to install hub (from the hub RPM on EL7) to " + "and run it at least once to create an authorization " + "token in order to create releases" + ) Exit(1) raise @@ -238,12 +267,12 @@ def check_for_release_target(): # pylint: disable=too-many-locals print("Creating a branch for the PR...") repo = pygit2.Repository('.git') try: - base_ref = repo.lookup_reference( - 'refs/remotes/{}/{}'.format(remote_name, base_branch)) + base_ref = repo.lookup_reference('refs/remotes/{}/{}'.format(remote_name, base_branch)) except KeyError: - print("Branch {}/{} is not a valid branch\n" - "See https://github.com/{}/daos/branches".format( - remote_name, base_branch, org_name)) + print( + "Branch {}/{} is not a valid branch\n" + "See https://github.com/{}/daos/branches".format(remote_name, base_branch, org_name) + ) Exit(1) # older pygit2 didn't have AlreadyExistsError @@ -255,9 +284,10 @@ def check_for_release_target(): # pylint: disable=too-many-locals try: repo.branches.create(branch, repo[base_ref.target]) except already_exists_error_exception: - print("Branch {} exists locally already.\n" - "You need to delete it or rename it to try again.".format( - branch)) + print( + "Branch {} exists locally 
already.\n" + "You need to delete it or rename it to try again.".format(branch) + ) Exit(1) # and check it out @@ -266,8 +296,10 @@ def check_for_release_target(): # pylint: disable=too-many-locals print("Updating the RPM specfile...") if not update_rpm_version(version, tag): - print("Branch has been left in the created state. You will have " - "to clean it up manually.") + print( + "Branch has been left in the created state. You will have " + "to clean it up manually." + ) Exit(1) print("Updating the VERSION and TAG files...") @@ -283,21 +315,20 @@ def check_for_release_target(): # pylint: disable=too-many-locals author = repo.default_signature committer = repo.default_signature summary = "Update version to v{}".format(tag) - message = "{}\n\n" \ - "Signed-off-by: {} <{}>".format(summary, - repo.default_signature.name, - repo.default_signature.email) + message = "{}\n\n" "Signed-off-by: {} <{}>".format( + summary, repo.default_signature.name, repo.default_signature.email + ) index.add("utils/rpms/daos.spec") index.add("VERSION") index.add("TAG") index.write() tree = index.write_tree() - repo.create_commit('HEAD', author, committer, message, tree, - [repo.head.target]) + repo.create_commit('HEAD', author, committer, message, tree, [repo.head.target]) # set up authentication callback class MyCallbacks(pygit2.RemoteCallbacks): # pylint: disable=too-few-public-methods - """ Callbacks for pygit2 """ + """Callbacks for pygit2""" + @staticmethod def credentials(_url, username_from_url, allowed_types): """setup credentials""" @@ -306,8 +337,10 @@ def check_for_release_target(): # pylint: disable=too-many-locals # Use ssh agent for authentication return pygit2.KeypairFromAgent(username_from_url) else: - print("No supported credential types allowed by remote end. SSH_AUTH_SOCK not " - "found in your environment. Are you running an ssh-agent?") + print( + "No supported credential types allowed by remote end. SSH_AUTH_SOCK not " + "found in your environment. Are you running an ssh-agent?" + ) Exit(1) return None @@ -315,8 +348,7 @@ def check_for_release_target(): # pylint: disable=too-many-locals print("Pushing the changes to GitHub...") remote = repo.remotes[remote_name] try: - remote.push(['refs/heads/{}'.format(branch)], - callbacks=MyCallbacks()) + remote.push(['refs/heads/{}'.format(branch)], callbacks=MyCallbacks()) except pygit2.GitError as err: print("Error pushing branch: {}".format(err)) Exit(1) @@ -330,18 +362,19 @@ def check_for_release_target(): # pylint: disable=too-many-locals except github.UnknownObjectException: # maybe not an organization repo = gh_context.get_repo('{}/daos'.format(org_name)) - new_pr = repo.create_pull(title=summary, body="", base=base_branch, - head="{}:{}".format(org_name, branch)) + new_pr = repo.create_pull( + title=summary, body="", base=base_branch, head="{}:{}".format(org_name, branch) + ) - print("Successfully created PR#{0} for this version " - "update:\n" - "https://github.com/{1}/daos/pull/{0}/".format(new_pr.number, - org_name)) + print( + "Successfully created PR#{0} for this version " + "update:\n" + "https://github.com/{1}/daos/pull/{0}/".format(new_pr.number, org_name) + ) print("Self-assigning the PR...") # self-assign the PR - new_pr.as_issue().add_to_assignees( - gh_context.get_user(gh_context.get_user().login)) + new_pr.as_issue().add_to_assignees(gh_context.get_user(gh_context.get_user().login)) print("Done.") @@ -358,12 +391,30 @@ def load_local(env_script, env): # Environment variables that are kept when SCONS_ENV=minimal (the default). 
-MINIMAL_ENV = ('HOME', 'TERM', 'SSH_AUTH_SOCK', 'http_proxy', 'https_proxy', 'PKG_CONFIG_PATH', - 'MODULEPATH', 'MODULESHOME', 'MODULESLOADED', 'I_MPI_ROOT', 'COVFILE') +MINIMAL_ENV = ( + 'HOME', + 'TERM', + 'SSH_AUTH_SOCK', + 'http_proxy', + 'https_proxy', + 'PKG_CONFIG_PATH', + 'MODULEPATH', + 'MODULESHOME', + 'MODULESLOADED', + 'I_MPI_ROOT', + 'COVFILE', +) # Environment variables that are also kept when LD_PRELOAD is set. -PRELOAD_ENV = ('LD_PRELOAD', 'D_LOG_FILE', 'DAOS_AGENT_DRPC_DIR', 'D_LOG_MASK', 'DD_MASK', - 'DD_SUBSYS', 'D_IL_MAX_EQ') +PRELOAD_ENV = ( + 'LD_PRELOAD', + 'D_LOG_FILE', + 'DAOS_AGENT_DRPC_DIR', + 'D_LOG_MASK', + 'DD_MASK', + 'DD_SUBSYS', + 'D_IL_MAX_EQ', +) def scons(): @@ -424,7 +475,6 @@ def scons(): # Perform this check early before loading PreReqs as if this header is missing then we want # to exit before building any dependencies. if not GetOption('help'): - config = deps_env.Configure() if not config.CheckHeader('stdatomic.h'): Exit('stdatomic.h is required to compile DAOS, update your compiler or distro version') diff --git a/site_scons/components/__init__.py b/site_scons/components/__init__.py index ede55290b73..beb6e49a966 100644 --- a/site_scons/components/__init__.py +++ b/site_scons/components/__init__.py @@ -34,7 +34,7 @@ ARM_PLATFORM = True -class InstalledComps(): +class InstalledComps: """Checks for installed components and keeps track of prior checks""" installed = [] @@ -113,92 +113,113 @@ def define_mercury(reqs): # pylint: disable-next=wrong-spelling-in-comment,fixme # TODO: change to --enable-opx once upgraded to libfabric 1.17+ - ofi_build = ['./configure', - '--prefix=$OFI_PREFIX', - '--disable-efa', - '--disable-psm2', - '--disable-psm3', - '--disable-opx', - '--without-gdrcopy'] + ofi_build = [ + './configure', + '--prefix=$OFI_PREFIX', + '--disable-efa', + '--disable-psm2', + '--disable-psm3', + '--disable-opx', + '--without-gdrcopy', + ] if reqs.target_type == 'debug': ofi_build.append('--enable-debug') else: ofi_build.append('--disable-debug') - reqs.define('ofi', - retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), - commands=[['./autogen.sh'], - ofi_build, - ['make'], - ['make', 'install']], - libs=['fabric'], - config_cb=ofi_config, - headers=['rdma/fabric.h'], - pkgconfig='libfabric', - package='libfabric-devel' if inst(reqs, 'ofi') else None, - patch_rpath=['lib'], - build_env={'CFLAGS': "-fstack-usage"}) - - ucx_configure = ['./configure', '--disable-assertions', '--disable-params-check', '--enable-mt', - '--without-go', '--without-java', '--prefix=$UCX_PREFIX', - '--libdir=$UCX_PREFIX/lib64', '--enable-cma', '--without-cuda', - '--without-gdrcopy', '--with-verbs', '--without-knem', '--without-rocm', - '--without-xpmem', '--without-fuse3', '--without-ugni'] + reqs.define( + 'ofi', + retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), + commands=[['./autogen.sh'], ofi_build, ['make'], ['make', 'install']], + libs=['fabric'], + config_cb=ofi_config, + headers=['rdma/fabric.h'], + pkgconfig='libfabric', + package='libfabric-devel' if inst(reqs, 'ofi') else None, + patch_rpath=['lib'], + build_env={'CFLAGS': "-fstack-usage"}, + ) + + ucx_configure = [ + './configure', + '--disable-assertions', + '--disable-params-check', + '--enable-mt', + '--without-go', + '--without-java', + '--prefix=$UCX_PREFIX', + '--libdir=$UCX_PREFIX/lib64', + '--enable-cma', + '--without-cuda', + '--without-gdrcopy', + '--with-verbs', + '--without-knem', + '--without-rocm', + '--without-xpmem', + '--without-fuse3', + 
'--without-ugni', + ] if reqs.target_type == 'debug': ucx_configure.extend(['--enable-debug']) else: ucx_configure.extend(['--disable-debug', '--disable-logging']) - reqs.define('ucx', - retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), - libs=['ucs', 'ucp', 'uct'], - functions={'ucs': ['ucs_debug_disable_signal']}, - headers=['uct/api/uct.h'], - pkgconfig='ucx', - commands=[['./autogen.sh'], - ucx_configure, - ['make'], - ['make', 'install'], - ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], - ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig']], - build_env={'CFLAGS': '-Wno-error'}, - package='ucx-devel' if inst(reqs, 'ucx') else None) - - mercury_build = ['cmake', - '-DBUILD_SHARED_LIBS:BOOL=ON', - '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', - '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', - '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', - '-DBUILD_DOCUMENTATION:BOOL=OFF', - '-DBUILD_EXAMPLES:BOOL=OFF', - '-DBUILD_TESTING:BOOL=ON', - '-DBUILD_TESTING_PERF:BOOL=ON', - '-DBUILD_TESTING_UNIT:BOOL=OFF', - '-DMERCURY_USE_BOOST_PP:BOOL=ON', - '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', - '-DNA_USE_SM:BOOL=ON', - '-DNA_USE_OFI:BOOL=ON', - '-DNA_USE_UCX:BOOL=ON', - '../mercury'] + reqs.define( + 'ucx', + retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), + libs=['ucs', 'ucp', 'uct'], + functions={'ucs': ['ucs_debug_disable_signal']}, + headers=['uct/api/uct.h'], + pkgconfig='ucx', + commands=[ + ['./autogen.sh'], + ucx_configure, + ['make'], + ['make', 'install'], + ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], + ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig'], + ], + build_env={'CFLAGS': '-Wno-error'}, + package='ucx-devel' if inst(reqs, 'ucx') else None, + ) + + mercury_build = [ + 'cmake', + '-DBUILD_SHARED_LIBS:BOOL=ON', + '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', + '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', + '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', + '-DBUILD_DOCUMENTATION:BOOL=OFF', + '-DBUILD_EXAMPLES:BOOL=OFF', + '-DBUILD_TESTING:BOOL=ON', + '-DBUILD_TESTING_PERF:BOOL=ON', + '-DBUILD_TESTING_UNIT:BOOL=OFF', + '-DMERCURY_USE_BOOST_PP:BOOL=ON', + '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', + '-DNA_USE_SM:BOOL=ON', + '-DNA_USE_OFI:BOOL=ON', + '-DNA_USE_UCX:BOOL=ON', + '../mercury', + ] if reqs.target_type == 'debug': mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=ON') else: mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=OFF') - reqs.define('mercury', - retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), - commands=[mercury_build, - ['make'], - ['make', 'install']], - libs=['mercury'], - pkgconfig='mercury', - requires=['boost', 'ofi', 'ucx'] + libs, - out_of_src_build=True, - package='mercury-devel' if inst(reqs, 'mercury') else None, - build_env={'CFLAGS': '-fstack-usage'}) + reqs.define( + 'mercury', + retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), + commands=[mercury_build, ['make'], ['make', 'install']], + libs=['mercury'], + pkgconfig='mercury', + requires=['boost', 'ofi', 'ucx'] + libs, + out_of_src_build=True, + package='mercury-devel' if inst(reqs, 'mercury') else None, + build_env={'CFLAGS': '-fstack-usage'}, + ) def define_common(reqs): @@ -245,39 +266,48 @@ def define_components(reqs): define_mercury(reqs) define_ompi(reqs) - reqs.define('isal', - retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), - commands=[['./autogen.sh'], - ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], - ['make'], - ['make', 'install']], - libs=['isal']) - reqs.define('isal_crypto', - 
retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), - commands=[['./autogen.sh'], - ['./configure', - '--prefix=$ISAL_CRYPTO_PREFIX', - '--libdir=$ISAL_CRYPTO_PREFIX/lib'], - ['make'], - ['make', 'install']], - libs=['isal_crypto']) - - reqs.define('pmdk', - retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), - commands=[['make', - 'all', - 'BUILD_RPMEM=n', - 'NDCTL_ENABLE=n', - 'NDCTL_DISABLE=y', - 'DOC=n', - 'EXTRA_CFLAGS="-Wno-error"', - 'install', - 'prefix=$PMDK_PREFIX']], - libs=['pmemobj']) - abt_build = ['./configure', - '--prefix=$ARGOBOTS_PREFIX', - 'CC=gcc', - '--enable-stack-unwind'] + reqs.define( + 'isal', + retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], + ['make'], + ['make', 'install'], + ], + libs=['isal'], + ) + reqs.define( + 'isal_crypto', + retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$ISAL_CRYPTO_PREFIX', '--libdir=$ISAL_CRYPTO_PREFIX/lib'], + ['make'], + ['make', 'install'], + ], + libs=['isal_crypto'], + ) + + reqs.define( + 'pmdk', + retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), + commands=[ + [ + 'make', + 'all', + 'BUILD_RPMEM=n', + 'NDCTL_ENABLE=n', + 'NDCTL_DISABLE=y', + 'DOC=n', + 'EXTRA_CFLAGS="-Wno-error"', + 'install', + 'prefix=$PMDK_PREFIX', + ] + ], + libs=['pmemobj'], + ) + abt_build = ['./configure', '--prefix=$ARGOBOTS_PREFIX', 'CC=gcc', '--enable-stack-unwind'] if reqs.target_type == 'debug': abt_build.append('--enable-debug=most') @@ -287,19 +317,28 @@ def define_components(reqs): if inst(reqs, 'valgrind_devel'): abt_build.append('--enable-valgrind') - reqs.define('argobots', - retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), - commands=[['git', 'clean', '-dxf'], - ['./autogen.sh'], - abt_build, - ['make'], - ['make', 'install']], - requires=['libunwind'], - libs=['abt'], - headers=['abt.h']) - - reqs.define('fuse', libs=['fuse3'], defines=['FUSE_USE_VERSION=35'], - headers=['fuse3/fuse.h'], package='fuse3-devel') + reqs.define( + 'argobots', + retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), + commands=[ + ['git', 'clean', '-dxf'], + ['./autogen.sh'], + abt_build, + ['make'], + ['make', 'install'], + ], + requires=['libunwind'], + libs=['abt'], + headers=['abt.h'], + ) + + reqs.define( + 'fuse', + libs=['fuse3'], + defines=['FUSE_USE_VERSION=35'], + headers=['fuse3/fuse.h'], + package='fuse3-devel', + ) # Tell SPDK which CPU to optimize for, by default this is native which works well unless you # are relocating binaries across systems, for example in CI under GitHub actions etc. 
There @@ -320,44 +359,54 @@ def define_components(reqs): else: spdk_arch = 'haswell' - reqs.define('spdk', - retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), - commands=[['./configure', - '--prefix=$SPDK_PREFIX', - '--disable-tests', - '--disable-unit-tests', - '--disable-apps', - '--without-vhost', - '--without-crypto', - '--without-pmdk', - '--without-rbd', - '--without-iscsi-initiator', - '--without-isal', - '--without-vtune', - '--with-shared', - f'--target-arch={spdk_arch}'], - ['make', f'CONFIG_ARCH={spdk_arch}'], - ['make', 'install'], - ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], - ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], - ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], - ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], - ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], - ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], - ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], - ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf']], - headers=['spdk/nvme.h'], - patch_rpath=['lib', 'bin']) - - reqs.define('protobufc', - retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), - commands=[['./autogen.sh'], - ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], - ['make'], - ['make', 'install']], - libs=['protobuf-c'], - headers=['protobuf-c/protobuf-c.h'], - package='protobuf-c-devel') + reqs.define( + 'spdk', + retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), + commands=[ + [ + './configure', + '--prefix=$SPDK_PREFIX', + '--disable-tests', + '--disable-unit-tests', + '--disable-apps', + '--without-vhost', + '--without-crypto', + '--without-pmdk', + '--without-rbd', + '--without-iscsi-initiator', + '--without-isal', + '--without-vtune', + '--with-shared', + f'--target-arch={spdk_arch}', + ], + ['make', f'CONFIG_ARCH={spdk_arch}'], + ['make', 'install'], + ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], + ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], + ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], + ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], + ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], + ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], + ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], + ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf'], + ], + headers=['spdk/nvme.h'], + patch_rpath=['lib', 'bin'], + ) + + reqs.define( + 'protobufc', + retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], + ['make'], + ['make', 'install'], + ], + libs=['protobuf-c'], + headers=['protobuf-c/protobuf-c.h'], + package='protobuf-c-devel', + ) os_name = dist[0].split()[0] if os_name == 'Ubuntu': @@ -366,8 +415,9 @@ def define_components(reqs): capstone_pkg = 'libcapstone-devel' else: capstone_pkg = 'capstone-devel' - reqs.define('capstone', libs=['capstone'], headers=['capstone/capstone.h'], - package=capstone_pkg) + reqs.define( + 'capstone', libs=['capstone'], headers=['capstone/capstone.h'], package=capstone_pkg + ) __all__ = ['define_components'] diff --git a/site_scons/env_modules.py b/site_scons/env_modules.py index b7a20bf769b..6e4b01bcc7c 100644 --- a/site_scons/env_modules.py +++ b/site_scons/env_modules.py @@ -29,22 +29,32 @@ import 
distro -class _env_module(): # pylint: disable=invalid-name +class _env_module: # pylint: disable=invalid-name """Class for utilizing Modules component to load environment modules""" env_module_init = None - _mpi_map = {"mpich": ['mpi/mpich-x86_64', 'gnu-mpich'], - "openmpi": ['mpi/mlnx_openmpi-x86_64', 'mpi/openmpi3-x86_64', - 'gnu-openmpi', 'mpi/openmpi-x86_64']} + _mpi_map = { + "mpich": ['mpi/mpich-x86_64', 'gnu-mpich'], + "openmpi": [ + 'mpi/mlnx_openmpi-x86_64', + 'mpi/openmpi3-x86_64', + 'gnu-openmpi', + 'mpi/openmpi-x86_64', + ], + } def __init__(self, silent=False): """Load Modules for initializing environment variables""" # Leap 15's lmod-lua doesn't include the usual module path # in it's MODULEPATH, for some unknown reason - os.environ["MODULEPATH"] = ":".join([os.path.join(os.sep, "usr", "share", "modules"), - os.path.join(os.sep, "usr", "share", "modulefiles"), - os.path.join(os.sep, "etc", "modulefiles")] - + os.environ.get("MODULEPATH", "").split(":")) + os.environ["MODULEPATH"] = ":".join( + [ + os.path.join(os.sep, "usr", "share", "modules"), + os.path.join(os.sep, "usr", "share", "modulefiles"), + os.path.join(os.sep, "etc", "modulefiles"), + ] + + os.environ.get("MODULEPATH", "").split(":") + ) self._silent = silent self._module_load = self._init_mpi_module() @@ -199,7 +209,7 @@ def load_mpi(mpi, silent=False): raise for line in proc.stdout.readlines(): if line.startswith(b"Value:"): - if line[line.rfind(b".") + 1:-1].decode() == mpi: + if line[line.rfind(b".") + 1 : -1].decode() == mpi: return True return False return False diff --git a/site_scons/prereq_tools/base.py b/site_scons/prereq_tools/base.py index 6cdff7d12a0..733d700db90 100644 --- a/site_scons/prereq_tools/base.py +++ b/site_scons/prereq_tools/base.py @@ -173,7 +173,7 @@ def __str__(self): return f'{self.component} needs to be built, use --build-deps=yes' -class Runner(): +class Runner: """Runs commands in a specified environment""" def __init__(self): @@ -226,8 +226,11 @@ def default_libpath(): return [] try: # pylint: disable=consider-using-with - pipe = subprocess.Popen([dpkgarchitecture, '-qDEB_HOST_MULTIARCH'], - stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + pipe = subprocess.Popen( + [dpkgarchitecture, '-qDEB_HOST_MULTIARCH'], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ) (stdo, _) = pipe.communicate() if pipe.returncode == 0: archpath = stdo.decode().strip() @@ -237,11 +240,10 @@ def default_libpath(): return [] -class GitRepoRetriever(): +class GitRepoRetriever: """Identify a git repository from which to download sources""" def __init__(self, url, has_submodules=False, branch=None): - self.url = url self.has_submodules = has_submodules self.branch = branch @@ -279,12 +281,14 @@ def get(self, subdir, **kw): passed_commit_sha = kw.get("commit_sha", None) if passed_commit_sha is None: comp = os.path.basename(subdir) - print(f""" + print( + f""" *********************** ERROR ************************ No commit_versions entry in utils/build.config for {comp}. Please specify one to avoid breaking the build with random upstream changes. 
-*********************** ERROR ************************\n""") +*********************** ERROR ************************\n""" + ) raise DownloadFailure(self.url, subdir) if not os.path.exists(subdir): @@ -330,7 +334,7 @@ def _get_specific(self, subdir, **kw): self._apply_patches(subdir, kw.get("patches", {})) -class BuildInfo(): +class BuildInfo: """A utility class to save build information""" def __init__(self): @@ -348,8 +352,10 @@ def save(self, filename): def gen_script(self, script_name): """Generate a shell script to set PATH, LD_LIBRARY_PATH, and PREFIX variables""" with open(script_name, "w") as script: - script.write('# Automatically generated by' - + f' {sys.argv[0]} at {datetime.datetime.today()}\n\n') + script.write( + '# Automatically generated by' + + f' {sys.argv[0]} at {datetime.datetime.today()}\n\n' + ) lib_paths = [] paths = [] @@ -398,7 +404,7 @@ def ensure_dir_exists(dirname, dry_run): # pylint: disable-next=function-redefined -class PreReqComponent(): +class PreReqComponent: """A class for defining and managing external components required by a project. If provided arch is a string to embed in any generated directories @@ -435,24 +441,60 @@ def __init__(self, env, opts): RUNNER.initialize(self.__env) - opts.Add(PathVariable('PREFIX', 'Installation path', install_dir, - PathVariable.PathIsDirCreate)) - opts.Add('ALT_PREFIX', f'Specifies {os.pathsep} separated list of alternative paths to add', - None) - opts.Add(PathVariable('BUILD_ROOT', 'Alternative build root directory', "build", - PathVariable.PathIsDirCreate)) + opts.Add( + PathVariable('PREFIX', 'Installation path', install_dir, PathVariable.PathIsDirCreate) + ) + opts.Add( + 'ALT_PREFIX', f'Specifies {os.pathsep} separated list of alternative paths to add', None + ) + opts.Add( + PathVariable( + 'BUILD_ROOT', + 'Alternative build root directory', + "build", + PathVariable.PathIsDirCreate, + ) + ) opts.Add('USE_INSTALLED', 'Comma separated list of preinstalled dependencies', 'none') opts.Add(('MPI_PKG', 'Specifies name of pkg-config to load for MPI', None)) opts.Add(BoolVariable('FIRMWARE_MGMT', 'Build in device firmware management.', 0)) opts.Add(BoolVariable('STACK_MMAP', 'Allocate ABT ULTs stacks with mmap()', 0)) - opts.Add(EnumVariable('BUILD_TYPE', "Set the build type", 'release', - ['dev', 'debug', 'release'], ignorecase=1)) - opts.Add(EnumVariable('TARGET_TYPE', "Set the prerequisite type", 'default', - ['default', 'dev', 'debug', 'release'], ignorecase=1)) - opts.Add(EnumVariable('COMPILER', "Set the compiler family to use", 'gcc', - ['gcc', 'covc', 'clang', 'icc'], ignorecase=2)) - opts.Add(EnumVariable('WARNING_LEVEL', "Set default warning level", 'error', - ['warning', 'warn', 'error'], ignorecase=2)) + opts.Add( + EnumVariable( + 'BUILD_TYPE', + "Set the build type", + 'release', + ['dev', 'debug', 'release'], + ignorecase=1, + ) + ) + opts.Add( + EnumVariable( + 'TARGET_TYPE', + "Set the prerequisite type", + 'default', + ['default', 'dev', 'debug', 'release'], + ignorecase=1, + ) + ) + opts.Add( + EnumVariable( + 'COMPILER', + "Set the compiler family to use", + 'gcc', + ['gcc', 'covc', 'clang', 'icc'], + ignorecase=2, + ) + ) + opts.Add( + EnumVariable( + 'WARNING_LEVEL', + "Set default warning level", + 'error', + ['warning', 'warn', 'error'], + ignorecase=2, + ) + ) opts.Update(self.__env) @@ -473,8 +515,14 @@ def __init__(self, env, opts): self.__build_dir = self._sub_path(build_dir_name) - opts.Add(PathVariable('GOPATH', 'Location of your GOPATH for the build', - f'{self.__build_dir}/go', 
PathVariable.PathIsDirCreate)) + opts.Add( + PathVariable( + 'GOPATH', + 'Location of your GOPATH for the build', + f'{self.__build_dir}/go', + PathVariable.PathIsDirCreate, + ) + ) opts.Update(env) @@ -519,8 +567,19 @@ def __init__(self, env, opts): def run_build(self, opts): """Build and dependencies""" # argobots is not really needed by client but it's difficult to separate - common_reqs = ['ucx', 'ofi', 'hwloc', 'mercury', 'boost', 'uuid', 'crypto', 'protobufc', - 'lz4', 'isal', 'isal_crypto'] + common_reqs = [ + 'ucx', + 'ofi', + 'hwloc', + 'mercury', + 'boost', + 'uuid', + 'crypto', + 'protobufc', + 'lz4', + 'isal', + 'isal_crypto', + ] client_reqs = ['fuse', 'json-c', 'capstone'] server_reqs = ['argobots', 'pmdk', 'spdk', 'ipmctl'] test_reqs = ['cmocka'] @@ -542,6 +601,7 @@ def run_build(self, opts): try: # pylint: disable-next=import-outside-toplevel from components import define_components + define_components(self) except Exception as old: raise BadScript("components", traceback.format_exc()) from old @@ -578,22 +638,29 @@ def _setup_intelc(self): self.__env.Replace(LINK=env.get("LINK")) # disable the warning about Cilk since we don't use it if not self._has_icx: - self.__env.AppendUnique(LINKFLAGS=["-static-intel", - "-diag-disable=10237"]) - self.__env.AppendUnique(CCFLAGS=["-diag-disable:2282", - "-diag-disable:188", - "-diag-disable:2405", - "-diag-disable:1338"]) + self.__env.AppendUnique(LINKFLAGS=["-static-intel", "-diag-disable=10237"]) + self.__env.AppendUnique( + CCFLAGS=[ + "-diag-disable:2282", + "-diag-disable:188", + "-diag-disable:2405", + "-diag-disable:1338", + ] + ) return {'CC': env.get("CC"), "CXX": env.get("CXX")} def _setup_compiler(self): """Setup the compiler to use""" - compiler_map = {'gcc': {'CC': 'gcc', 'CXX': 'g++'}, - 'covc': {'CC': '/opt/BullseyeCoverage/bin/gcc', - 'CXX': '/opt/BullseyeCoverage/bin/g++', - 'CVS': '/opt/BullseyeCoverage/bin/covselect', - 'COV01': '/opt/BullseyeCoverage/bin/cov01'}, - 'clang': {'CC': 'clang', 'CXX': 'clang++'}} + compiler_map = { + 'gcc': {'CC': 'gcc', 'CXX': 'g++'}, + 'covc': { + 'CC': '/opt/BullseyeCoverage/bin/gcc', + 'CXX': '/opt/BullseyeCoverage/bin/g++', + 'CVS': '/opt/BullseyeCoverage/bin/covselect', + 'COV01': '/opt/BullseyeCoverage/bin/cov01', + }, + 'clang': {'CC': 'clang', 'CXX': 'clang++'}, + } if GetOption('clean') or GetOption('help'): return @@ -630,35 +697,37 @@ def _setup_compiler(self): covfile = os.path.join(self.__top_dir, 'test.cov') if os.path.isfile(covfile): os.remove(covfile) - commands = [['$COV01', '-1'], - ['$COV01', '-s'], - ['$CVS', '--add', '!**/src/cart/test/utest/'], - ['$CVS', '--add', '!**/src/common/tests/'], - ['$CVS', '--add', '!**/src/gurt/tests/'], - ['$CVS', '--add', '!**/src/iosrv/tests/'], - ['$CVS', '--add', '!**/src/mgmt/tests/'], - ['$CVS', '--add', '!**/src/object/tests/'], - ['$CVS', '--add', '!**/src/placement/tests/'], - ['$CVS', '--add', '!**/src/rdb/tests/'], - ['$CVS', '--add', '!**/src/security/tests/'], - ['$CVS', '--add', '!**/src/utils/self_test/'], - ['$CVS', '--add', '!**/src/utils/ctl/'], - ['$CVS', '--add', '!**/src/vea/tests/'], - ['$CVS', '--add', '!**/src/vos/tests/'], - ['$CVS', '--add', '!**/src/engine/tests/'], - ['$CVS', '--add', '!**/src/tests/'], - ['$CVS', '--add', '!**/src/bio/smd/tests/'], - ['$CVS', '--add', '!**/src/cart/crt_self_test.h'], - ['$CVS', '--add', '!**/src/cart/crt_self_test_client.c'], - ['$CVS', '--add', '!**/src/cart/crt_self_test_service.c'], - ['$CVS', '--add', '!**/src/client/api/tests/'], - ['$CVS', '--add', 
'!**/src/client/dfuse/test/'], - ['$CVS', '--add', '!**/src/gurt/examples/'], - ['$CVS', '--add', '!**/src/utils/crt_launch/'], - ['$CVS', '--add', '!**/src/utils/daos_autotest.c'], - ['$CVS', '--add', '!**/src/placement/ring_map.c'], - ['$CVS', '--add', '!**/src/common/tests_dmg_helpers.c'], - ['$CVS', '--add', '!**/src/common/tests_lib.c']] + commands = [ + ['$COV01', '-1'], + ['$COV01', '-s'], + ['$CVS', '--add', '!**/src/cart/test/utest/'], + ['$CVS', '--add', '!**/src/common/tests/'], + ['$CVS', '--add', '!**/src/gurt/tests/'], + ['$CVS', '--add', '!**/src/iosrv/tests/'], + ['$CVS', '--add', '!**/src/mgmt/tests/'], + ['$CVS', '--add', '!**/src/object/tests/'], + ['$CVS', '--add', '!**/src/placement/tests/'], + ['$CVS', '--add', '!**/src/rdb/tests/'], + ['$CVS', '--add', '!**/src/security/tests/'], + ['$CVS', '--add', '!**/src/utils/self_test/'], + ['$CVS', '--add', '!**/src/utils/ctl/'], + ['$CVS', '--add', '!**/src/vea/tests/'], + ['$CVS', '--add', '!**/src/vos/tests/'], + ['$CVS', '--add', '!**/src/engine/tests/'], + ['$CVS', '--add', '!**/src/tests/'], + ['$CVS', '--add', '!**/src/bio/smd/tests/'], + ['$CVS', '--add', '!**/src/cart/crt_self_test.h'], + ['$CVS', '--add', '!**/src/cart/crt_self_test_client.c'], + ['$CVS', '--add', '!**/src/cart/crt_self_test_service.c'], + ['$CVS', '--add', '!**/src/client/api/tests/'], + ['$CVS', '--add', '!**/src/client/dfuse/test/'], + ['$CVS', '--add', '!**/src/gurt/examples/'], + ['$CVS', '--add', '!**/src/utils/crt_launch/'], + ['$CVS', '--add', '!**/src/utils/daos_autotest.c'], + ['$CVS', '--add', '!**/src/placement/ring_map.c'], + ['$CVS', '--add', '!**/src/common/tests_dmg_helpers.c'], + ['$CVS', '--add', '!**/src/common/tests_lib.c'], + ] if not RUNNER.run_commands(commands): raise BuildFailure("cov01") @@ -740,10 +809,12 @@ def _modify_prefix(self, comp_def): if comp_def.package: return - if comp_def.src_path and \ - not os.path.exists(comp_def.src_path) and \ - not os.path.exists(os.path.join(self.prereq_prefix, comp_def.name)) and \ - not os.path.exists(self.__env.get(f'{comp_def.name.upper()}_PREFIX')): + if ( + comp_def.src_path + and not os.path.exists(comp_def.src_path) + and not os.path.exists(os.path.join(self.prereq_prefix, comp_def.name)) + and not os.path.exists(self.__env.get(f'{comp_def.name.upper()}_PREFIX')) + ): self._save_component_prefix(f'{comp_def.name.upper()}_PREFIX', '/usr') def require(self, env, *comps, **kw): @@ -923,7 +994,7 @@ def get_config(self, section, name): return self._configs.get(section, name) -class _Component(): +class _Component: """A class to define attributes of an external component Args: @@ -949,12 +1020,7 @@ class _Component(): skip_arch -- not required on this platform """ - def __init__(self, - prereqs, - name, - use_installed, - **kw): - + def __init__(self, prereqs, name, use_installed, **kw): self.__check_only = GetOption('check_only') self.__dry_run = GetOption('no_exec') self.targets_found = False @@ -1016,8 +1082,9 @@ def _resolve_patches(self): patches[patch_path] = patch_subdir if os.path.exists(patch_path): continue - command = [['curl', '-sSfL', '--retry', '10', '--retry-max-time', '60', - '-o', patch_path, raw]] + command = [ + ['curl', '-sSfL', '--retry', '10', '--retry-max-time', '60', '-o', patch_path, raw] + ] if not RUNNER.run_commands(command): raise BuildFailure(raw) # Remove old patches @@ -1055,8 +1122,7 @@ def get(self): print(f'Downloading source for {self.name}') patches = self._resolve_patches() - self.retriever.get(self.src_path, commit_sha=commit_sha, - 
patches=patches, branch=branch) + self.retriever.get(self.src_path, commit_sha=commit_sha, patches=patches, branch=branch) def _has_missing_system_deps(self, env): """Check for required system libs""" @@ -1100,8 +1166,11 @@ def _parse_config(self, env, opts): path = os.environ.get("PKG_CONFIG_PATH", None) if path and "PKG_CONFIG_PATH" not in env["ENV"]: env["ENV"]["PKG_CONFIG_PATH"] = path - if (not self.use_installed and self.component_prefix is not None - and not self.component_prefix == "/usr"): + if ( + not self.use_installed + and self.component_prefix is not None + and not self.component_prefix == "/usr" + ): path_found = False for path in ["lib", "lib64"]: config = os.path.join(self.component_prefix, path, "pkgconfig") @@ -1230,8 +1299,9 @@ def configure(self): else: self.prebuilt_path = self.prereqs.get_prebuilt_path(self, self.name) - (self.component_prefix, self.prefix) = self.prereqs.get_prefixes(self.name, - self.prebuilt_path) + (self.component_prefix, self.prefix) = self.prereqs.get_prefixes( + self.name, self.prebuilt_path + ) self.src_path = None if self.retriever: self.src_path = self.prereqs.get_src_path(self.name) @@ -1418,7 +1488,6 @@ def build(self, env, needed_libs): missing_targets = self.has_missing_targets(envcopy) if build_dep: - if self._has_missing_system_deps(self.prereqs.system_env): raise MissingSystemLibs(self.name) @@ -1453,6 +1522,15 @@ def build(self, env, needed_libs): return changes -__all__ = ["GitRepoRetriever", "DownloadFailure", "BadScript", "BuildFailure", "MissingDefinition", - "MissingTargets", "MissingSystemLibs", "DownloadRequired", "PreReqComponent", - "BuildRequired"] +__all__ = [ + "GitRepoRetriever", + "DownloadFailure", + "BadScript", + "BuildFailure", + "MissingDefinition", + "MissingTargets", + "MissingSystemLibs", + "DownloadRequired", + "PreReqComponent", + "BuildRequired", +] diff --git a/site_scons/site_tools/compiler_setup.py b/site_scons/site_tools/compiler_setup.py index 9f25ccace71..4cd0394faa8 100644 --- a/site_scons/site_tools/compiler_setup.py +++ b/site_scons/site_tools/compiler_setup.py @@ -2,18 +2,20 @@ from SCons.Script import Configure, Exit, GetOption -DESIRED_FLAGS = ['-fstack-usage', - '-Wno-sign-compare', - '-Wno-unused-parameter', - '-Wno-missing-field-initializers', - '-Wno-implicit-fallthrough', - '-Wno-ignored-attributes', - '-Wno-gnu-zero-variadic-macro-arguments', - '-Wno-tautological-constant-out-of-range-compare', - '-Wno-unused-command-line-argument', - '-Wmismatched-dealloc', - '-Wfree-nonheap-object', - '-Wframe-larger-than=4096'] +DESIRED_FLAGS = [ + '-fstack-usage', + '-Wno-sign-compare', + '-Wno-unused-parameter', + '-Wno-missing-field-initializers', + '-Wno-implicit-fallthrough', + '-Wno-ignored-attributes', + '-Wno-gnu-zero-variadic-macro-arguments', + '-Wno-tautological-constant-out-of-range-compare', + '-Wno-unused-command-line-argument', + '-Wmismatched-dealloc', + '-Wfree-nonheap-object', + '-Wframe-larger-than=4096', +] # Compiler flags to prevent optimizing out security checks DESIRED_FLAGS.extend(['-fno-strict-overflow', '-fno-delete-null-pointer-checks', '-fwrapv']) @@ -21,8 +23,11 @@ # Compiler flags for stack hardening DESIRED_FLAGS.extend(['-fstack-protector-strong', '-fstack-clash-protection']) -PP_ONLY_FLAGS = ['-Wno-parentheses-equality', '-Wno-builtin-requires-header', - '-Wno-unused-function'] +PP_ONLY_FLAGS = [ + '-Wno-parentheses-equality', + '-Wno-builtin-requires-header', + '-Wno-unused-function', +] def _base_setup(env): @@ -110,12 +115,15 @@ def _check_flag_helper(context, 
compiler, ext, flag): flags.append('-O1') context.Message(f'Checking {compiler} {flag} ') context.env.Replace(CCFLAGS=flags) - ret = context.TryCompile(""" + ret = context.TryCompile( + """ # include int main() { return 0; } -""", ext) +""", + ext, + ) context.Result(ret) return ret @@ -156,8 +164,7 @@ def _check_flags(env, config, key, value): def _append_if_supported(env, **kwargs): """Check and append flags for construction variables""" cenv = env.Clone() - config = Configure(cenv, custom_tests={'CheckFlag': _check_flag, - 'CheckFlagCC': _check_flag_cc}) + config = Configure(cenv, custom_tests={'CheckFlag': _check_flag, 'CheckFlagCC': _check_flag_cc}) for key, value in kwargs.items(): if key not in ["CFLAGS", "CXXFLAGS", "CCFLAGS"]: env.AppendUnique(**{key: value}) diff --git a/site_scons/site_tools/daos_builder.py b/site_scons/site_tools/daos_builder.py index 23c3a68c7d5..90acb85a514 100644 --- a/site_scons/site_tools/daos_builder.py +++ b/site_scons/site_tools/daos_builder.py @@ -41,8 +41,9 @@ def _add_rpaths(env, install_off, set_cgo_ld, is_bin): relpath = os.path.relpath(rpath, prefix) if relpath != rpath: if set_cgo_ld: - env.AppendENVPath("CGO_LDFLAGS", f'-Wl,-rpath=$ORIGIN/{install_off}/{relpath}', - sep=" ") + env.AppendENVPath( + "CGO_LDFLAGS", f'-Wl,-rpath=$ORIGIN/{install_off}/{relpath}', sep=" " + ) else: joined = os.path.normpath(os.path.join(install_off, relpath)) env.AppendUnique(RPATH=[DaosLiteral(fr'\$$ORIGIN/{joined}')]) diff --git a/site_scons/site_tools/doneapi.py b/site_scons/site_tools/doneapi.py index 24042439969..3569d02a38e 100644 --- a/site_scons/site_tools/doneapi.py +++ b/site_scons/site_tools/doneapi.py @@ -13,7 +13,7 @@ # pylint: disable=too-few-public-methods -class DetectCompiler(): +class DetectCompiler: """Find oneapi compiler""" def __init__(self): @@ -29,13 +29,15 @@ def __init__(self): for path in [root, binp, libp, binarch, libarch, include, icx]: if not os.path.exists(path): return - self.map = {'root': root, - 'bin': binp, - 'lib': libp, - 'binarch': binarch, - 'libarch': libarch, - 'include': include, - 'icx': icx} + self.map = { + 'root': root, + 'bin': binp, + 'lib': libp, + 'binarch': binarch, + 'libarch': libarch, + 'include': include, + 'icx': icx, + } def __getitem__(self, key): """Return key""" @@ -51,11 +53,13 @@ def generate(env): raise SCons.Errors.InternalError("No oneapi compiler found") env['INTEL_C_COMPILER_TOP'] = detector['root'] - paths = {'INCLUDE': 'include', - 'LIB': 'libarch', - 'PATH': 'binarch', - 'LD_LIBRARY_PATH': 'libarch'} - for (key, value) in paths.items(): + paths = { + 'INCLUDE': 'include', + 'LIB': 'libarch', + 'PATH': 'binarch', + 'LD_LIBRARY_PATH': 'libarch', + } + for key, value in paths.items(): env.PrependENVPath(key, detector[value]) env.PrependENVPath("PATH", detector["bin"]) env.PrependENVPath("LIB", detector["lib"]) diff --git a/site_scons/site_tools/go_builder.py b/site_scons/site_tools/go_builder.py index b3706a8976d..aff663ef47f 100644 --- a/site_scons/site_tools/go_builder.py +++ b/site_scons/site_tools/go_builder.py @@ -17,8 +17,12 @@ def _scan_go_file(node, env, _path): src_dir = os.path.dirname(str(node)) includes = [] path_name = str(node)[12:] - rc = subprocess.run([env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], - cwd='src/control', stdout=subprocess.PIPE, check=True) + rc = subprocess.run( + [env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], + cwd='src/control', + stdout=subprocess.PIPE, + check=True, + ) data = json.loads(rc.stdout.decode('utf-8')) for dep in data['Deps']: 
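The go_builder hunk above reformats the scanner's call to go list --json -mod=vendor, which emits package metadata as JSON; the scanner then walks the Deps field to find in-tree Go sources. A self-contained sketch of that pattern, with the go binary path, package name and working directory as placeholder values rather than anything taken from this patch:

    import json
    import subprocess

    def list_go_deps(go_bin, package, workdir):
        """Return the Deps list reported by 'go list --json' for one package."""
        result = subprocess.run(
            [go_bin, 'list', '--json', '-mod=vendor', package],
            cwd=workdir,
            stdout=subprocess.PIPE,
            check=True,  # raise CalledProcessError on a non-zero exit
        )
        return json.loads(result.stdout.decode('utf-8')).get('Deps', [])

    # Hypothetical usage; the paths are illustrative only:
    # deps = list_go_deps('/usr/bin/go', './server', 'src/control')
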
if not dep.startswith('github.com/daos-stack/daos'): @@ -70,9 +74,16 @@ def _check_go_version(context): # go version go1.2.3 Linux/amd64 go_version = out.split(' ')[2].replace('go', '') - if len([x for x, y in - zip(go_version.split('.'), MIN_GO_VERSION.split('.')) - if int(x) < int(y)]) > 0: + if ( + len( + [ + x + for x, y in zip(go_version.split('.'), MIN_GO_VERSION.split('.')) + if int(x) < int(y) + ] + ) + > 0 + ): context.Result(f'{go_version} is too old (min supported: {MIN_GO_VERSION}) ') return 0 context.Result(str(go_version)) diff --git a/site_scons/site_tools/protoc/__init__.py b/site_scons/site_tools/protoc/__init__.py index 239ab73422c..aadb7d93d1e 100644 --- a/site_scons/site_tools/protoc/__init__.py +++ b/site_scons/site_tools/protoc/__init__.py @@ -46,7 +46,7 @@ class PythonGRPCCompilerNotFound(ToolProtocWarning): def _detect(env): - """ Try to detect the various protoc components """ + """Try to detect the various protoc components""" protoc_found = False protoc_gen_go_found = False grpc_tools_found = False @@ -78,6 +78,7 @@ def _detect(env): try: # pylint: disable=unused-import,import-outside-toplevel import grpc_tools.protoc # noqa: F401 + grpc_tools_found = True except ImportError: grpc_tools_found = False @@ -85,14 +86,13 @@ def _detect(env): if protoc_found and protoc_gen_go_found and grpc_tools_found: return True if not protoc_found: - raise SCons.Errors.StopError(ProtocCompilerNotFound, - "Could not detect protoc compiler") + raise SCons.Errors.StopError(ProtocCompilerNotFound, "Could not detect protoc compiler") if not protoc_gen_go_found: - raise SCons.Errors.StopError(GoProtocCompilerNotFound, - "Could not detect protoc-gen-go") + raise SCons.Errors.StopError(GoProtocCompilerNotFound, "Could not detect protoc-gen-go") if not grpc_tools_found: - raise SCons.Errors.StopError(PythonGRPCCompilerNotFound, - "grpc_tools.protoc python module is not installed") + raise SCons.Errors.StopError( + PythonGRPCCompilerNotFound, "grpc_tools.protoc python module is not installed" + ) return None @@ -106,10 +106,7 @@ def run_python(_source, _target, env, _for_signature): _grpc_python_builder = SCons.Builder.Builder( - generator=run_python, - suffix='$PYTHON_SUFFIX', - src_suffix='$PROTO_SUFFIX', - single_source=1 + generator=run_python, suffix='$PYTHON_SUFFIX', src_suffix='$PROTO_SUFFIX', single_source=1 ) @@ -123,10 +120,7 @@ def run_go(_source, _target, env, _for_signature): _grpc_go_builder = SCons.Builder.Builder( - generator=run_go, - suffix='$GO_SUFFIX', - src_suffix='$PROTO_SUFFIX', - single_source=1 + generator=run_go, suffix='$GO_SUFFIX', src_suffix='$PROTO_SUFFIX', single_source=1 ) @@ -142,7 +136,7 @@ def generate(env, **_kwargs): + '--python_out=$GTARGET_DIR --grpc_python_out=$GTARGET_DIR $SOURCE', PYTHON_COMSTR='', GO_COM='$PROTOC -I$PROTO_INCLUDES $SOURCE --go_out=plugins=grpc:$GTARGET_DIR', - GO_COMSTR='' + GO_COMSTR='', ) env['BUILDERS']['GRPCPython'] = _grpc_python_builder env['BUILDERS']['GRPCGo'] = _grpc_go_builder diff --git a/site_scons/site_tools/stack_analyzer.py b/site_scons/site_tools/stack_analyzer.py index 11fe8af1734..267870fce17 100644 --- a/site_scons/site_tools/stack_analyzer.py +++ b/site_scons/site_tools/stack_analyzer.py @@ -17,7 +17,7 @@ def exit_handler(handle): handle.analyze() -class Analyzer(): +class Analyzer: """Class to parse .su files""" def __init__(self, env, daos_prefix, comp_prefix, arg=""): @@ -36,16 +36,46 @@ def __init__(self, env, daos_prefix, comp_prefix, arg=""): def parse_args(self, arg_str): """Parse the arguments""" parser 
= argparse.ArgumentParser(description='Stack size analyzer') - parser.add_argument('-x', '--exclude-dir', dest='xdirs', nargs='*', default=[], - help="string to match indicating directories to exclude") - parser.add_argument('-I', '--include-dir', dest='dirs', nargs='*', default=[], - help="string to match indicating directories to include") - parser.add_argument('-i', '--include-file', dest='files', nargs='*', default=[], - help="string to match indicating a directory to include") - parser.add_argument('-c', '--cutoff', dest='cutoff', default=100, type=int, - help="Lower bound cutoff for entries to print") - parser.add_argument('-e', '--exit', dest='exit', default=False, action="store_true", - help="Do not wait for build. Run the analysis immediately and exit.") + parser.add_argument( + '-x', + '--exclude-dir', + dest='xdirs', + nargs='*', + default=[], + help="string to match indicating directories to exclude", + ) + parser.add_argument( + '-I', + '--include-dir', + dest='dirs', + nargs='*', + default=[], + help="string to match indicating directories to include", + ) + parser.add_argument( + '-i', + '--include-file', + dest='files', + nargs='*', + default=[], + help="string to match indicating a directory to include", + ) + parser.add_argument( + '-c', + '--cutoff', + dest='cutoff', + default=100, + type=int, + help="Lower bound cutoff for entries to print", + ) + parser.add_argument( + '-e', + '--exit', + dest='exit', + default=False, + action="store_true", + help="Do not wait for build. Run the analysis immediately and exit.", + ) args = parser.parse_args(arg_str.split()) self.dir_exclusions = args.xdirs self.dir_inclusions = args.dirs diff --git a/src/tests/ftest/launch.py b/src/tests/ftest/launch.py index 2dc863f9619..b8df3f0244c 100755 --- a/src/tests/ftest/launch.py +++ b/src/tests/ftest/launch.py @@ -24,6 +24,7 @@ # from avocado.core.version import MAJOR, MINOR # from avocado.utils.stacktrace import prepare_exc_info from ClusterShell.NodeSet import NodeSet + # When SRE-439 is fixed we should be able to include these import statements here # from util.distro_utils import detect # pylint: disable=import-error,no-name-in-module @@ -34,6 +35,7 @@ # This is not good coding practice. Should use package paths and remove all these E402. 
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "util")) from data_utils import dict_extract_values, list_flatten, list_unique # noqa: E402 + # pylint: disable=import-outside-toplevel from host_utils import HostException, HostInfo, get_local_host, get_node_set # noqa: E402 from logger_utils import get_console_handler, get_file_handler # noqa: E402 diff --git a/src/tests/ftest/util/agent_utils.py b/src/tests/ftest/util/agent_utils.py index 76c293f0e4f..d4b967b45d1 100644 --- a/src/tests/ftest/util/agent_utils.py +++ b/src/tests/ftest/util/agent_utils.py @@ -11,8 +11,12 @@ from agent_utils_params import DaosAgentTransportCredentials, DaosAgentYamlParameters from ClusterShell.NodeSet import NodeSet from command_utils import CommandWithSubCommand, SubprocessManager, YamlCommand -from command_utils_base import (CommandWithParameters, CommonConfig, EnvironmentVariables, - FormattedParameter) +from command_utils_base import ( + CommandWithParameters, + CommonConfig, + EnvironmentVariables, + FormattedParameter, +) from exception_utils import CommandFailure from general_utils import get_default_config_file, get_log_file, run_pcmd from run_utils import run_remote diff --git a/src/tests/ftest/util/apricot/apricot/test.py b/src/tests/ftest/util/apricot/apricot/test.py index 2489c49cf14..3b1e8ee57e0 100644 --- a/src/tests/ftest/util/apricot/apricot/test.py +++ b/src/tests/ftest/util/apricot/apricot/test.py @@ -25,9 +25,18 @@ from dmg_utils import get_dmg_command from exception_utils import CommandFailure from fault_config_utils import FaultInjection -from general_utils import (DaosTestError, dict_to_str, dump_engines_stacks, - get_avocado_config_value, get_default_config_file, get_file_listing, - nodeset_append_suffix, pcmd, run_command, set_avocado_config_value) +from general_utils import ( + DaosTestError, + dict_to_str, + dump_engines_stacks, + get_avocado_config_value, + get_default_config_file, + get_file_listing, + nodeset_append_suffix, + pcmd, + run_command, + set_avocado_config_value, +) from host_utils import HostException, HostInfo, HostRole, get_host_parameters, get_local_host from job_manager_utils import get_job_manager from logger_utils import TestLogger diff --git a/src/tests/ftest/util/command_utils.py b/src/tests/ftest/util/command_utils.py index 9c21c21feeb..f4c18cb7795 100644 --- a/src/tests/ftest/util/command_utils.py +++ b/src/tests/ftest/util/command_utils.py @@ -16,12 +16,26 @@ from avocado.utils import process from ClusterShell.NodeSet import NodeSet -from command_utils_base import (BasicParameter, CommandWithParameters, EnvironmentVariables, - LogParameter, ObjectWithParameters) +from command_utils_base import ( + BasicParameter, + CommandWithParameters, + EnvironmentVariables, + LogParameter, + ObjectWithParameters, +) from exception_utils import CommandFailure -from general_utils import (DaosTestError, change_file_owner, check_file_exists, create_directory, - distribute_files, get_file_listing, get_job_manager_class, - get_subprocess_stdout, run_command, run_pcmd) +from general_utils import ( + DaosTestError, + change_file_owner, + check_file_exists, + create_directory, + distribute_files, + get_file_listing, + get_job_manager_class, + get_subprocess_stdout, + run_command, + run_pcmd, +) from run_utils import command_as_user from user_utils import get_primary_group from yaml_utils import get_yaml_data diff --git a/src/tests/ftest/util/data_mover_test_base.py b/src/tests/ftest/util/data_mover_test_base.py index 4a839bb587f..01078140fb0 100644 --- 
a/src/tests/ftest/util/data_mover_test_base.py +++ b/src/tests/ftest/util/data_mover_test_base.py @@ -4,14 +4,22 @@ SPDX-License-Identifier: BSD-2-Clause-Patent """ import ctypes + # pylint: disable=too-many-lines import os import re from os.path import join from command_utils_base import BasicParameter, EnvironmentVariables -from data_mover_utils import (ContClone, DcpCommand, DdeserializeCommand, DserializeCommand, - DsyncCommand, FsCopy, uuid_from_obj) +from data_mover_utils import ( + ContClone, + DcpCommand, + DdeserializeCommand, + DserializeCommand, + DsyncCommand, + FsCopy, + uuid_from_obj, +) from duns_utils import format_path from exception_utils import CommandFailure from general_utils import create_string_buffer, get_log_file diff --git a/src/tests/ftest/util/dmg_utils.py b/src/tests/ftest/util/dmg_utils.py index 7d336c4304d..09a55765399 100644 --- a/src/tests/ftest/util/dmg_utils.py +++ b/src/tests/ftest/util/dmg_utils.py @@ -5,6 +5,7 @@ """ import re from grp import getgrgid + # pylint: disable=too-many-lines from logging import getLogger from pwd import getpwuid diff --git a/src/tests/ftest/util/job_manager_utils.py b/src/tests/ftest/util/job_manager_utils.py index afb9b3e4bdd..93971bbf492 100644 --- a/src/tests/ftest/util/job_manager_utils.py +++ b/src/tests/ftest/util/job_manager_utils.py @@ -6,6 +6,7 @@ import os import re import time + # pylint: disable=too-many-lines from distutils.spawn import find_executable # pylint: disable=deprecated-module @@ -14,8 +15,13 @@ from command_utils_base import EnvironmentVariables, FormattedParameter from env_modules import load_mpi from exception_utils import CommandFailure, MPILoadError -from general_utils import (get_job_manager_class, get_journalctl_command, journalctl_time, pcmd, - run_pcmd) +from general_utils import ( + get_job_manager_class, + get_journalctl_command, + journalctl_time, + pcmd, + run_pcmd, +) from run_utils import run_remote, stop_processes from write_host_file import write_host_file diff --git a/src/tests/ftest/util/server_utils.py b/src/tests/ftest/util/server_utils.py index a9f286bc7e8..6cda1354f4e 100644 --- a/src/tests/ftest/util/server_utils.py +++ b/src/tests/ftest/util/server_utils.py @@ -18,8 +18,14 @@ from command_utils_base import BasicParameter, CommonConfig from dmg_utils import get_dmg_command from exception_utils import CommandFailure -from general_utils import (get_default_config_file, get_display_size, get_log_file, list_to_str, - pcmd, run_pcmd) +from general_utils import ( + get_default_config_file, + get_display_size, + get_log_file, + list_to_str, + pcmd, + run_pcmd, +) from host_utils import get_local_host from run_utils import run_remote, stop_processes from server_utils_base import DaosServerCommand, DaosServerInformation, ServerFailed diff --git a/src/tests/ftest/util/soak_test_base.py b/src/tests/ftest/util/soak_test_base.py index 03c01106894..e9361276e44 100644 --- a/src/tests/ftest/util/soak_test_base.py +++ b/src/tests/ftest/util/soak_test_base.py @@ -23,13 +23,32 @@ from general_utils import journalctl_time from host_utils import get_local_host from run_utils import RunException, run_local, run_remote -from soak_utils import (SoakTestError, add_pools, build_job_script, cleanup_dfuse, - create_app_cmdline, create_dm_cmdline, create_fio_cmdline, - create_ior_cmdline, create_macsio_cmdline, create_mdtest_cmdline, - create_racer_cmdline, ddhhmmss_format, get_daos_server_logs, get_harassers, - get_journalctl, launch_exclude_reintegrate, launch_extend, - launch_server_stop_start, 
launch_snapshot, launch_vmd_identify_check, - reserved_file_copy, run_event_check, run_metrics_check, run_monitor_check) +from soak_utils import ( + SoakTestError, + add_pools, + build_job_script, + cleanup_dfuse, + create_app_cmdline, + create_dm_cmdline, + create_fio_cmdline, + create_ior_cmdline, + create_macsio_cmdline, + create_mdtest_cmdline, + create_racer_cmdline, + ddhhmmss_format, + get_daos_server_logs, + get_harassers, + get_journalctl, + launch_exclude_reintegrate, + launch_extend, + launch_server_stop_start, + launch_snapshot, + launch_vmd_identify_check, + reserved_file_copy, + run_event_check, + run_metrics_check, + run_monitor_check, +) class SoakTestBase(TestWithServers): diff --git a/src/tests/ftest/util/soak_utils.py b/src/tests/ftest/util/soak_utils.py index 9aaec2fdbf1..72570f02cab 100644 --- a/src/tests/ftest/util/soak_utils.py +++ b/src/tests/ftest/util/soak_utils.py @@ -21,8 +21,17 @@ from dmg_utils import get_storage_query_device_info from duns_utils import format_path from fio_utils import FioCommand -from general_utils import (DaosTestError, get_host_data, get_log_file, get_random_bytes, - get_random_string, list_to_str, pcmd, run_command, run_pcmd) +from general_utils import ( + DaosTestError, + get_host_data, + get_log_file, + get_random_bytes, + get_random_string, + list_to_str, + pcmd, + run_command, + run_pcmd, +) from ior_utils import IorCommand from job_manager_utils import Mpirun from macsio_util import MacsioCommand diff --git a/src/tests/ftest/util/test_utils_pool.py b/src/tests/ftest/util/test_utils_pool.py index 5619e5baef6..ee2606f7041 100644 --- a/src/tests/ftest/util/test_utils_pool.py +++ b/src/tests/ftest/util/test_utils_pool.py @@ -5,6 +5,7 @@ """ import ctypes import json + # pylint: disable=too-many-lines import os from time import sleep, time From 1036d1e1cadf295bd759406669ab6ae2fa559642 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:46:33 +0000 Subject: [PATCH 05/26] Fix pylint. Required-githooks: true Signed-off-by: Ashley Pittman --- utils/cq/daos_pylint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/cq/daos_pylint.py b/utils/cq/daos_pylint.py index 05308e805f7..92f9c8def72 100755 --- a/utils/cq/daos_pylint.py +++ b/utils/cq/daos_pylint.py @@ -366,7 +366,7 @@ def parse_msg(msg): return vals def msg_to_github(vals): - # pylint: disable-next=consider-using-f-string + # pylint: disable=consider-using-f-string print( '::{category} file={path},line={line},col={column},::{symbol}, {msg}'.format(**vals) ) From d3fa8bec7d43c9c65ba22326a648991318fc1ca8 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:48:55 +0000 Subject: [PATCH 06/26] Rename a file. 
Required-githooks: true Signed-off-by: Ashley Pittman --- .../{junit_list_unsuccessful => junit_list_unsuccessful.py} | 0 ci/functional/launchable_analysis | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename ci/functional/{junit_list_unsuccessful => junit_list_unsuccessful.py} (100%) diff --git a/ci/functional/junit_list_unsuccessful b/ci/functional/junit_list_unsuccessful.py similarity index 100% rename from ci/functional/junit_list_unsuccessful rename to ci/functional/junit_list_unsuccessful.py diff --git a/ci/functional/launchable_analysis b/ci/functional/launchable_analysis index aa640e67316..304042cab97 100755 --- a/ci/functional/launchable_analysis +++ b/ci/functional/launchable_analysis @@ -23,7 +23,7 @@ fi notify=false -ci/functional/junit_list_unsuccessful "$STAGE_NAME/*/*/xunit1_results.xml" > "$dir"test_fails +ci/functional/junit_list_unsuccessful.py "$STAGE_NAME/*/*/xunit1_results.xml" > "$dir"test_fails mv subset.txt "$dir"launchable_subset while IFS=: read -r class test; do From 9996dc360b8c041257a977dc285a5374294bea20 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Thu, 2 Nov 2023 20:59:47 +0000 Subject: [PATCH 07/26] Add a new package. Required-githooks: true Signed-off-by: Ashley Pittman --- utils/cq/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/cq/requirements.txt b/utils/cq/requirements.txt index a5d00a92fa7..b9a383b50c4 100644 --- a/utils/cq/requirements.txt +++ b/utils/cq/requirements.txt @@ -6,6 +6,7 @@ avocado-framework-plugin-varianter-yaml-to-mux<94 clustershell paramiko pyenchant +junitparser ## flake8 6 removed --diff option which breaks flake precommit hook. ## https://github.com/pycqa/flake8/issues/1389 https://github.com/PyCQA/flake8/pull/1720 flake8<6.0.0 From 610ebaf301a78b14674c25fb1efa0cccaa605e89 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 3 Nov 2023 10:48:37 +0000 Subject: [PATCH 08/26] Revert changes to utils, except using/sl Required-githooks: true Signed-off-by: Ashley Pittman --- .github/workflows/linting.yml | 2 +- utils/ansible/ftest/library/daos_hugepages.py | 51 +- utils/certs/SConscript | 8 +- utils/cq/d_logging_check.py | 52 +- utils/cq/daos_pylint.py | 81 +- utils/cq/requirements.txt | 1 - .../10-submodule-update-check.py | 19 +- utils/node_local_test.py | 1339 ++++++++--------- utils/run_utest.py | 136 +- 9 files changed, 725 insertions(+), 964 deletions(-) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index af10d26c1df..f23b232119a 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -43,4 +43,4 @@ jobs: - name: Black uses: psf/black@stable with: - options: "--check --verbose --extend-exclude (ftest|vendor)" + options: "--check --verbose --extend-exclude (ftest|vendor|utils)" diff --git a/utils/ansible/ftest/library/daos_hugepages.py b/utils/ansible/ftest/library/daos_hugepages.py index 529abd1c4f2..35d192b3035 100644 --- a/utils/ansible/ftest/library/daos_hugepages.py +++ b/utils/ansible/ftest/library/daos_hugepages.py @@ -23,7 +23,11 @@ """ -ANSIBLE_METADATA = {'metadata_version': '0.1', 'status': ['preview'], 'supported_by': 'Intel'} +ANSIBLE_METADATA = { + 'metadata_version': '0.1', + 'status': ['preview'], + 'supported_by': 'Intel' +} DOCUMENTATION = ''' @@ -93,8 +97,7 @@ def main(): if not os.path.isfile(r"/sys/kernel/mm/transparent_hugepage/enabled"): module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg="Huge Pages not activated in kernel", - ) + msg="Huge Pages not activated in kernel") if not 
is_huge_pages_enabled(): try: @@ -103,26 +106,19 @@ def main(): except Exception as error: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Huge Pages could not be enabled: {error}", - ) + msg=f"Huge Pages could not be enabled: {error}") if not is_huge_pages_enabled(): module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg="Huge Pages could not be enabled", - ) + msg="Huge Pages could not be enabled") result = subprocess.run( [r'sysctl', r'vm.nr_hugepages'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - timeout=3, - check=False, - ) + stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=3, check=False) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Size of Huge Pages could not be read: {result.stderr.decode('ascii')}", - ) + msg=f"Size of Huge Pages could not be read: {result.stderr.decode('ascii')}") hugepages_current_size = 0 stdout_str = result.stdout.decode('ascii') @@ -130,30 +126,23 @@ def main(): if match is None: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Invalid size of huge pages from sysctl: {stdout_str}", - ) + msg=f"Invalid size of huge pages from sysctl: {stdout_str}") hugepages_current_size = int(match.groupdict()['size']) if hugepages_size != hugepages_current_size: if check_mode: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Invalid size of huge pages: {hugepages_current_size}", - ) + msg=f"Invalid size of huge pages: {hugepages_current_size}") result = subprocess.run( [r'sysctl', f"vm.nr_hugepages={hugepages_size}"], - stdout=subprocess.DEVNULL, - stderr=subprocess.PIPE, - timeout=3, - check=False, - ) + stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, timeout=3, check=False) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, msg="Size of Huge Pages could not be dynamically set: " - f"{result.stderr.decode('ascii')}", - ) + f"{result.stderr.decode('ascii')}") try: with open(r"/etc/sysctl.d/50-hugepages.conf", "w", encoding="utf8") as fd: @@ -161,19 +150,19 @@ def main(): except Exception as error: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, - msg=f"Setup of Huge Pages size at boot could not be defined: {error}", - ) + msg=f"Setup of Huge Pages size at boot could not be defined: {error}") result = subprocess.run([r'sysctl', '-p'], stderr=subprocess.PIPE, timeout=3, check=False) if result.returncode != 0: module.fail_json( elapsed=(datetime.datetime.utcnow() - start_time).seconds, msg="Setup of Huge Pages size at boot could not be applied: " - f"{result.stderr.decode('ascii')}", - ) + f"{result.stderr.decode('ascii')}") - module.exit_json(changed=True, elapsed=(datetime.datetime.utcnow() - start_time).seconds) + module.exit_json(changed=True, + elapsed=(datetime.datetime.utcnow() - start_time).seconds) - module.exit_json(changed=False, elapsed=(datetime.datetime.utcnow() - start_time).seconds) + module.exit_json(changed=False, + elapsed=(datetime.datetime.utcnow() - start_time).seconds) if __name__ == '__main__': diff --git a/utils/certs/SConscript b/utils/certs/SConscript index 4e9eeb1863c..446a2059a5a 100644 --- a/utils/certs/SConscript +++ b/utils/certs/SConscript @@ -5,10 +5,10 @@ def scons(): """Execute build""" Import('env') - env.Install( - "$PREFIX/lib64/daos/certgen", - ['admin.cnf', 'agent.cnf', 'server.cnf', 'gen_certificates.sh'], - ) + 
env.Install("$PREFIX/lib64/daos/certgen", ['admin.cnf', + 'agent.cnf', + 'server.cnf', + 'gen_certificates.sh']) if __name__ == "SCons.Script": diff --git a/utils/cq/d_logging_check.py b/utils/cq/d_logging_check.py index 1dc6852c93a..134e204cb7c 100755 --- a/utils/cq/d_logging_check.py +++ b/utils/cq/d_logging_check.py @@ -21,7 +21,7 @@ ARGS = None -class FileLine: +class FileLine(): """One line from a file""" def __init__(self, file_object, line, lineno): @@ -124,47 +124,15 @@ def __next__(self): # Logging macros where the new-line is added if missing. -PREFIXES = [ - 'D_ERROR', - 'D_WARN', - 'D_INFO', - 'D_NOTE', - 'D_ALERT', - 'D_CRIT', - 'D_FATAT', - 'D_EMIT', - 'D_TRACE_INFO', - 'D_TRACE_NOTE', - 'D_TRACE_WARN', - 'D_TRACE_ERROR', - 'D_TRACE_ALERT', - 'D_TRACE_CRIT', - 'D_TRACE_FATAL', - 'D_TRACE_EMIT', - 'RPC_TRACE', - 'RPC_ERROR', - 'VOS_TX_LOG_FAIL', - 'VOS_TX_TRACE_FAIL', - 'D_DEBUG', - 'D_CDEBUG', - 'IV_DBG', -] +PREFIXES = ['D_ERROR', 'D_WARN', 'D_INFO', 'D_NOTE', 'D_ALERT', 'D_CRIT', 'D_FATAT', 'D_EMIT', + 'D_TRACE_INFO', 'D_TRACE_NOTE', 'D_TRACE_WARN', 'D_TRACE_ERROR', 'D_TRACE_ALERT', + 'D_TRACE_CRIT', 'D_TRACE_FATAL', 'D_TRACE_EMIT', 'RPC_TRACE', 'RPC_ERROR', + 'VOS_TX_LOG_FAIL', 'VOS_TX_TRACE_FAIL', 'D_DEBUG', 'D_CDEBUG', 'IV_DBG'] # Logging macros where a new-line is always added. -PREFIXES_NNL = [ - 'DFUSE_LOG_WARNING', - 'DFUSE_LOG_ERROR', - 'DFUSE_LOG_DEBUG', - 'DFUSE_LOG_INFO', - 'DFUSE_TRA_WARNING', - 'DFUSE_TRA_ERROR', - 'DFUSE_TRA_DEBUG', - 'DFUSE_TRA_INFO', - 'DH_PERROR_SYS', - 'DH_PERROR_DER', - 'DL_CDEBUG', - 'PRINT_ERROR', -] +PREFIXES_NNL = ['DFUSE_LOG_WARNING', 'DFUSE_LOG_ERROR', 'DFUSE_LOG_DEBUG', 'DFUSE_LOG_INFO', + 'DFUSE_TRA_WARNING', 'DFUSE_TRA_ERROR', 'DFUSE_TRA_DEBUG', 'DFUSE_TRA_INFO', + 'DH_PERROR_SYS', 'DH_PERROR_DER', 'DL_CDEBUG', 'PRINT_ERROR'] for prefix in ['DL', 'DHL', 'DS', 'DHS']: for suffix in ['ERROR', 'WARN', 'INFO']: @@ -174,7 +142,7 @@ def __next__(self): PREFIXES_ALL.extend(PREFIXES_NNL) -class AllChecks: +class AllChecks(): """All the checks in one class""" def __init__(self, file_object): @@ -351,7 +319,7 @@ def check_df_rc(self, line): if any(map(msg.endswith, [' ', '=', '.', ',', ':', ';'])): msg = msg[:-1] if msg.endswith(var_name): - msg = msg[: -len(var_name)] + msg = msg[:-len(var_name)] if msg.endswith('rc'): msg = msg[:-2] diff --git a/utils/cq/daos_pylint.py b/utils/cq/daos_pylint.py index 92f9c8def72..22e1fbc997c 100755 --- a/utils/cq/daos_pylint.py +++ b/utils/cq/daos_pylint.py @@ -18,15 +18,11 @@ from pylint.lint import Run from pylint.reporters.collecting_reporter import CollectingReporter except ImportError: + if os.path.exists('venv'): - sys.path.append( - os.path.join( - 'venv', - 'lib', - f'python{sys.version_info.major}.{sys.version_info.minor}', - 'site-packages', - ) - ) + sys.path.append(os.path.join('venv', 'lib', + f'python{sys.version_info.major}.{sys.version_info.minor}', + 'site-packages')) try: from pylint.constants import full_version from pylint.lint import Run @@ -67,7 +63,7 @@ # also be enabled shortly however we have a number to correct or resolve before enabling. 
-class WrapScript: +class WrapScript(): """Create a wrapper for a scons file and maintain a line mapping An update here is needed as files in site_scons/*.py do not automatically import SCons but @@ -75,6 +71,7 @@ class WrapScript: """ def __init__(self, fname, from_stdin): + self.line_map = {} # pylint: disable-next=consider-using-with self._outfile = tempfile.NamedTemporaryFile(mode='w+', prefix='daos_pylint_') @@ -162,8 +159,7 @@ def write_variables(outfile, prefix, variables): if variable.upper() == 'PREREQS': newlines += 1 outfile.write( - f'{prefix}{variable} = PreReqComponent(DefaultEnvironment(), Variables())\n' - ) + f'{prefix}{variable} = PreReqComponent(DefaultEnvironment(), Variables())\n') elif "ENV" in variable.upper(): newlines += 1 outfile.write(f'{prefix}{variable} = DefaultEnvironment()\n') @@ -188,13 +184,11 @@ def write_header(outfile): # Always import PreReqComponent here, but it'll only be used in some cases. This causes # errors in the toplevel SConstruct which are suppressed, the alternative would be to do # two passes and only add the include if needed later. - outfile.write( - """# pylint: disable-next=unused-wildcard-import,wildcard-import + outfile.write("""# pylint: disable-next=unused-wildcard-import,wildcard-import from SCons.Script import * # pylint: disable=import-outside-toplevel # pylint: disable-next=import-outside-toplevel,unused-wildcard-import,wildcard-import from SCons.Variables import * -from prereq_tools import PreReqComponent # pylint: disable=unused-import\n""" - ) +from prereq_tools import PreReqComponent # pylint: disable=unused-import\n""") return 5 def convert_line(self, line): @@ -202,7 +196,7 @@ def convert_line(self, line): return self.line_map[line] -class FileTypeList: +class FileTypeList(): """Class for sorting files Consumes a list of file/module names and sorts them into categories so that later on each @@ -219,9 +213,8 @@ def __init__(self): def file_count(self): """Return the number of files to be checked""" - return ( - len(self.ftest_files) + len(self.scons_files) + len(self.files) + len(self.fake_scons) - ) + return len(self.ftest_files) + len(self.scons_files) \ + + len(self.files) + len(self.fake_scons) def add(self, file, force=False): """Add a filename to the correct list""" @@ -347,15 +340,13 @@ def word_is_allowed(word, code): def parse_msg(msg): # Convert from a pylint message into a dict that can be using for printing. 
- vals = { - 'category': msg.category, - 'column': msg.column, - 'message-id': msg.msg_id, - 'message': msg.msg, - 'symbol': msg.symbol, - 'msg': msg.msg, - 'msg_id': msg.msg_id, - } + vals = {'category': msg.category, + 'column': msg.column, + 'message-id': msg.msg_id, + 'message': msg.msg, + 'symbol': msg.symbol, + 'msg': msg.msg, + 'msg_id': msg.msg_id} if wrapper: vals['path'] = target_file @@ -366,10 +357,9 @@ def parse_msg(msg): return vals def msg_to_github(vals): - # pylint: disable=consider-using-f-string - print( - '::{category} file={path},line={line},col={column},::{symbol}, {msg}'.format(**vals) - ) + # pylint: disable-next=consider-using-f-string + print('::{category} file={path},line={line},col={column},::{symbol}, {msg}'.format( + **vals)) failed = False rep = CollectingReporter() @@ -447,16 +437,14 @@ def msg_to_github(vals): symbols[msg.symbol] += 1 if args.output_format == 'json': - report = { - 'type': vals['category'], - 'path': msg.path, - 'module': msg.module, - 'line': vals['line'], - 'column': vals['column'], - 'symbol': vals['symbol'], - 'message': vals['message'], - 'message-id': vals['message-id'], - } + report = {'type': vals['category'], + 'path': msg.path, + 'module': msg.module, + 'line': vals['line'], + 'column': vals['column'], + 'symbol': vals['symbol'], + 'message': vals['message'], + 'message-id': vals['message-id']} if msg.obj: report['obj'] = msg.obj @@ -486,10 +474,10 @@ def msg_to_github(vals): if not types or args.reports == 'n': return failed - for mtype, count in types.most_common(): + for (mtype, count) in types.most_common(): print(f'{mtype}:{count}') - for mtype, count in symbols.most_common(): + for (mtype, count) in symbols.most_common(): print(f'{mtype}:{count}') return failed @@ -531,9 +519,8 @@ def main(): rcfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pylintrc') - parser.add_argument( - '--msg-template', default='{path}:{line}:{column}: {message-id}: {message} ({symbol})' - ) + parser.add_argument('--msg-template', + default='{path}:{line}:{column}: {message-id}: {message} ({symbol})') parser.add_argument('--reports', choices=['y', 'n'], default='y') parser.add_argument('--output-format', choices=['text', 'json', 'github'], default='text') parser.add_argument('--rcfile', default=rcfile) diff --git a/utils/cq/requirements.txt b/utils/cq/requirements.txt index b9a383b50c4..a5d00a92fa7 100644 --- a/utils/cq/requirements.txt +++ b/utils/cq/requirements.txt @@ -6,7 +6,6 @@ avocado-framework-plugin-varianter-yaml-to-mux<94 clustershell paramiko pyenchant -junitparser ## flake8 6 removed --diff option which breaks flake precommit hook. 
## https://github.com/pycqa/flake8/issues/1389 https://github.com/PyCQA/flake8/pull/1720 flake8<6.0.0 diff --git a/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py b/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py index 0d9ffba3d27..34a30c55c6e 100755 --- a/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py +++ b/utils/githooks/prepare-commit-msg.d/10-submodule-update-check.py @@ -12,9 +12,10 @@ def rebasing(): """Determines if the current operation is a rebase""" - with subprocess.Popen( - ["git", "branch"], stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) as process: + with subprocess.Popen(["git", "branch"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as process: + stdout = process.communicate()[0].decode() return stdout.split('\n', maxsplit=1)[0].startswith("* (no branch, rebasing") @@ -34,10 +35,8 @@ def submodule_check(modname, msg_file): with open(msg_file, 'r', encoding='utf-8') as file: lines = file.readlines() - message = ( - f'# WARNING *** This patch modifies the {modname} reference. ' - 'Are you sure this is intended? *** WARNING' - ) + message = f'# WARNING *** This patch modifies the {modname} reference. ' \ + 'Are you sure this is intended? *** WARNING' if lines[0] != message: lines = [message, "\n", "\n"] + lines @@ -48,12 +47,12 @@ def submodule_check(modname, msg_file): def main(msg_file): """main""" - for line in ( - subprocess.check_output(['git', 'submodule', 'status']).decode().rstrip().split('\n') - ): + for line in subprocess.check_output(['git', 'submodule', + 'status']).decode().rstrip().split('\n'): if line: submodule_check(line[1:].split(' ')[1], msg_file) if __name__ == '__main__': + main(sys.argv[1]) diff --git a/utils/node_local_test.py b/utils/node_local_test.py index 2d15ae5820c..770c30dc464 100755 --- a/utils/node_local_test.py +++ b/utils/node_local_test.py @@ -79,10 +79,11 @@ def umount(path, background=False): return ret.returncode -class NLTConf: +class NLTConf(): """Helper class for configuration""" def __init__(self, json_file, args): + with open(json_file, 'r') as ofh: self._bc = json.load(ofh) self.agent_dir = None @@ -92,7 +93,8 @@ def __init__(self, json_file, args): self.valgrind_errors = False self.log_timer = CulmTimer() self.compress_timer = CulmTimer() - self.dfuse_parent_dir = tempfile.mkdtemp(dir=args.dfuse_dir, prefix='dnt_dfuse_') + self.dfuse_parent_dir = tempfile.mkdtemp(dir=args.dfuse_dir, + prefix='dnt_dfuse_') self.tmp_dir = None if args.class_name: self.tmp_dir = join('nlt_logs', args.class_name) @@ -121,10 +123,10 @@ def set_args(self, args): size = args.max_log_size if size.endswith('MiB'): size = int(size[:-3]) - size *= 1024 * 1024 + size *= (1024 * 1024) elif size.endswith('GiB'): size = int(size[:-3]) - size *= 1024 * 1024 * 1024 + size *= (1024 * 1024 * 1024) self.max_log_size = int(size) def __getitem__(self, key): @@ -149,7 +151,7 @@ def flush_bz2(self): self.compress_timer.stop() -class CulmTimer: +class CulmTimer(): """Class to keep track of elapsed time so we know where to focus performance tuning""" def __init__(self): @@ -165,7 +167,7 @@ def stop(self): self.total += time.perf_counter() - self._start -class BoolRatchet: +class BoolRatchet(): """Used for saving test results""" # Any call to fail() of add_result with a True value will result @@ -184,7 +186,7 @@ def add_result(self, result): self.fail() -class WarningsFactory: +class WarningsFactory(): """Class to parse warnings, and save to JSON output file Take a list of failures, and output the data in a way 
that is best @@ -194,9 +196,13 @@ class WarningsFactory: # Error levels supported by the reporting are LOW, NORMAL, HIGH, ERROR. - def __init__( - self, filename, junit=False, class_id=None, post=False, post_error=False, check=None - ): + def __init__(self, + filename, + junit=False, + class_id=None, + post=False, + post_error=False, + check=None): # pylint: disable=consider-using-with self._fd = open(filename, 'w') self.filename = filename @@ -220,9 +226,8 @@ def __init__( tc_startup = junit_xml.TestCase('Startup', classname=self._class_name('core')) tc_sanity = junit_xml.TestCase('Sanity', classname=self._class_name('core')) tc_sanity.add_error_info('NLT exited abnormally') - self.test_suite = junit_xml.TestSuite( - 'Node Local Testing', test_cases=[tc_startup, tc_sanity] - ) + self.test_suite = junit_xml.TestSuite('Node Local Testing', + test_cases=[tc_startup, tc_sanity]) self._write_test_file() else: self.test_suite = None @@ -254,16 +259,8 @@ def __del__(self): self.test_suite = None self.close() - def add_test_case( - self, - name, - failure=None, - test_class='core', - output=None, - duration=None, - stdout=None, - stderr=None, - ): + def add_test_case(self, name, failure=None, test_class='core', output=None, duration=None, + stdout=None, stderr=None): """Add a test case to the results class and other metadata will be set automatically, @@ -273,13 +270,8 @@ class and other metadata will be set automatically, if not self.test_suite: return - test_case = junit_xml.TestCase( - name, - classname=self._class_name(test_class), - elapsed_sec=duration, - stdout=stdout, - stderr=stderr, - ) + test_case = junit_xml.TestCase(name, classname=self._class_name(test_class), + elapsed_sec=duration, stdout=stdout, stderr=stderr) if failure: test_case.add_failure_info(failure, output=output) self.test_suite.test_cases.append(test_case) @@ -312,7 +304,7 @@ def explain(self, line, log_file, esignal): if count == 0: return - for sline, smessage in self.pending: + for (sline, smessage) in self.pending: locs.add(f'{sline.filename}:{sline.lineno}') symptoms.add(smessage) @@ -436,7 +428,7 @@ def get_base_env(clean=False): return env -class DaosPool: +class DaosPool(): """Class to store data about daos pools""" def __init__(self, server, pool_uuid, label): @@ -479,7 +471,7 @@ def fetch_containers(self): return containers -class DaosCont: +class DaosCont(): """Class to store data about daos containers""" def __init__(self, cont_uuid, label, pool): @@ -522,17 +514,15 @@ def destroy(self, valgrind=True, log_check=True): Raises: NLTestFail: If Pool was not provided when object created. 
""" - destroy_container( - self.pool.conf, self.pool.id(), self.id(), valgrind=valgrind, log_check=log_check - ) + destroy_container(self.pool.conf, self.pool.id(), self.id(), + valgrind=valgrind, log_check=log_check) -class DaosServer: +class DaosServer(): """Manage a DAOS server instance""" - def __init__( - self, conf, test_class=None, valgrind=False, wf=None, fatal_errors=None, enable_fi=False - ): + def __init__(self, conf, test_class=None, valgrind=False, wf=None, fatal_errors=None, + enable_fi=False): self.running = False self._file = __file__.lstrip('./') self._sp = None @@ -553,23 +543,25 @@ def __init__( self.engines = conf.args.engine_count self.sys_ram_rsvd = conf.args.system_ram_reserved # pylint: disable=consider-using-with - self.control_log = tempfile.NamedTemporaryFile( - prefix='dnt_control_', suffix='.log', dir=conf.tmp_dir, delete=False - ) - self.helper_log = tempfile.NamedTemporaryFile( - prefix='dnt_helper_', suffix='.log', dir=conf.tmp_dir, delete=False - ) - self.agent_log = tempfile.NamedTemporaryFile( - prefix='dnt_agent_', suffix='.log', dir=conf.tmp_dir, delete=False - ) + self.control_log = tempfile.NamedTemporaryFile(prefix='dnt_control_', + suffix='.log', + dir=conf.tmp_dir, + delete=False) + self.helper_log = tempfile.NamedTemporaryFile(prefix='dnt_helper_', + suffix='.log', + dir=conf.tmp_dir, + delete=False) + self.agent_log = tempfile.NamedTemporaryFile(prefix='dnt_agent_', + suffix='.log', + dir=conf.tmp_dir, + delete=False) self.server_logs = [] for engine in range(self.engines): prefix = f'dnt_server_{self._test_class}_{engine}_' - self.server_logs.append( - tempfile.NamedTemporaryFile( - prefix=prefix, suffix='.log', dir=conf.tmp_dir, delete=False - ) - ) + self.server_logs.append(tempfile.NamedTemporaryFile(prefix=prefix, + suffix='.log', + dir=conf.tmp_dir, + delete=False)) self.__process_name = 'daos_engine' if self.valgrind: self.__process_name = 'memcheck-amd64-' @@ -635,9 +627,8 @@ def _add_test_case(self, name, failure=None, duration=None): if not self._test_class: return - self.conf.wf.add_test_case( - name, failure=failure, duration=duration, test_class=self._test_class - ) + self.conf.wf.add_test_case(name, failure=failure, duration=duration, + test_class=self._test_class) def _check_timing(self, name, start, max_time): elapsed = time.perf_counter() - start @@ -680,15 +671,13 @@ def start(self): plain_env = os.environ.copy() if self.valgrind: - valgrind_args = [ - '--fair-sched=yes', - '--gen-suppressions=all', - '--xml=yes', - '--xml-file=dnt.server.%p.memcheck.xml', - '--num-callers=10', - '--track-origins=yes', - '--leak-check=full', - ] + valgrind_args = ['--fair-sched=yes', + '--gen-suppressions=all', + '--xml=yes', + '--xml-file=dnt.server.%p.memcheck.xml', + '--num-callers=10', + '--track-origins=yes', + '--leak-check=full'] suppression_file = join('src', 'cart', 'utils', 'memcheck-cart.supp') if not os.path.exists(suppression_file): suppression_file = join(self.conf['PREFIX'], 'etc', 'memcheck-cart.supp') @@ -702,7 +691,8 @@ def start(self): fd.write(f"export PATH={join(self.conf['PREFIX'], 'bin')}:$PATH\n") fd.write(f'exec valgrind {" ".join(valgrind_args)} daos_engine "$@"\n') - os.chmod(join(self._io_server_dir.name, 'daos_engine'), stat.S_IXUSR | stat.S_IRUSR) + os.chmod(join(self._io_server_dir.name, 'daos_engine'), + stat.S_IXUSR | stat.S_IRUSR) plain_env['PATH'] = f'{self._io_server_dir.name}:{plain_env["PATH"]}' self.max_start_time = 300 @@ -729,7 +719,9 @@ def start(self): if self._fi: # Set D_ALLOC to fail, but do not 
enable it. This can be changed later via # the set_fi() method. - faults = {'fault_config': [{'id': 0, 'probability_x': 0, 'probability_y': 100}]} + faults = {'fault_config': [{'id': 0, + 'probability_x': 0, + 'probability_y': 100}]} self._fi_file = tempfile.NamedTemporaryFile(prefix='fi_', suffix='.yaml') @@ -737,7 +729,7 @@ def start(self): self._fi_file.flush() server_env['D_FI_CONFIG'] = self._fi_file.name - for key, value in server_env.items(): + for (key, value) in server_env.items(): # If server log is set via server_debug then do not also set env settings. if self.conf.args.server_debug and key in ('DD_MASK', 'DD_SUBSYS', 'D_LOG_MASK'): continue @@ -780,16 +772,11 @@ def start(self): agent_bin = join(self.conf['PREFIX'], 'bin', 'daos_agent') - agent_cmd = [ - agent_bin, - '--config-path', - agent_config, - '--insecure', - '--runtime_dir', - self.agent_dir, - '--logfile', - self.agent_log.name, - ] + agent_cmd = [agent_bin, + '--config-path', agent_config, + '--insecure', + '--runtime_dir', self.agent_dir, + '--logfile', self.agent_log.name] if not self.conf.args.server_debug and not self.conf.args.client_debug: agent_cmd.append('--debug') @@ -960,7 +947,10 @@ def run_dmg(self, cmd): exe_cmd.extend(cmd) print(f'running {exe_cmd}') - return subprocess.run(exe_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) + return subprocess.run(exe_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False) def run_dmg_json(self, cmd): """Run the specified dmg command in json mode @@ -1036,17 +1026,17 @@ def run_daos_client_cmd(self, cmd): cmd_env = get_base_env() - with tempfile.NamedTemporaryFile( - prefix=f'dnt_cmd_{get_inc_id()}_', suffix='.log', dir=self.conf.tmp_dir, delete=False - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=f'dnt_cmd_{get_inc_id()}_', + suffix='.log', + dir=self.conf.tmp_dir, + delete=False) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name cmd_env['DAOS_AGENT_DRPC_DIR'] = self.conf.agent_dir - rc = subprocess.run( - exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env, check=False - ) + rc = subprocess.run(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=cmd_env, check=False) if rc.stderr != b'': print('Stderr from command') @@ -1093,12 +1083,10 @@ def run_daos_client_cmd_pil4dfs(self, cmd, check=True, container=None, report=Tr cmd_env = get_base_env() - with tempfile.NamedTemporaryFile( - prefix=f'dnt_pil4dfs_{cmd[0]}_{get_inc_id()}_', - suffix='.log', - dir=self.conf.tmp_dir, - delete=False, - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=f'dnt_pil4dfs_{cmd[0]}_{get_inc_id()}_', + suffix='.log', + dir=self.conf.tmp_dir, + delete=False) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name @@ -1123,9 +1111,8 @@ def run_daos_client_cmd_pil4dfs(self, cmd, check=True, container=None, report=Tr print('Run command: ') print(cmd) - rc = subprocess.run( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=cmd_env, check=False - ) + rc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, + env=cmd_env, check=False) print(rc) if rc.stderr != b'': @@ -1177,51 +1164,53 @@ def set_fi(self, probability=0): agent_bin = join(self.conf['PREFIX'], 'bin', 'daos_agent') - with tempfile.TemporaryDirectory( - prefix='dnt_addr_', - ) as addr_dir: + with tempfile.TemporaryDirectory(prefix='dnt_addr_',) as addr_dir: + addr_file = join(addr_dir, f'{system_name}.attach_info_tmp') - agent_cmd = [agent_bin, '-i', '-s', self.agent_dir, 
'dump-attachinfo', '-o', addr_file] + agent_cmd = [agent_bin, + '-i', + '-s', + self.agent_dir, + 'dump-attachinfo', + '-o', + addr_file] rc = subprocess.run(agent_cmd, env=cmd_env, check=True) print(rc) # options here are: fault_id,max_faults,probability,err_code[,argument] - cmd = [ - 'set_fi_attr', - '--cfg_path', - addr_dir, - '--group-name', - 'daos_server', - '--rank', - '0', - '--attr', - f'0,0,{probability},0,0', - ] + cmd = ['set_fi_attr', + '--cfg_path', + addr_dir, + '--group-name', + 'daos_server', + '--rank', + '0', + '--attr', + f'0,0,{probability},0,0'] exec_cmd.append(join(self.conf['PREFIX'], 'bin', 'cart_ctl')) exec_cmd.extend(cmd) - with tempfile.NamedTemporaryFile( - prefix=f'dnt_crt_ctl_{get_inc_id()}_', suffix='.log', delete=False - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=f'dnt_crt_ctl_{get_inc_id()}_', + suffix='.log', + delete=False) as log_file: + cmd_env['D_LOG_FILE'] = log_file.name cmd_env['DAOS_AGENT_DRPC_DIR'] = self.agent_dir - rc = subprocess.run( - exec_cmd, - env=cmd_env, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - check=False, - ) + rc = subprocess.run(exec_cmd, + env=cmd_env, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False) print(rc) valgrind_hdl.convert_xml() log_test(self.conf, log_file.name, show_memleaks=False) -class ValgrindHelper: +class ValgrindHelper(): """Class for running valgrind commands This helps setup the command line required, and @@ -1230,6 +1219,7 @@ class ValgrindHelper: """ def __init__(self, conf, logid=None): + # Set this to False to disable valgrind, which will run faster. self.conf = conf self.use_valgrind = True @@ -1248,19 +1238,16 @@ def get_cmd_prefix(self): if not self._logid: self._logid = get_inc_id() - with tempfile.NamedTemporaryFile( - prefix=f'dnt.{self._logid}.', dir='.', suffix='.memcheck', delete=False - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=f'dnt.{self._logid}.', dir='.', + suffix='.memcheck', delete=False) as log_file: self._xml_file = log_file.name - cmd = [ - 'valgrind', - f'--xml-file={self._xml_file}', - '--xml=yes', - '--fair-sched=yes', - '--gen-suppressions=all', - '--error-exitcode=42', - ] + cmd = ['valgrind', + f'--xml-file={self._xml_file}', + '--xml=yes', + '--fair-sched=yes', + '--gen-suppressions=all', + '--error-exitcode=42'] if self.full_check: cmd.extend(['--leak-check=full', '--show-leak-kinds=all']) @@ -1289,23 +1276,13 @@ def convert_xml(self): os.unlink(self._xml_file) -class DFuse: +class DFuse(): """Manage a dfuse instance""" instance_num = 0 - def __init__( - self, - daos, - conf, - pool=None, - container=None, - mount_path=None, - uns_path=None, - caching=True, - wbcache=True, - multi_user=False, - ): + def __init__(self, daos, conf, pool=None, container=None, mount_path=None, uns_path=None, + caching=True, wbcache=True, multi_user=False): if mount_path: self.dir = mount_path else: @@ -1335,6 +1312,7 @@ def __init__( os.mkdir(self.dir) def __str__(self): + if self._sp: running = 'running' else: @@ -1535,13 +1513,8 @@ def il_cmd(self, cmd, check_read=True, check_write=True, check_fstat=True): check_fstat = False try: - log_test( - self.conf, - log_name, - check_read=check_read, - check_write=check_write, - check_fstat=check_fstat, - ) + log_test(self.conf, log_name, check_read=check_read, check_write=check_write, + check_fstat=check_fstat) assert ret.returncode == 0 except NLTestNoFunction as error: command = ' '.join(cmd) @@ -1553,13 +1526,8 @@ def il_cmd(self, cmd, check_read=True, check_write=True, check_fstat=True): def 
run_query(self, use_json=False, quiet=False): """Run filesystem query""" - rc = run_daos_cmd( - self.conf, - ['filesystem', 'query', self.dir], - use_json=use_json, - log_check=quiet, - valgrind=quiet, - ) + rc = run_daos_cmd(self.conf, ['filesystem', 'query', self.dir], + use_json=use_json, log_check=quiet, valgrind=quiet) print(rc) return rc @@ -1644,7 +1612,7 @@ def import_daos(server, conf): return daos -class DaosCmdReturn: +class DaosCmdReturn(): """Class to enable pretty printing of daos output""" def __init__(self): @@ -1675,16 +1643,14 @@ def __str__(self): return output -def run_daos_cmd( - conf, - cmd, - show_stdout=False, - valgrind=True, - log_check=True, - ignore_busy=False, - use_json=False, - cwd=None, -): +def run_daos_cmd(conf, + cmd, + show_stdout=False, + valgrind=True, + log_check=True, + ignore_busy=False, + use_json=False, + cwd=None): """Run a DAOS command Run a command, returning what subprocess.run() would. @@ -1719,17 +1685,17 @@ def run_daos_cmd( del cmd_env['DD_SUBSYS'] del cmd_env['D_LOG_MASK'] - with tempfile.NamedTemporaryFile( - prefix=f'dnt_cmd_{get_inc_id()}_', suffix='.log', dir=conf.tmp_dir, delete=False - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=f'dnt_cmd_{get_inc_id()}_', + suffix='.log', + dir=conf.tmp_dir, + delete=False) as log_file: log_name = log_file.name cmd_env['D_LOG_FILE'] = log_name cmd_env['DAOS_AGENT_DRPC_DIR'] = conf.agent_dir - rc = subprocess.run( - exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env, check=False, cwd=cwd - ) + rc = subprocess.run(exec_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=cmd_env, check=False, cwd=cwd) if rc.stderr != b'': print('Stderr from command') @@ -1764,20 +1730,8 @@ def run_daos_cmd( # pylint: disable-next=too-many-arguments -def create_cont( - conf, - pool=None, - ctype=None, - label=None, - path=None, - oclass=None, - dir_oclass=None, - file_oclass=None, - hints=None, - valgrind=False, - log_check=True, - cwd=None, -): +def create_cont(conf, pool=None, ctype=None, label=None, path=None, oclass=None, dir_oclass=None, + file_oclass=None, hints=None, valgrind=False, log_check=True, cwd=None): """Use 'daos' command to create a new container. Args: @@ -1830,17 +1784,16 @@ def create_cont( def _create_cont(): """Helper function for create_cont""" - rc = run_daos_cmd(conf, cmd, use_json=True, log_check=log_check, valgrind=valgrind, cwd=cwd) + rc = run_daos_cmd(conf, cmd, use_json=True, log_check=log_check, valgrind=valgrind, + cwd=cwd) print(rc) return rc rc = _create_cont() - if ( - rc.returncode == 1 - and rc.json['error'] - == 'failed to create container: DER_EXIST(-1004): Entity already exists' - ): + if rc.returncode == 1 and \ + rc.json['error'] == 'failed to create container: DER_EXIST(-1004): Entity already exists': + # If a path is set DER_EXIST may refer to the path, not a container so do not attempt to # remove and retry in this case. if path is None: @@ -1868,11 +1821,9 @@ def destroy_container(conf, pool, container, valgrind=True, log_check=True): # This shouldn't happen but can on unclean shutdown, file it as a test failure so it does # not get lost, however destroy the container and attempt to continue. 
# DAOS-8860 - conf.wf.add_test_case( - f'destroy_container_{pool}/{container}', - failure='Failed to destroy container', - output=rc, - ) + conf.wf.add_test_case(f'destroy_container_{pool}/{container}', + failure='Failed to destroy container', + output=rc) cmd = ['container', 'destroy', '--force', pool, container] rc = run_daos_cmd(conf, cmd, valgrind=valgrind, use_json=True) print(rc) @@ -1899,7 +1850,6 @@ def needs_dfuse(method): Runs every test twice, once with caching enabled, and once with caching disabled. """ - @functools.wraps(method) def _helper(self): if self.call_index == 0: @@ -1909,7 +1859,10 @@ def _helper(self): else: caching = False - self.dfuse = DFuse(self.server, self.conf, caching=caching, container=self.container) + self.dfuse = DFuse(self.server, + self.conf, + caching=caching, + container=self.container) self.dfuse.start(v_hint=self.test_name) try: rc = method(self) @@ -1922,7 +1875,7 @@ def _helper(self): # pylint: disable-next=invalid-name -class needs_dfuse_with_opt: +class needs_dfuse_with_opt(): """Decorator class for starting dfuse under posix_tests class By default runs the method twice, once with caching and once without, however can be @@ -1939,9 +1892,9 @@ def __init__(self, caching=None, wbcache=True, single_threaded=False): def __call__(self, method): """Wrapper function""" - @functools.wraps(method) def _helper(obj): + caching = self.caching if caching is None: if obj.call_index == 0: @@ -1951,9 +1904,11 @@ def _helper(obj): else: caching = False - obj.dfuse = DFuse( - obj.server, obj.conf, caching=caching, wbcache=self.wbcache, container=obj.container - ) + obj.dfuse = DFuse(obj.server, + obj.conf, + caching=caching, + wbcache=self.wbcache, + container=obj.container) obj.dfuse.start(v_hint=method.__name__, single_threaded=self.single_threaded) try: rc = method(obj) @@ -1961,11 +1916,10 @@ def _helper(obj): if obj.dfuse.stop(): obj.fatal_errors = True return rc - return _helper -class PrintStat: +class PrintStat(): """Class for nicely showing file 'stat' data, similar to ls -l""" headers = ['uid', 'gid', 'size', 'mode', 'filename'] @@ -1992,9 +1946,11 @@ def add(self, filename, attr=None, show_dir=False): if attr is None: attr = os.stat(filename) - self._stats.append( - [attr.st_uid, attr.st_gid, attr.st_size, stat.filemode(attr.st_mode), filename] - ) + self._stats.append([attr.st_uid, + attr.st_gid, + attr.st_size, + stat.filemode(attr.st_mode), + filename]) self.count += 1 if show_dir: @@ -2010,7 +1966,7 @@ def __eq__(self, other): # This is test code where methods are tests, so we want to have lots of them. 
-class PosixTests: +class PosixTests(): """Class for adding standalone unit tests""" # pylint: disable=too-many-public-methods @@ -2069,18 +2025,12 @@ def test_cont_list(self): @needs_dfuse_with_opt(caching=False) def test_oclass(self): """Test container object class options""" - container = create_cont( - self.conf, - self.pool, - ctype="POSIX", - label='oclass_test', - oclass='S1', - dir_oclass='S2', - file_oclass='S4', - ) - run_daos_cmd( - self.conf, ['container', 'query', self.pool.id(), container.id()], show_stdout=True - ) + container = create_cont(self.conf, self.pool, ctype="POSIX", label='oclass_test', + oclass='S1', dir_oclass='S2', file_oclass='S4') + run_daos_cmd(self.conf, + ['container', 'query', + self.pool.id(), container.id()], + show_stdout=True) dfuse = DFuse(self.server, self.conf, container=container) dfuse.use_valgrind = False @@ -2115,23 +2065,21 @@ def test_oclass(self): def test_cache(self): """Test with caching enabled""" - run_daos_cmd( - self.conf, ['container', 'query', self.pool.id(), self.container.id()], show_stdout=True - ) - - cont_attrs = { - 'dfuse-attr-time': 2, - 'dfuse-dentry-time': '100s', - 'dfuse-dentry-dir-time': '100s', - 'dfuse-ndentry-time': '100s', - } + run_daos_cmd(self.conf, + ['container', 'query', + self.pool.id(), self.container.id()], + show_stdout=True) + + cont_attrs = {'dfuse-attr-time': 2, + 'dfuse-dentry-time': '100s', + 'dfuse-dentry-dir-time': '100s', + 'dfuse-ndentry-time': '100s'} self.container.set_attrs(cont_attrs) - run_daos_cmd( - self.conf, - ['container', 'get-attr', self.pool.id(), self.container.id()], - show_stdout=True, - ) + run_daos_cmd(self.conf, + ['container', 'get-attr', + self.pool.id(), self.container.id()], + show_stdout=True) dfuse = DFuse(self.server, self.conf, container=self.container) dfuse.start() @@ -2160,12 +2108,14 @@ def test_cont_info(self): """Check that daos container info and fs get-attr works on container roots""" def _check_cmd(check_path): - rc = run_daos_cmd( - self.conf, ['container', 'query', '--path', check_path], use_json=True - ) + rc = run_daos_cmd(self.conf, + ['container', 'query', '--path', check_path], + use_json=True) print(rc) assert rc.returncode == 0, rc - rc = run_daos_cmd(self.conf, ['fs', 'get-attr', '--path', check_path], use_json=True) + rc = run_daos_cmd(self.conf, + ['fs', 'get-attr', '--path', check_path], + use_json=True) print(rc) assert rc.returncode == 0, rc @@ -2271,10 +2221,16 @@ def test_pre_read(self): def test_two_mounts(self): """Create two mounts, and check that a file created in one can be read from the other""" - dfuse0 = DFuse(self.server, self.conf, caching=False, container=self.container) + dfuse0 = DFuse(self.server, + self.conf, + caching=False, + container=self.container) dfuse0.start(v_hint='two_0') - dfuse1 = DFuse(self.server, self.conf, caching=True, container=self.container) + dfuse1 = DFuse(self.server, + self.conf, + caching=True, + container=self.container) dfuse1.start(v_hint='two_1') file0 = join(dfuse0.dir, 'file') @@ -2312,20 +2268,23 @@ def test_cache_expire(self): """ cache_time = 20 - cont_attrs = { - 'dfuse-data-cache': False, - 'dfuse-attr-time': cache_time, - 'dfuse-dentry-time': cache_time, - 'dfuse-ndentry-time': cache_time, - } + cont_attrs = {'dfuse-data-cache': False, + 'dfuse-attr-time': cache_time, + 'dfuse-dentry-time': cache_time, + 'dfuse-ndentry-time': cache_time} self.container.set_attrs(cont_attrs) - dfuse0 = DFuse( - self.server, self.conf, caching=True, wbcache=False, container=self.container - ) + dfuse0 = 
DFuse(self.server, + self.conf, + caching=True, + wbcache=False, + container=self.container) dfuse0.start(v_hint='expire_0') - dfuse1 = DFuse(self.server, self.conf, caching=False, container=self.container) + dfuse1 = DFuse(self.server, + self.conf, + caching=False, + container=self.container) dfuse1.start(v_hint='expire_1') # Create ten files. @@ -2708,29 +2667,22 @@ def test_il(self): # Copy something into a container self.dfuse.il_cmd(['cp', '/bin/bash', sub_cont_dir], check_read=False) # Read it from within a container - self.dfuse.il_cmd( - ['md5sum', join(sub_cont_dir, 'bash')], - check_read=False, - check_write=False, - check_fstat=False, - ) - self.dfuse.il_cmd( - [ - 'dd', - f'if={join(sub_cont_dir, "bash")}', - f'of={join(sub_cont_dir, "bash_copy")}', - 'iflag=direct', - 'oflag=direct', - 'bs=128k', - ], - check_fstat=False, - ) + self.dfuse.il_cmd(['md5sum', join(sub_cont_dir, 'bash')], + check_read=False, check_write=False, check_fstat=False) + self.dfuse.il_cmd(['dd', + f'if={join(sub_cont_dir, "bash")}', + f'of={join(sub_cont_dir, "bash_copy")}', + 'iflag=direct', + 'oflag=direct', + 'bs=128k'], + check_fstat=False) @needs_dfuse def test_xattr(self): """Perform basic tests with extended attributes""" new_file = join(self.dfuse.dir, 'attr_file') with open(new_file, 'w') as fd: + xattr.set(fd, 'user.mine', 'init_value') # This should fail as a security test. try: @@ -2746,7 +2698,7 @@ def test_xattr(self): pass xattr.set(fd, 'user.Xfuse.ids', b'other_value') - for key, value in xattr.get_all(fd): + for (key, value) in xattr.get_all(fd): print(f'xattr is {key}:{value}') @needs_dfuse @@ -2779,7 +2731,7 @@ def test_list_xattr(self): xattr.set(self.dfuse.dir, 'user.dummy', 'short string') - for key, value in xattr.get_all(self.dfuse.dir): + for (key, value) in xattr.get_all(self.dfuse.dir): expected_keys.remove(key) print(f'xattr is {key}:{value}') @@ -2811,7 +2763,8 @@ def test_chmod(self): with open(fname, 'w'): pass - modes = [stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, stat.S_IRUSR] + modes = [stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR, + stat.S_IRUSR] for mode in modes: os.chmod(fname, mode) @@ -2920,7 +2873,10 @@ def test_rename_clobber(self): fd.write('test') # Start another dfuse instance to move the files around without the kernel knowing. - dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) + dfuse = DFuse(self.server, + self.conf, + container=self.container, + caching=False) dfuse.start(v_hint='rename_other') print(os.listdir(self.dfuse.dir)) @@ -3077,7 +3033,10 @@ def test_complex_unlink(self): fds.append(fd) # Start another dfuse instance to move the files around without the kernel knowing. - dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) + dfuse = DFuse(self.server, + self.conf, + container=self.container, + caching=False) dfuse.start(v_hint='unlink') print(os.listdir(self.dfuse.dir)) @@ -3102,7 +3061,10 @@ def test_complex_unlink(self): def test_cont_rw(self): """Test write access to another users container""" - dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) + dfuse = DFuse(self.server, + self.conf, + container=self.container, + caching=False) dfuse.start(v_hint='cont_rw_1') @@ -3127,37 +3089,30 @@ def test_cont_rw(self): self.fatal_errors = True # Update container ACLs so current user has rw permissions only, the minimum required. 
- rc = run_daos_cmd( - self.conf, - [ - 'container', - 'update-acl', - self.pool.id(), - self.container.id(), - '--entry', - f'A::{os.getlogin()}@:rwta', - ], - ) + rc = run_daos_cmd(self.conf, ['container', + 'update-acl', + self.pool.id(), + self.container.id(), + '--entry', + f'A::{os.getlogin()}@:rwta']) print(rc) # Assign the container to someone else. - rc = run_daos_cmd( - self.conf, - [ - 'container', - 'set-owner', - self.pool.id(), - self.container.id(), - '--user', - 'root@', - '--group', - 'root@', - ], - ) + rc = run_daos_cmd(self.conf, ['container', + 'set-owner', + self.pool.id(), + self.container.id(), + '--user', + 'root@', + '--group', + 'root@']) print(rc) # Now start dfuse and access the container, see who the file is owned by. - dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) + dfuse = DFuse(self.server, + self.conf, + container=self.container, + caching=False) dfuse.start(v_hint='cont_rw_2') stat_log = PrintStat() @@ -3211,7 +3166,10 @@ def test_complex_rename(self): with open(fname, 'w') as ofd: print(os.fstat(ofd.fileno())) - dfuse = DFuse(self.server, self.conf, container=self.container, caching=False) + dfuse = DFuse(self.server, + self.conf, + container=self.container, + caching=False) dfuse.start(v_hint='rename') os.mkdir(join(dfuse.dir, 'step_dir')) @@ -3225,10 +3183,8 @@ def test_complex_rename(self): except FileNotFoundError: print('Failed to fstat() replaced file') - os.rename( - join(self.dfuse.dir, 'step_dir', 'file-new'), - join(self.dfuse.dir, 'new_dir', 'my-file'), - ) + os.rename(join(self.dfuse.dir, 'step_dir', 'file-new'), + join(self.dfuse.dir, 'new_dir', 'my-file')) print(os.fstat(ofd.fileno())) @@ -3238,32 +3194,31 @@ def test_complex_rename(self): def test_cont_ro(self): """Test access to a read-only container""" # Update container ACLs so current user has 'rta' permissions only, the minimum required. - rc = run_daos_cmd( - self.conf, - [ - 'container', - 'update-acl', - self.pool.id(), - self.container.id(), - '--entry', - f'A::{os.getlogin()}@:rta', - ], - ) + rc = run_daos_cmd(self.conf, ['container', + 'update-acl', + self.pool.id(), + self.container.id(), + '--entry', + f'A::{os.getlogin()}@:rta']) print(rc) assert rc.returncode == 0 # Assign the container to someone else. - rc = run_daos_cmd( - self.conf, - ['container', 'set-owner', self.pool.id(), self.container.id(), '--user', 'root@'], - ) + rc = run_daos_cmd(self.conf, ['container', + 'set-owner', + self.pool.id(), + self.container.id(), + '--user', + 'root@']) print(rc) assert rc.returncode == 0 # Now start dfuse and access the container, this should require read-only opening. - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='cont_ro') print(os.listdir(dfuse.dir)) @@ -3309,7 +3264,10 @@ def test_with_path(self): cont_path = join(tmp_dir, 'my-cont') create_cont(self.conf, self.pool, path=cont_path) - dfuse = DFuse(self.server, self.conf, caching=True, uns_path=cont_path) + dfuse = DFuse(self.server, + self.conf, + caching=True, + uns_path=cont_path) dfuse.start(v_hint='with_path') # Simply write a file. 
This will fail if dfuse isn't backed via @@ -3406,7 +3364,10 @@ def test_uns_basic(self): def test_dfuse_dio_off(self): """Test for dfuse with no caching options, but direct-io disabled""" self.container.set_attrs({'dfuse-direct-io-disable': 'on'}) - dfuse = DFuse(self.server, self.conf, caching=True, container=self.container) + dfuse = DFuse(self.server, + self.conf, + caching=True, + container=self.container) dfuse.start(v_hint='dio_off') @@ -3487,16 +3448,8 @@ def test_daos_fs_tool(self): assert check_dfs_tool_output(output, 'S1', '1048576') # run same command using pool, container, dfs-path, and dfs-prefix - cmd = [ - 'fs', - 'get-attr', - pool, - uns_container.id(), - '--dfs-path', - dir1, - '--dfs-prefix', - uns_path, - ] + cmd = ['fs', 'get-attr', pool, uns_container.id(), '--dfs-path', dir1, + '--dfs-prefix', uns_path] print('get-attr of d1') rc = run_daos_cmd(conf, cmd) assert rc.returncode == 0 @@ -3523,14 +3476,16 @@ def test_daos_fs_tool(self): assert check_dfs_tool_output(output, None, '1048576') # Run a command to change attr of dir1 - cmd = ['fs', 'set-attr', '--path', dir1, '--oclass', 'S2', '--chunk-size', '16'] + cmd = ['fs', 'set-attr', '--path', dir1, '--oclass', 'S2', + '--chunk-size', '16'] print('set-attr of d1') rc = run_daos_cmd(conf, cmd) assert rc.returncode == 0 print(f'rc is {rc}') # Run a command to change attr of file1, should fail - cmd = ['fs', 'set-attr', '--path', file1, '--oclass', 'S2', '--chunk-size', '16'] + cmd = ['fs', 'set-attr', '--path', file1, '--oclass', 'S2', + '--chunk-size', '16'] print('set-attr of f1') rc = run_daos_cmd(conf, cmd) print(f'rc is {rc}') @@ -3568,20 +3523,16 @@ def test_cont_copy(self): # Create a temporary directory, with one file into it and copy it into # the container. Check the return-code only, do not verify the data. # tempfile() will remove the directory on completion. - src_dir = tempfile.TemporaryDirectory( - prefix='copy_src_', - ) + src_dir = tempfile.TemporaryDirectory(prefix='copy_src_',) with open(join(src_dir.name, 'file'), 'w') as ofd: ofd.write('hello') os.symlink('file', join(src_dir.name, 'file_s')) - cmd = [ - 'filesystem', - 'copy', - '--src', - src_dir.name, - '--dst', - f'daos://{self.pool.uuid}/{self.container}', - ] + cmd = ['filesystem', + 'copy', + '--src', + src_dir.name, + '--dst', + f'daos://{self.pool.uuid}/{self.container}'] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3603,20 +3554,16 @@ def test_cont_clone(self): # Create a temporary directory, with one file into it and copy it into # the container. Check the return code only, do not verify the data. # tempfile() will remove the directory on completion. - src_dir = tempfile.TemporaryDirectory( - prefix='copy_src_', - ) + src_dir = tempfile.TemporaryDirectory(prefix='copy_src_',) with open(join(src_dir.name, 'file'), 'w') as ofd: ofd.write('hello') - cmd = [ - 'filesystem', - 'copy', - '--src', - src_dir.name, - '--dst', - f'daos://{self.pool.uuid}/{self.container.id()}', - ] + cmd = ['filesystem', + 'copy', + '--src', + src_dir.name, + '--dst', + f'daos://{self.pool.uuid}/{self.container.id()}'] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3627,14 +3574,12 @@ def test_cont_clone(self): # Now create a container uuid and do an object based copy. # The daos command will create the target container on demand. 
- cmd = [ - 'container', - 'clone', - '--src', - f'daos://{self.pool.uuid}/{self.container.id()}', - '--dst', - f'daos://{self.pool.uuid}/', - ] + cmd = ['container', + 'clone', + '--src', + f'daos://{self.pool.uuid}/{self.container.id()}', + '--dst', + f'daos://{self.pool.uuid}/'] rc = run_daos_cmd(self.conf, cmd, use_json=True) print(rc) @@ -3649,12 +3594,10 @@ def test_dfuse_perms(self): """Test permissions caching for DAOS-12577""" cache_time = 10 - cont_attrs = { - 'dfuse-data-cache': False, - 'dfuse-attr-time': cache_time, - 'dfuse-dentry-time': cache_time, - 'dfuse-ndentry-time': cache_time, - } + cont_attrs = {'dfuse-data-cache': False, + 'dfuse-attr-time': cache_time, + 'dfuse-dentry-time': cache_time, + 'dfuse-ndentry-time': cache_time} self.container.set_attrs(cont_attrs) dfuse = DFuse(self.server, self.conf, container=self.container, wbcache=False) @@ -3728,9 +3671,11 @@ def test_daos_fs_check(self): """Test DAOS FS Checker""" # pylint: disable=too-many-branches # pylint: disable=too-many-statements - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='fs_check_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -3830,16 +3775,8 @@ def test_daos_fs_check(self): self.server.run_daos_client_cmd(cmd) # run the checker while dfuse is still mounted (should fail - EX open) - cmd = [ - 'fs', - 'check', - self.pool.id(), - self.container.id(), - '--flags', - 'print', - '--dir-name', - 'lf1', - ] + cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print', '--dir-name', + 'lf1'] rc = run_daos_cmd(self.conf, cmd, ignore_busy=True) print(rc) assert rc.returncode != 0 @@ -3854,16 +3791,8 @@ def test_daos_fs_check(self): # fs check with relink should find the 2 leaked directories. # Everything under them should be relinked but not reported as leaked. - cmd = [ - 'fs', - 'check', - self.pool.id(), - self.container.id(), - '--flags', - 'print,relink', - '--dir-name', - 'lf1', - ] + cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print,relink', + '--dir-name', 'lf1'] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -3883,9 +3812,11 @@ def test_daos_fs_check(self): raise NLTestFail('Wrong number of Leaked OIDs') # remount dfuse - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='fs_check_test') path = dfuse.dir @@ -3925,16 +3856,8 @@ def test_daos_fs_check(self): # fs check with relink should find 3 leaked dirs and 1 leaked file that were directly under # test_dir2. Everything under those leaked dirs are relinked but not reported as leaked. 
- cmd = [ - 'fs', - 'check', - self.pool.id(), - self.container.id(), - '--flags', - 'print,relink', - '--dir-name', - 'lf2', - ] + cmd = ['fs', 'check', self.pool.id(), self.container.id(), '--flags', 'print,relink', + '--dir-name', 'lf2'] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -3954,9 +3877,11 @@ def test_daos_fs_check(self): raise NLTestFail('Wrong number of Leaked OIDs') # remount dfuse - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='fs_check_test') path = dfuse.dir @@ -3985,9 +3910,11 @@ def test_daos_fs_check(self): def test_daos_fs_fix(self): """Test DAOS FS Fix Tool""" - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='fs_fix_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -4042,17 +3969,8 @@ def test_daos_fs_fix(self): assert error.errno == errno.EINVAL # fix corrupted entries while dfuse is running - should fail - cmd = [ - 'fs', - 'fix-entry', - self.pool.id(), - self.container.id(), - '--dfs-path', - '/test_dir/f1', - '--type', - '--chunk-size', - '1048576', - ] + cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', '/test_dir/f1', + '--type', '--chunk-size', '1048576'] rc = run_daos_cmd(self.conf, cmd, ignore_busy=True) print(rc) assert rc.returncode != 0 @@ -4066,17 +3984,8 @@ def test_daos_fs_fix(self): self.fatal_errors = True # fix corrupted entries - cmd = [ - 'fs', - 'fix-entry', - self.pool.id(), - self.container.id(), - '--dfs-path', - '/test_dir/f1', - '--type', - '--chunk-size', - '1048576', - ] + cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', '/test_dir/f1', + '--type', '--chunk-size', '1048576'] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -4085,17 +3994,8 @@ def test_daos_fs_fix(self): if line[-1] != 'Adjusting chunk size of /test_dir/f1 to 1048576': raise NLTestFail('daos fs fix-entry /test_dir/f1') - cmd = [ - 'fs', - 'fix-entry', - self.pool.id(), - self.container.id(), - '--dfs-path', - '/test_dir/1d1/f3', - '--type', - '--chunk-size', - '1048576', - ] + cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', + '/test_dir/1d1/f3', '--type', '--chunk-size', '1048576'] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -4104,15 +4004,8 @@ def test_daos_fs_fix(self): if line[-1] != 'Adjusting chunk size of /test_dir/1d1/f3 to 1048576': raise NLTestFail('daos fs fix-entry /test_dir/1d1/f3') - cmd = [ - 'fs', - 'fix-entry', - self.pool.id(), - self.container.id(), - '--dfs-path', - '/test_dir/1d2', - '--type', - ] + cmd = ['fs', 'fix-entry', self.pool.id(), self.container.id(), '--dfs-path', + '/test_dir/1d2', '--type'] rc = run_daos_cmd(self.conf, cmd) print(rc) assert rc.returncode == 0 @@ -4122,9 +4015,11 @@ def test_daos_fs_fix(self): raise NLTestFail('daos fs fix-entry /test_dir/1d2') # remount dfuse - dfuse = DFuse( - self.server, self.conf, pool=self.pool.id(), container=self.container, caching=False - ) + dfuse = DFuse(self.server, + self.conf, + pool=self.pool.id(), + container=self.container, + caching=False) dfuse.start(v_hint='fs_fix_test') path = dfuse.dir dirname = join(path, 'test_dir') @@ -4223,16 +4118,15 @@ 
def test_pil4dfs(self): # dd to write a file file5 = join(path, 'newfile') - self.server.run_daos_client_cmd_pil4dfs( - ['dd', 'if=/dev/zero', f'of={file5}', 'bs=1', 'count=1'] - ) + self.server.run_daos_client_cmd_pil4dfs(['dd', 'if=/dev/zero', f'of={file5}', 'bs=1', + 'count=1']) # cp "/usr/bin/mkdir" to DFS and call "/usr/bin/file" to analyze the binary file file6 file6 = join(path, 'elffile') self.server.run_daos_client_cmd_pil4dfs(['cp', '/usr/bin/mkdir', file6]) self.server.run_daos_client_cmd_pil4dfs(['file', file6]) -class NltStdoutWrapper: +class NltStdoutWrapper(): """Class for capturing stdout from threads""" def __init__(self): @@ -4273,7 +4167,7 @@ def __del__(self): sys.stdout = self._stdout -class NltStderrWrapper: +class NltStderrWrapper(): """Class for capturing stderr from threads""" def __init__(self): @@ -4329,9 +4223,12 @@ def _run_test(ptl=None, function=None, test_cb=None): # performance impact. There are other tests that run with valgrind enabled so this # should not reduce coverage. try: - ptl.container = create_cont( - conf, pool, ctype="POSIX", valgrind=False, log_check=False, label=function - ) + ptl.container = create_cont(conf, + pool, + ctype="POSIX", + valgrind=False, + log_check=False, + label=function) ptl.container_label = function test_cb() ptl.container.destroy(valgrind=False, log_check=False) @@ -4340,25 +4237,21 @@ def _run_test(ptl=None, function=None, test_cb=None): trace = ''.join(traceback.format_tb(inst.__traceback__)) duration = time.perf_counter() - start out_wrapper.sprint(f'{ptl.test_name} Failed') - conf.wf.add_test_case( - ptl.test_name, - repr(inst), - stdout=out_wrapper.get_thread_output(), - stderr=err_wrapper.get_thread_err(), - output=trace, - test_class='test', - duration=duration, - ) + conf.wf.add_test_case(ptl.test_name, + repr(inst), + stdout=out_wrapper.get_thread_output(), + stderr=err_wrapper.get_thread_err(), + output=trace, + test_class='test', + duration=duration) raise duration = time.perf_counter() - start out_wrapper.sprint(f'Test {ptl.test_name} took {duration:.1f} seconds') - conf.wf.add_test_case( - ptl.test_name, - stdout=out_wrapper.get_thread_output(), - stderr=err_wrapper.get_thread_err(), - test_class='test', - duration=duration, - ) + conf.wf.add_test_case(ptl.test_name, + stdout=out_wrapper.get_thread_output(), + stderr=err_wrapper.get_thread_err(), + test_class='test', + duration=duration) if not ptl.needs_more: break ptl.call_index = ptl.call_index + 1 @@ -4378,6 +4271,7 @@ def _run_test(ptl=None, function=None, test_cb=None): _run_test(ptl=pto, test_cb=obj, function=function) else: + threads = [] slow_tests = ['test_readdir_25', 'test_uns_basic', 'test_daos_fs_tool'] @@ -4394,13 +4288,11 @@ def _run_test(ptl=None, function=None, test_cb=None): if not callable(obj): continue - thread = threading.Thread( - None, - target=_run_test, - name=f'test {function}', - kwargs={'ptl': ptl, 'test_cb': obj, 'function': function}, - daemon=True, - ) + thread = threading.Thread(None, + target=_run_test, + name=f'test {function}', + kwargs={'ptl': ptl, 'test_cb': obj, 'function': function}, + daemon=True) thread.start() threads.append(thread) @@ -4424,11 +4316,9 @@ def _run_test(ptl=None, function=None, test_cb=None): # the tests are running in parallel. We could revise this so there's a dfuse method on # posix_tests class itself if required. 
for fuse in server.fuse_procs: - conf.wf.add_test_case( - 'fuse leak in tests', - f'Test leaked dfuse instance at {fuse}', - test_class='test', - ) + conf.wf.add_test_case('fuse leak in tests', + f'Test leaked dfuse instance at {fuse}', + test_class='test',) out_wrapper = None err_wrapper = None @@ -4443,10 +4333,9 @@ def run_tests(dfuse): fname = join(path, 'test_file3') - rc = subprocess.run( - ['dd', 'if=/dev/zero', 'bs=16k', 'count=64', f'of={join(path, "dd_file")}'], # nosec - check=True, - ) + rc = subprocess.run(['dd', 'if=/dev/zero', 'bs=16k', 'count=64', # nosec + f'of={join(path, "dd_file")}'], + check=True) print(rc) ofd = open(fname, 'w') ofd.write('hello') @@ -4575,19 +4464,17 @@ def log_timer_wrapper(*args, **kwargs): @log_timer -def log_test( - conf, - filename, - show_memleaks=True, - quiet=False, - skip_fi=False, - leak_wf=None, - ignore_einval=False, - ignore_busy=False, - check_read=False, - check_write=False, - check_fstat=False, -): +def log_test(conf, + filename, + show_memleaks=True, + quiet=False, + skip_fi=False, + leak_wf=None, + ignore_einval=False, + ignore_busy=False, + check_read=False, + check_write=False, + check_fstat=False): """Run the log checker on filename, logging to stdout""" # pylint: disable=too-many-arguments @@ -4633,7 +4520,9 @@ def sizeof_fmt(num, suffix='B'): lto.skip_suffixes.append(" DER_BUSY(-1012): 'Device or resource busy'") try: - lto.check_log_file(abort_on_warning=True, show_memleaks=show_memleaks, leak_wf=leak_wf) + lto.check_log_file(abort_on_warning=True, + show_memleaks=show_memleaks, + leak_wf=leak_wf) except nlt_lt.LogCheckError: pass @@ -4657,9 +4546,8 @@ def sizeof_fmt(num, suffix='B'): raise NLTestNoFunction('dfuse___fxstat') if conf.max_log_size and fstat.st_size > conf.max_log_size: - message = f'Max log size exceeded, {sizeof_fmt(fstat.st_size)} > ' + sizeof_fmt( - conf.max_log_size - ) + message = (f'Max log size exceeded, {sizeof_fmt(fstat.st_size)} > ' + + sizeof_fmt(conf.max_log_size)) conf.wf.add_test_case('logfile_size', failure=message) return lto.fi_location @@ -4799,19 +4687,20 @@ def run_in_fg(server, conf, args): # Only set the container cache attributes when the container is initially created so they # can be modified later. 
- cont_attrs = { - 'dfuse-data-cache': False, - 'dfuse-attr-time': 60, - 'dfuse-dentry-time': 60, - 'dfuse-ndentry-time': 60, - 'dfuse-direct-io-disable': False, - } + cont_attrs = {'dfuse-data-cache': False, + 'dfuse-attr-time': 60, + 'dfuse-dentry-time': 60, + 'dfuse-ndentry-time': 60, + 'dfuse-direct-io-disable': False} container.set_attrs(cont_attrs) container = container.uuid - dfuse = DFuse( - server, conf, pool=pool.uuid, caching=True, wbcache=False, multi_user=args.multi_user - ) + dfuse = DFuse(server, + conf, + pool=pool.uuid, + caching=True, + wbcache=False, + multi_user=args.multi_user) dfuse.log_flush = True dfuse.start() @@ -4928,9 +4817,11 @@ def print_results(): all_start = time.perf_counter() while True: + row = [count] row.extend(create_times) - dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, + caching=False) dir_dir = join(dfuse.dir, f'dirs.{count}') file_dir = join(dfuse.dir, f'files.{count}') dfuse.start() @@ -4940,29 +4831,35 @@ def print_results(): print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, + caching=False) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', file_dir], stdout=subprocess.PIPE, check=True) + subprocess.run(['/bin/ls', file_dir], stdout=subprocess.PIPE, + check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, + caching=False) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', dir_dir], stdout=subprocess.PIPE, check=True) + subprocess.run(['/bin/ls', '-t', dir_dir], stdout=subprocess.PIPE, + check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) dfuse.stop() - dfuse = DFuse(server, conf, pool=pool, container=container, caching=False) + dfuse = DFuse(server, conf, pool=pool, container=container, + caching=False) dfuse.start() start = time.perf_counter() # Use sort by time here so ls calls stat, if you run ls -l then it will # also call getxattr twice which skews the figures. - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, + check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) @@ -4971,15 +4868,21 @@ def print_results(): # Test with caching enabled. Check the file directory, and do it twice # without restarting, to see the effect of populating the cache, and # reading from the cache. 
- dfuse = DFuse(server, conf, pool=pool, container=container, caching=True) + dfuse = DFuse(server, + conf, + pool=pool, + container=container, + caching=True) dfuse.start() start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, + check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) start = time.perf_counter() - subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, check=True) + subprocess.run(['/bin/ls', '-t', file_dir], stdout=subprocess.PIPE, + check=True) elapsed = time.perf_counter() - start print(f'processed {count} dirs in {elapsed:.2f} seconds') row.append(elapsed) @@ -4995,7 +4898,10 @@ def print_results(): create_times = make_dirs(dfuse.dir, count) dfuse.stop() - run_daos_cmd(conf, ['container', 'destroy', pool, container]) + run_daos_cmd(conf, ['container', + 'destroy', + pool, + container]) print_results() @@ -5003,7 +4909,9 @@ def test_pydaos_kv(server, conf): """Test the KV interface""" # pylint: disable=consider-using-with - pydaos_log_file = tempfile.NamedTemporaryFile(prefix='dnt_pydaos_', suffix='.log', delete=False) + pydaos_log_file = tempfile.NamedTemporaryFile(prefix='dnt_pydaos_', + suffix='.log', + delete=False) os.environ['D_LOG_FILE'] = pydaos_log_file.name daos = import_daos(server, conf) @@ -5064,9 +4972,9 @@ def test_pydaos_kv(server, conf): def test_pydaos_kv_obj_class(server, conf): """Test the predefined object class works with KV""" - with tempfile.NamedTemporaryFile( - prefix='kv_objclass_pydaos_', suffix='.log', delete=False - ) as tmp_file: + with tempfile.NamedTemporaryFile(prefix='kv_objclass_pydaos_', + suffix='.log', + delete=False) as tmp_file: log_name = tmp_file.name os.environ['D_LOG_FILE'] = log_name @@ -5133,7 +5041,6 @@ def test_pydaos_kv_obj_class(server, conf): daos._cleanup() log_test(conf, log_name) - # Fault injection testing. 
# # This runs two different commands under fault injection, although it allows @@ -5154,10 +5061,11 @@ def test_pydaos_kv_obj_class(server, conf): # -class AllocFailTestRun: +class AllocFailTestRun(): """Class to run a fault injection command with a single fault""" def __init__(self, aft, cmd, env, loc, cwd): + # The return from subprocess.poll self.ret = None self.fault_injected = None @@ -5187,9 +5095,10 @@ def __init__(self, aft, cmd, env, loc, cwd): prefix = f'dnt_{loc:04d}_' else: prefix = 'dnt_reference_' - with tempfile.NamedTemporaryFile( - prefix=prefix, suffix='.log', dir=self._aft.log_dir, delete=False - ) as log_file: + with tempfile.NamedTemporaryFile(prefix=prefix, + suffix='.log', + dir=self._aft.log_dir, + delete=False) as log_file: self.log_file = log_file.name self._env['D_LOG_FILE'] = self.log_file @@ -5215,18 +5124,16 @@ def start(self): """Start the command""" faults = {} - faults['fault_config'] = [{'id': 100, 'probability_x': 1, 'probability_y': 1}] + faults['fault_config'] = [{'id': 100, + 'probability_x': 1, + 'probability_y': 1}] if self.loc: - faults['fault_config'].append( - { - 'id': 0, - 'probability_x': 1, - 'probability_y': 1, - 'interval': self.loc, - 'max_faults': 1, - } - ) + faults['fault_config'].append({'id': 0, + 'probability_x': 1, + 'probability_y': 1, + 'interval': self.loc, + 'max_faults': 1}) if self._aft.skip_daos_init: faults['fault_config'].append({'id': 101, 'probability_x': 1}) @@ -5245,14 +5152,12 @@ def start(self): else: exec_cmd = self._cmd - self._sp = subprocess.Popen( - exec_cmd, - env=self._env, - cwd=self._cwd, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) + self._sp = subprocess.Popen(exec_cmd, + env=self._env, + cwd=self._cwd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) def has_finished(self): """Check if the command has completed""" @@ -5277,8 +5182,8 @@ def _post(self, rc): This is where all the checks are performed. """ - def _explain(): + if self._aft.conf.tmp_dir: log_dir = self._aft.conf.tmp_dir else: @@ -5287,11 +5192,10 @@ def _explain(): short_log_file = self.log_file if short_log_file.startswith(self.log_file): - short_log_file = short_log_file[len(log_dir) + 1 :] + short_log_file = short_log_file[len(log_dir) + 1:] self._aft.wf.explain(self._fi_loc, short_log_file, fi_signal) self._aft.conf.wf.explain(self._fi_loc, short_log_file, fi_signal) - # Put in a new-line. print() self.returncode = rc @@ -5320,15 +5224,13 @@ def _explain(): else: wf = None - self._fi_loc = log_test( - self._aft.conf, - self.log_file, - show_memleaks=show_memleaks, - ignore_busy=self._aft.ignore_busy, - quiet=True, - skip_fi=True, - leak_wf=wf, - ) + self._fi_loc = log_test(self._aft.conf, + self.log_file, + show_memleaks=show_memleaks, + ignore_busy=self._aft.ignore_busy, + quiet=True, + skip_fi=True, + leak_wf=wf) self.fault_injected = True assert self._fi_loc except NLTestNoFi: @@ -5354,22 +5256,22 @@ def _explain(): # These checks will report an error against the line of code that introduced the "leak" # which may well only have a loose correlation to where the error was reported. if self._aft.check_daos_stderr: + # The go code will report a stacktrace in some cases on segfault or double-free # and these will obviously not be the expected output but are obviously an error, # to avoid filling the results with lots of warnings about stderr just include one # to say the check is disabled. 
if rc in (-6, -11): - self._aft.wf.add( - self._fi_loc, - 'NORMAL', - f"Unable to check stderr because of exit code '{rc}'", - mtype='Crash preventing check', - ) + self._aft.wf.add(self._fi_loc, + 'NORMAL', + f"Unable to check stderr because of exit code '{rc}'", + mtype='Crash preventing check') _explain() return stderr = self._stderr.decode('utf-8').rstrip() for line in stderr.splitlines(): + # This is what the go code uses. if line.endswith(': DER_NOMEM(-1009): Out of memory'): continue @@ -5390,53 +5292,45 @@ def _explain(): continue if 'DER_UNKNOWN' in line: - self._aft.wf.add( - self._fi_loc, - 'HIGH', - f"Incorrect stderr '{line}'", - mtype='Invalid error code used', - ) + self._aft.wf.add(self._fi_loc, + 'HIGH', + f"Incorrect stderr '{line}'", + mtype='Invalid error code used') continue - self._aft.wf.add( - self._fi_loc, 'NORMAL', f"Malformed stderr '{line}'", mtype='Malformed stderr' - ) + self._aft.wf.add(self._fi_loc, + 'NORMAL', + f"Malformed stderr '{line}'", + mtype='Malformed stderr') _explain() return if self.returncode == 0 and self._aft.check_post_stdout: if self.stdout != self._aft.expected_stdout: - self._aft.wf.add( - self._fi_loc, - 'NORMAL', - f"Incorrect stdout '{self.stdout}'", - mtype='Out of memory caused zero exit code with incorrect output', - ) + self._aft.wf.add(self._fi_loc, + 'NORMAL', + f"Incorrect stdout '{self.stdout}'", + mtype='Out of memory caused zero exit code with incorrect output') if self._aft.check_stderr: stderr = self._stderr.decode('utf-8').rstrip() - if ( - stderr != '' - and not stderr.endswith('(-1009): Out of memory') - and not stderr.endswith(': errno 12 (Cannot allocate memory)') - and 'error parsing command line arguments' not in stderr - and self.stdout != self._aft.expected_stdout - ): + if stderr != '' and not stderr.endswith('(-1009): Out of memory') and \ + not stderr.endswith(': errno 12 (Cannot allocate memory)') and \ + 'error parsing command line arguments' not in stderr and \ + self.stdout != self._aft.expected_stdout: if self.stdout != b'': print(self._aft.expected_stdout) print() print(self.stdout) print() - self._aft.wf.add( - self._fi_loc, - 'NORMAL', - f"Incorrect stderr '{stderr}'", - mtype='Out of memory not reported correctly via stderr', - ) + self._aft.wf.add(self._fi_loc, + 'NORMAL', + f"Incorrect stderr '{stderr}'", + mtype='Out of memory not reported correctly via stderr') _explain() -class AllocFailTest: +class AllocFailTest(): # pylint: disable=too-few-public-methods """Class to describe fault injection command""" @@ -5517,6 +5411,7 @@ def _prep(self): # finish. After each repetition completes then check for re-launch new processes # to keep the pipeline full. 
while not finished or active: + if not finished: while len(active) < max_child: active.append(self._run_cmd(fid)) @@ -5620,17 +5515,9 @@ def test_dfuse_start(server, conf, wf): os.mkdir(mount_point) - cmd = [ - join(conf['PREFIX'], 'bin', 'dfuse'), - '--mountpoint', - mount_point, - '--pool', - pool.id(), - '--cont', - container.id(), - '--foreground', - '--singlethread', - ] + cmd = [join(conf['PREFIX'], 'bin', 'dfuse'), + '--mountpoint', mount_point, + '--pool', pool.id(), '--cont', container.id(), '--foreground', '--singlethread'] test_cmd = AllocFailTest(conf, 'dfuse', cmd) test_cmd.wf = wf @@ -5655,20 +5542,16 @@ def test_alloc_fail_copy(server, conf, wf): """ def get_cmd(cont_id): - return [ - 'daos', - 'filesystem', - 'copy', - '--src', - f'daos://{pool.id()}/aft_base', - '--dst', - f'daos://{pool.id()}/container_{cont_id}', - ] + return ['daos', + 'filesystem', + 'copy', + '--src', + f'daos://{pool.id()}/aft_base', + '--dst', + f'daos://{pool.id()}/container_{cont_id}'] pool = server.get_test_pool_obj() - with tempfile.TemporaryDirectory( - prefix='copy_src_', - ) as src_dir: + with tempfile.TemporaryDirectory(prefix='copy_src_',) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5679,9 +5562,8 @@ def get_cmd(cont_id): os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd( - conf, ['filesystem', 'copy', '--src', sub_dir, '--dst', f'daos://{pool.id()}/aft_base'] - ) + rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', sub_dir, + '--dst', f'daos://{pool.id()}/aft_base']) assert rc.returncode == 0, rc test_cmd = AllocFailTest(conf, 'filesystem-copy', get_cmd) @@ -5707,15 +5589,8 @@ def test_alloc_fail_copy_trunc(server, conf, wf): files_needed = 4000 def get_cmd(_): - cmd = [ - 'daos', - 'filesystem', - 'copy', - '--src', - src_file.name, - '--dst', - f'daos://{pool.id()}/aftc/new_dir/file.{get_cmd.idx}', - ] + cmd = ['daos', 'filesystem', 'copy', '--src', src_file.name, + '--dst', f'daos://{pool.id()}/aftc/new_dir/file.{get_cmd.idx}'] get_cmd.idx += 1 assert get_cmd.idx <= files_needed return cmd @@ -5723,9 +5598,7 @@ def get_cmd(_): get_cmd.idx = 0 # pylint: disable=invalid-name pool = server.get_test_pool_obj() - with tempfile.TemporaryDirectory( - prefix='copy_src_', - ) as src_dir: + with tempfile.TemporaryDirectory(prefix='copy_src_',) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5733,12 +5606,12 @@ def get_cmd(_): with open(join(sub_dir, f'file.{idx}'), 'w') as ofd: ofd.write('hello') - rc = run_daos_cmd( - conf, ['filesystem', 'copy', '--src', sub_dir, '--dst', f'daos://{pool.id()}/aftc'] - ) + rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', sub_dir, + '--dst', f'daos://{pool.id()}/aftc']) assert rc.returncode == 0, rc with tempfile.NamedTemporaryFile() as src_file: + test_cmd = AllocFailTest(conf, 'filesystem-copy-trunc', get_cmd) test_cmd.wf = wf test_cmd.check_daos_stderr = True @@ -5759,9 +5632,7 @@ def test_alloc_pil4dfs_ls(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX', label='pil4dfs_fi') - with tempfile.TemporaryDirectory( - prefix='pil4_src_', - ) as src_dir: + with tempfile.TemporaryDirectory(prefix='pil4_src_',) as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -5773,17 +5644,8 @@ def test_alloc_pil4dfs_ls(server, conf, wf): os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd( - conf, - [ - 'filesystem', - 'copy', - '--src', - f'{src_dir}/new_dir', - 
'--dst', - f'daos://{pool.id()}/{container.id()}', - ], - ) + rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', f'{src_dir}/new_dir', + '--dst', f'daos://{pool.id()}/{container.id()}']) print(rc) assert rc.returncode == 0, rc @@ -5805,14 +5667,12 @@ def test_alloc_cont_create(server, conf, wf): pool = server.get_test_pool_obj() def get_cmd(cont_id): - return [ - 'daos', - 'container', - 'create', - pool.id(), - '--properties', - f'srv_cksum:on,label:{cont_id}', - ] + return ['daos', + 'container', + 'create', + pool.id(), + '--properties', + f'srv_cksum:on,label:{cont_id}'] test_cmd = AllocFailTest(conf, 'cont-create', get_cmd) test_cmd.wf = wf @@ -5831,15 +5691,13 @@ def test_alloc_fail_cont_create(server, conf): dfuse.start() def get_cmd(cont_id): - return [ - 'daos', - 'container', - 'create', - '--type', - 'POSIX', - '--path', - join(dfuse.dir, f'container_{cont_id}'), - ] + return ['daos', + 'container', + 'create', + '--type', + 'POSIX', + '--path', + join(dfuse.dir, f'container_{cont_id}')] test_cmd = AllocFailTest(conf, 'cont-create', get_cmd) test_cmd.check_post_stdout = False @@ -5921,9 +5779,14 @@ def test_fi_list_attr(server, conf, wf): container = create_cont(conf, pool) - container.set_attrs({'my-test-attr-1': 'some-value', 'my-test-attr-2': 'some-other-value'}) + container.set_attrs({'my-test-attr-1': 'some-value', + 'my-test-attr-2': 'some-other-value'}) - cmd = ['daos', 'container', 'list-attrs', pool.id(), container.id()] + cmd = ['daos', + 'container', + 'list-attrs', + pool.id(), + container.id()] test_cmd = AllocFailTest(conf, 'cont-list-attr', cmd) test_cmd.wf = wf @@ -5939,7 +5802,11 @@ def test_fi_get_prop(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX') - cmd = ['daos', 'container', 'get-prop', pool.id(), container.id()] + cmd = ['daos', + 'container', + 'get-prop', + pool.id(), + container.id()] test_cmd = AllocFailTest(conf, 'cont-get-prop', cmd) test_cmd.wf = wf @@ -5960,7 +5827,12 @@ def test_fi_get_attr(server, conf, wf): container.set_attrs({attr_name: 'value'}) - cmd = ['daos', 'container', 'get-attr', pool.id(), container.id(), attr_name] + cmd = ['daos', + 'container', + 'get-attr', + pool.id(), + container.id(), + attr_name] test_cmd = AllocFailTest(conf, 'cont-get-attr', cmd) test_cmd.wf = wf @@ -5979,7 +5851,11 @@ def test_fi_cont_query(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX') - cmd = ['daos', 'container', 'query', pool.id(), container.id()] + cmd = ['daos', + 'container', + 'query', + pool.id(), + container.id()] test_cmd = AllocFailTest(conf, 'cont-query', cmd) test_cmd.wf = wf @@ -5998,7 +5874,11 @@ def test_fi_cont_check(server, conf, wf): container = create_cont(conf, pool) - cmd = ['daos', 'container', 'check', pool.id(), container.id()] + cmd = ['daos', + 'container', + 'check', + pool.id(), + container.id()] test_cmd = AllocFailTest(conf, 'cont-check', cmd) test_cmd.wf = wf @@ -6015,7 +5895,10 @@ def test_alloc_fail(server, conf): """Run 'daos' client binary with fault injection""" pool = server.get_test_pool_obj() - cmd = ['daos', 'cont', 'list', pool.id()] + cmd = ['daos', + 'cont', + 'list', + pool.id()] test_cmd = AllocFailTest(conf, 'pool-list-containers', cmd) # Create at least one container, and record what the output should be when @@ -6037,9 +5920,7 @@ def test_dfs_check(server, conf, wf): container = create_cont(conf, pool, ctype='POSIX', label='fsck') - with tempfile.TemporaryDirectory( - prefix='fsck_src_', - ) as src_dir: + with tempfile.TemporaryDirectory(prefix='fsck_src_',) 
as src_dir: sub_dir = join(src_dir, 'new_dir') os.mkdir(sub_dir) @@ -6051,23 +5932,13 @@ def test_dfs_check(server, conf, wf): # os.symlink('broken', join(sub_dir, 'broken_s')) os.symlink('file.0', join(sub_dir, 'link')) - rc = run_daos_cmd( - conf, - [ - 'filesystem', - 'copy', - '--src', - f'{src_dir}/new_dir', - '--dst', - f'daos://{pool.id()}/{container.id()}', - ], - ) + rc = run_daos_cmd(conf, ['filesystem', 'copy', '--src', f'{src_dir}/new_dir', + '--dst', f'daos://{pool.id()}/{container.id()}']) print(rc) assert rc.returncode == 0, rc test_cmd = AllocFailTest( - conf, 'fs-check', ['daos', 'filesystem', 'check', pool.id(), container.id()] - ) + conf, 'fs-check', ['daos', 'filesystem', 'check', pool.id(), container.id()]) test_cmd.wf = wf test_cmd.single_process = True test_cmd.check_daos_stderr = True @@ -6100,6 +5971,7 @@ def server_fi(args): setup_log_test(conf) with DaosServer(conf, wf=wf, test_class='server-fi', enable_fi=True) as server: + pool = server.get_test_pool_obj() cont = create_cont(conf, pool=pool, ctype='POSIX', label='server_test') @@ -6108,17 +5980,12 @@ def server_fi(args): for idx in range(100): server.run_daos_client_cmd_pil4dfs( - ['touch', f'file.{idx}'], container=cont, check=False, report=False - ) + ['touch', f'file.{idx}'], container=cont, check=False, report=False) server.run_daos_client_cmd_pil4dfs( ['dd', 'if=/dev/zero', f'of=file.{idx}', 'bs=1', 'count=1024'], - container=cont, - check=False, - report=False, - ) + container=cont, check=False, report=False) server.run_daos_client_cmd_pil4dfs( - ['rm', '-f', f'file.{idx}'], container=cont, check=False, report=False - ) + ['rm', '-f', f'file.{idx}'], container=cont, check=False, report=False) # Turn off fault injection again to assist in server shutdown. server.set_fi(probability=0) @@ -6144,9 +6011,8 @@ def run(wf, args): if args.mode == 'fi': fi_test = True else: - with DaosServer( - conf, test_class='first', wf=wf_server, fatal_errors=fatal_errors - ) as server: + with DaosServer(conf, test_class='first', wf=wf_server, + fatal_errors=fatal_errors) as server: if args.mode == 'launch': run_in_fg(server, conf, args) elif args.mode == 'overlay': @@ -6171,18 +6037,16 @@ def run(wf, args): fatal_errors.add_result(server.set_fi()) if args.mode == 'all': - with DaosServer( - conf, test_class='restart', wf=wf_server, fatal_errors=fatal_errors - ) as server: + with DaosServer(conf, test_class='restart', wf=wf_server, + fatal_errors=fatal_errors) as server: pass # If running all tests then restart the server under valgrind. # This is really, really slow so just do cont list, then # exit again. if args.server_valgrind: - with DaosServer( - conf, test_class='valgrind', wf=wf_server, valgrind=True, fatal_errors=fatal_errors - ) as server: + with DaosServer(conf, test_class='valgrind', wf=wf_server, valgrind=True, + fatal_errors=fatal_errors) as server: pools = server.fetch_pools() for pool in pools: cmd = ['pool', 'query', pool.id()] @@ -6200,9 +6064,8 @@ def run(wf, args): args.server_debug = 'INFO' args.memcheck = 'no' args.dfuse_debug = 'WARN' - with DaosServer( - conf, test_class='no-debug', wf=wf_server, fatal_errors=fatal_errors - ) as server: + with DaosServer(conf, test_class='no-debug', wf=wf_server, + fatal_errors=fatal_errors) as server: if fi_test: # Most of the fault injection tests go here, they are then run on docker containers # so can be performed in parallel. 
@@ -6330,13 +6193,11 @@ def main(): print(f"Tests are: {','.join(sorted(tests))}") sys.exit(1) - wf = WarningsFactory( - 'nlt-errors.json', - post_error=True, - check='Log file errors', - class_id=args.class_name, - junit=True, - ) + wf = WarningsFactory('nlt-errors.json', + post_error=True, + check='Log file errors', + class_id=args.class_name, + junit=True) try: fatal_errors = run(wf, args) diff --git a/utils/run_utest.py b/utils/run_utest.py index 6d059274e95..4883a9baf94 100755 --- a/utils/run_utest.py +++ b/utils/run_utest.py @@ -25,7 +25,7 @@ def check_version(): """Ensure python version is compatible""" if sys.version_info < (3, 6): - print("Python version 3.6 or greater is required" "") + print("Python version 3.6 or greater is required""") sys.exit(-1) @@ -51,9 +51,8 @@ def setup_junit(memcheck): return (suite, test) -class BaseResults: +class BaseResults(): """Keep track of test results""" - def __init__(self): """Initializes the values""" self.results = {"tests": 0, "errors": 0, "failures": 0, "fail_msg": "", "error_msg": ""} @@ -89,7 +88,6 @@ def add_error(self, error_str): class Results(BaseResults): """Keep track of test results to produce final report""" - def __init__(self, memcheck): """Class to keep track of results""" super().__init__() @@ -102,23 +100,19 @@ def create_junit(self): if os.environ.get("CMOCKA_XML_FILE", None) is None: return if self.results["failures"]: - self.test.add_failure_info( - message=f"{self.results['failures']} of " + f"{self.results['tests']} failed", - output=self.results["fail_msg"], - ) + self.test.add_failure_info(message=f"{self.results['failures']} of " + + f"{self.results['tests']} failed", + output=self.results["fail_msg"]) if self.results["errors"]: - self.test.add_error_info( - message=f"{self.results['errors']} of " + f"{self.results['tests']} failed", - output=self.results["error_msg"], - ) + self.test.add_error_info(message=f"{self.results['errors']} of " + + f"{self.results['tests']} failed", + output=self.results["error_msg"]) write_xml_result(self.name, self.suite) def print_results(self): """Print the output""" - print( - f"Ran {self.results['tests']} tests, {self.results['failures']} tests failed, " - + f"{self.results['errors']} tests had errors" - ) + print(f"Ran {self.results['tests']} tests, {self.results['failures']} tests failed, " + + f"{self.results['errors']} tests had errors") if self.results["failures"]: print("FAILURES:") print(self.results["fail_msg"]) @@ -127,9 +121,8 @@ def print_results(self): print(self.results["error_msg"]) -class ValgrindHelper: +class ValgrindHelper(): """Helper class to setup xml command""" - @staticmethod def get_xml_name(name): """Get the xml file name""" @@ -143,19 +136,11 @@ def get_supp(base): @staticmethod def setup_cmd(base, cmd, name): """Return a new command using valgrind""" - cmd_prefix = [ - "valgrind", - "--leak-check=full", - "--show-reachable=yes", - "--num-callers=20", - "--error-limit=no", - "--fair-sched=try", - f"--suppressions={ValgrindHelper.get_supp(base)}", - "--gen-suppressions=all", - "--error-exitcode=42", - "--xml=yes", - f"--xml-file={ValgrindHelper.get_xml_name(name)}", - ] + cmd_prefix = ["valgrind", "--leak-check=full", "--show-reachable=yes", "--num-callers=20", + "--error-limit=no", "--fair-sched=try", + f"--suppressions={ValgrindHelper.get_supp(base)}", + "--gen-suppressions=all", "--error-exitcode=42", "--xml=yes", + f"--xml-file={ValgrindHelper.get_xml_name(name)}"] return cmd_prefix + cmd @@ -165,7 +150,8 @@ def run_cmd(cmd, output_log=None, env=None): 
if output_log: with open(output_log, "w", encoding="UTF-8") as output: print(f"RUNNING COMMAND {' '.join(cmd)}\n Log: {output_log}") - ret = subprocess.run(cmd, check=False, env=env, stdout=output, stderr=subprocess.STDOUT) + ret = subprocess.run(cmd, check=False, env=env, stdout=output, + stderr=subprocess.STDOUT) else: print(f"RUNNING COMMAND {' '.join(cmd)}") ret = subprocess.run(cmd, check=False, env=env) @@ -219,10 +205,8 @@ def process_cmocka(fname, suite_name): else: match = re.search("^(.*case )name(.*$)", line) if match: - outfile.write( - f"{match.group(1)}classname=\"UTEST_{suite_name}.{suite}\"" - + f" name{match.group(2)}\n" - ) + outfile.write(f"{match.group(1)}classname=\"UTEST_{suite_name}.{suite}\"" + + f" name{match.group(2)}\n") continue match = re.search("^(.*case classname=\")(.*$)", line) if match: @@ -242,9 +226,8 @@ def for_each_file(path, operate, arg, ext=None): operate(full_path, arg) -class AIO: +class AIO(): """Handle AIO specific setup and teardown""" - def __init__(self, mount, device=None): """Initialize an AIO device""" self.config_name = os.path.join(mount, "daos_nvme.conf") @@ -325,18 +308,8 @@ def prepare_test(self, name="AIO_1", min_size=4): if self.device is None: run_cmd(["dd", "if=/dev/zero", f"of={self.fname}", "bs=1G", f"count={min_size}"]) else: - run_cmd( - [ - "sudo", - "-E", - "dd", - "if=/dev/zero", - f"of={self.fname}", - "bs=4K", - "count=1", - "conv=notrunc", - ] - ) + run_cmd(["sudo", "-E", "dd", "if=/dev/zero", f"of={self.fname}", "bs=4K", "count=1", + "conv=notrunc"]) self.create_config(name) def finalize_test(self): @@ -355,7 +328,7 @@ def finalize(self): os.unlink(self.config_name) -class Test: +class Test(): """Define a test""" test_num = 1 @@ -369,7 +342,8 @@ def __init__(self, config, path_info, args): if env_vars: self.env.update(env_vars) self.warn_if_missing = config.get("warn_if_missing", None) - self.aio = {"aio": config.get("aio", None), "size": config.get("size", 4)} + self.aio = {"aio": config.get("aio", None), + "size": config.get("size", 4)} if self.filter(args.test_filter): print(f"Filtered test {' '.join(self.cmd)}") raise TestSkipped() @@ -456,12 +430,8 @@ def run(self, base, memcheck, sudo): cmd = [os.path.join(base, self.cmd[0])] + self.cmd[1:] if memcheck: if os.path.splitext(cmd[0])[-1] in [".sh", ".py"]: - self.env.update( - { - "USE_VALGRIND": "memcheck", - "VALGRIND_SUPP": ValgrindHelper.get_supp(self.root_dir()), - } - ) + self.env.update({"USE_VALGRIND": "memcheck", + "VALGRIND_SUPP": ValgrindHelper.get_supp(self.root_dir())}) else: cmd = ValgrindHelper.setup_cmd(self.root_dir(), cmd, self.name) if sudo: @@ -500,9 +470,8 @@ def remove_empty_files(self, log_dir): print(f" produced {fname}") -class Suite: +class Suite(): """Define a suite""" - def __init__(self, path_info, config, args): """Initialize a test suite""" self.name = config["name"] @@ -593,10 +562,8 @@ def run_suite(self, args, aio): try: ret = test.run(self.base, args.memcheck, self.sudo) if ret != 0: - results.add_failure( - f"{' '.join(test.get_last())} failed: ret={ret} " - + f"logs={test.log_dir()}" - ) + results.add_failure(f"{' '.join(test.get_last())} failed: ret={ret} " + + f"logs={test.log_dir()}") except Exception: results.add_error(f"{traceback.format_exc()}") ret = 1 # prevent reporting errors on teardown too @@ -637,25 +604,18 @@ def get_args(): """Parse the arguments""" parser = argparse.ArgumentParser(description='Run DAOS unit tests') parser.add_argument('--memcheck', action='store_true', help='Run tests with Valgrind memcheck') - 
parser.add_argument( - '--test_filter', default=None, help='Regular expression to select tests to run' - ) - parser.add_argument( - '--suite_filter', default=None, help='Regular expression to select suites to run' - ) - parser.add_argument( - '--no-fail-on-error', action='store_true', help='Disable non-zero return code on failure' - ) - parser.add_argument( - '--sudo', - choices=['yes', 'only', 'no'], - default='yes', - help='How to handle tests requiring sudo', - ) - parser.add_argument( - '--bdev', default=None, help="Device to use for AIO, will create file by default" - ) - parser.add_argument('--log_dir', default="/tmp/daos_utest", help="Path to store test logs") + parser.add_argument('--test_filter', default=None, + help='Regular expression to select tests to run') + parser.add_argument('--suite_filter', default=None, + help='Regular expression to select suites to run') + parser.add_argument('--no-fail-on-error', action='store_true', + help='Disable non-zero return code on failure') + parser.add_argument('--sudo', choices=['yes', 'only', 'no'], default='yes', + help='How to handle tests requiring sudo') + parser.add_argument('--bdev', default=None, + help="Device to use for AIO, will create file by default") + parser.add_argument('--log_dir', default="/tmp/daos_utest", + help="Path to store test logs") return parser.parse_args() @@ -664,12 +624,10 @@ def get_path_info(args): script_dir = os.path.dirname(os.path.realpath(__file__)) daos_base = os.path.realpath(os.path.join(script_dir, '..')) build_vars_file = os.path.join(daos_base, '.build_vars.json') - path_info = { - "DAOS_BASE": daos_base, - "UTEST_YAML": os.path.join(daos_base, "utils", "utest.yaml"), - "MOUNT_DIR": "/mnt/daos", - "LOG_DIR": args.log_dir, - } + path_info = {"DAOS_BASE": daos_base, + "UTEST_YAML": os.path.join(daos_base, "utils", "utest.yaml"), + "MOUNT_DIR": "/mnt/daos", + "LOG_DIR": args.log_dir} try: with open(build_vars_file, "r", encoding="UTF-8") as vars_file: build_vars = json.load(vars_file) From 2934722f62a9c3e7e6ed502203253d6c2c03be28 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 3 Nov 2023 10:59:52 +0000 Subject: [PATCH 09/26] Back out conflict. Required-githooks: true Signed-off-by: Ashley Pittman --- src/tests/ftest/launch.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/tests/ftest/launch.py b/src/tests/ftest/launch.py index b8df3f0244c..2dc863f9619 100755 --- a/src/tests/ftest/launch.py +++ b/src/tests/ftest/launch.py @@ -24,7 +24,6 @@ # from avocado.core.version import MAJOR, MINOR # from avocado.utils.stacktrace import prepare_exc_info from ClusterShell.NodeSet import NodeSet - # When SRE-439 is fixed we should be able to include these import statements here # from util.distro_utils import detect # pylint: disable=import-error,no-name-in-module @@ -35,7 +34,6 @@ # This is not good coding practice. Should use package paths and remove all these E402. sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "util")) from data_utils import dict_extract_values, list_flatten, list_unique # noqa: E402 - # pylint: disable=import-outside-toplevel from host_utils import HostException, HostInfo, get_local_host, get_node_set # noqa: E402 from logger_utils import get_console_handler, get_file_handler # noqa: E402 From 98e1fffbeafca2f5fb759afc3a5c667915661e96 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 3 Nov 2023 12:51:35 +0000 Subject: [PATCH 10/26] Fix isort errors. 
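The reordering below is what isort itself produces for these modules; as a quick local check the same normalisation can be done programmatically. A minimal sketch, assuming isort 5 is installed (the sample strings are illustrative, not project files):

    # Minimal sketch: isort sorts and groups imports (stdlib first, then
    # third-party, separated by a blank line), the kind of fix in this patch.
    import isort

    messy = (
        "import sys\n"
        "from ClusterShell.NodeSet import NodeSet\n"
        "import os\n"
    )
    print(isort.code(messy))  # os/sys first, blank line, then ClusterShell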
Required-githooks: true Signed-off-by: Ashley Pittman --- src/tests/ftest/launch.py | 1 + src/tests/ftest/slurm_setup.py | 1 + src/tests/ftest/util/collection_utils.py | 1 + src/tests/ftest/util/environment_utils.py | 10 ++++++++-- src/tests/ftest/util/host_utils.py | 1 + src/tests/ftest/util/launch_utils.py | 1 + src/tests/ftest/util/network_utils.py | 1 + src/tests/ftest/util/slurm_utils.py | 1 + src/tests/ftest/util/storage_utils.py | 1 + src/tests/ftest/util/user_utils.py | 1 + src/tests/ftest/util/yaml_utils.py | 1 + 11 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/tests/ftest/launch.py b/src/tests/ftest/launch.py index dc5f9f84247..d0553a71505 100755 --- a/src/tests/ftest/launch.py +++ b/src/tests/ftest/launch.py @@ -15,6 +15,7 @@ from ClusterShell.NodeSet import NodeSet from process_core_files import get_core_file_pattern + # pylint: disable=import-error,no-name-in-module from util.avocado_utils import AvocadoException, AvocadoInfo from util.code_coverage_utils import CodeCoverage diff --git a/src/tests/ftest/slurm_setup.py b/src/tests/ftest/slurm_setup.py index 3036c92b397..3c3aab2052b 100755 --- a/src/tests/ftest/slurm_setup.py +++ b/src/tests/ftest/slurm_setup.py @@ -14,6 +14,7 @@ import sys from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.logger_utils import get_console_handler from util.package_utils import install_packages, remove_packages diff --git a/src/tests/ftest/util/collection_utils.py b/src/tests/ftest/util/collection_utils.py index f84f74fddab..4c5e7d18241 100644 --- a/src/tests/ftest/util/collection_utils.py +++ b/src/tests/ftest/util/collection_utils.py @@ -12,6 +12,7 @@ from ClusterShell.NodeSet import NodeSet from process_core_files import CoreFileException, CoreFileProcessing + # pylint: disable=import-error,no-name-in-module from util.environment_utils import TestEnvironment from util.host_utils import get_local_host diff --git a/src/tests/ftest/util/environment_utils.py b/src/tests/ftest/util/environment_utils.py index d71ee3e8e8e..ae9992f77ce 100644 --- a/src/tests/ftest/util/environment_utils.py +++ b/src/tests/ftest/util/environment_utils.py @@ -8,9 +8,15 @@ import site from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module -from util.network_utils import (PROVIDER_ALIAS, SUPPORTED_PROVIDERS, NetworkException, - get_common_provider, get_fastest_interface) +from util.network_utils import ( + PROVIDER_ALIAS, + SUPPORTED_PROVIDERS, + NetworkException, + get_common_provider, + get_fastest_interface, +) from util.run_utils import run_remote diff --git a/src/tests/ftest/util/host_utils.py b/src/tests/ftest/util/host_utils.py index 83add3924c2..5a7e135c44f 100644 --- a/src/tests/ftest/util/host_utils.py +++ b/src/tests/ftest/util/host_utils.py @@ -7,6 +7,7 @@ from socket import gethostname from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.slurm_utils import SlurmFailed, get_partition_hosts, get_reservation_hosts diff --git a/src/tests/ftest/util/launch_utils.py b/src/tests/ftest/util/launch_utils.py index ce5c30ecb85..dce0c402ef9 100644 --- a/src/tests/ftest/util/launch_utils.py +++ b/src/tests/ftest/util/launch_utils.py @@ -13,6 +13,7 @@ from ClusterShell.NodeSet import NodeSet from slurm_setup import SlurmSetup, SlurmSetupException + # pylint: disable=import-error,no-name-in-module from util.collection_utils import TEST_RESULTS_DIRS, collect_test_result from util.data_utils import dict_extract_values, 
list_flatten, list_unique diff --git a/src/tests/ftest/util/network_utils.py b/src/tests/ftest/util/network_utils.py index 5806daaf229..1cbbfb6ce91 100644 --- a/src/tests/ftest/util/network_utils.py +++ b/src/tests/ftest/util/network_utils.py @@ -8,6 +8,7 @@ import re from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.run_utils import run_remote diff --git a/src/tests/ftest/util/slurm_utils.py b/src/tests/ftest/util/slurm_utils.py index 9ba92b0f8c9..41d33679684 100644 --- a/src/tests/ftest/util/slurm_utils.py +++ b/src/tests/ftest/util/slurm_utils.py @@ -10,6 +10,7 @@ import time from ClusterShell.NodeSet import NodeSet, NodeSetParseError + # pylint: disable=import-error,no-name-in-module from util.run_utils import RunException, run_local, run_remote diff --git a/src/tests/ftest/util/storage_utils.py b/src/tests/ftest/util/storage_utils.py index 2aa27b5ea53..81cd94d0590 100644 --- a/src/tests/ftest/util/storage_utils.py +++ b/src/tests/ftest/util/storage_utils.py @@ -12,6 +12,7 @@ import yaml from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.run_utils import run_remote diff --git a/src/tests/ftest/util/user_utils.py b/src/tests/ftest/util/user_utils.py index 368a25862db..f836f800cfa 100644 --- a/src/tests/ftest/util/user_utils.py +++ b/src/tests/ftest/util/user_utils.py @@ -11,6 +11,7 @@ from pwd import getpwnam from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.run_utils import run_remote diff --git a/src/tests/ftest/util/yaml_utils.py b/src/tests/ftest/util/yaml_utils.py index 1bb969e4049..7879b084c3f 100644 --- a/src/tests/ftest/util/yaml_utils.py +++ b/src/tests/ftest/util/yaml_utils.py @@ -9,6 +9,7 @@ import yaml from ClusterShell.NodeSet import NodeSet + # pylint: disable=import-error,no-name-in-module from util.data_utils import dict_extract_values, list_flatten, list_unique From c6904f5746b01ae781aefb51cca27819a4384ffc Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 6 Nov 2023 10:28:17 +0000 Subject: [PATCH 11/26] Add a package for pylint Required-githooks: true Signed-off-by: Ashley Pittman --- utils/cq/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/cq/requirements.txt b/utils/cq/requirements.txt index a5d00a92fa7..b9a383b50c4 100644 --- a/utils/cq/requirements.txt +++ b/utils/cq/requirements.txt @@ -6,6 +6,7 @@ avocado-framework-plugin-varianter-yaml-to-mux<94 clustershell paramiko pyenchant +junitparser ## flake8 6 removed --diff option which breaks flake precommit hook. ## https://github.com/pycqa/flake8/issues/1389 https://github.com/PyCQA/flake8/pull/1720 flake8<6.0.0 From 74056efaeafb7b6044741399d4ee50764a736f9f Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 13 Nov 2023 14:34:52 +0000 Subject: [PATCH 12/26] back out cahge. 
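This restores the earlier hand-wrapped layout of src/SConscript. For checking whether a file already matches black without rewriting it, a minimal sketch (importing black directly, the file path, and the 100-column, no-string-normalisation options are illustrative assumptions, not the project tooling):

    # Minimal sketch: report whether black would reformat a file.
    # The path and Mode options are assumptions for illustration only.
    import black

    mode = black.Mode(line_length=100, string_normalization=False)
    with open("src/SConscript", encoding="utf-8") as fd:
        src = fd.read()
    try:
        if black.format_str(src, mode=mode) != src:
            print("would be reformatted")
        else:
            print("already formatted")
    except black.InvalidInput as err:
        print(f"not valid Python: {err}")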
Required-githooks: true Signed-off-by: Ashley Pittman --- src/SConscript | 63 +++++++++++++------------------------------------- 1 file changed, 16 insertions(+), 47 deletions(-) diff --git a/src/SConscript b/src/SConscript index f41182306a0..c4cb419e047 100644 --- a/src/SConscript +++ b/src/SConscript @@ -2,45 +2,16 @@ import os -HEADERS = [ - 'daos_api.h', - 'daos_types.h', - 'daos_errno.h', - 'daos_kv.h', - 'daos_event.h', - 'daos_mgmt.h', - 'daos_types.h', - 'daos_array.h', - 'daos_task.h', - 'daos_fs.h', - 'daos_uns.h', - 'daos_security.h', - 'daos_prop.h', - 'daos_obj_class.h', - 'daos_obj.h', - 'daos_pool.h', - 'daos_cont.h', - 'daos_version.h', - 'daos_fs_sys.h', - 'daos_s3.h', - 'daos_pipeline.h', -] +HEADERS = ['daos_api.h', 'daos_types.h', 'daos_errno.h', 'daos_kv.h', + 'daos_event.h', 'daos_mgmt.h', 'daos_types.h', 'daos_array.h', + 'daos_task.h', 'daos_fs.h', 'daos_uns.h', 'daos_security.h', + 'daos_prop.h', 'daos_obj_class.h', 'daos_obj.h', 'daos_pool.h', + 'daos_cont.h', 'daos_version.h', 'daos_fs_sys.h', 'daos_s3.h', 'daos_pipeline.h'] HEADERS_SRV = ['vos.h', 'vos_types.h'] -HEADERS_GURT = { - 'dlog.h', - 'debug.h', - 'common.h', - 'hash.h', - 'list.h', - 'heap.h', - 'fault_inject.h', - 'debug_setup.h', - 'types.h', - 'atomic.h', - 'slab.h', - 'telemetry_consumer.h', - 'telemetry_common.h', -} +HEADERS_GURT = ['dlog.h', 'debug.h', 'common.h', 'hash.h', 'list.h', + 'heap.h', 'fault_inject.h', 'debug_setup.h', + 'types.h', 'atomic.h', 'slab.h', + 'telemetry_consumer.h', 'telemetry_common.h'] HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] @@ -69,15 +40,13 @@ def read_and_save_version(env): return version tmpl_hdr_in = os.path.join('include', 'daos_version.h.in') - subst_dict = { - '@TMPL_MAJOR@': API_VERSION_MAJOR, - '@TMPL_MINOR@': API_VERSION_MINOR, - '@TMPL_FIX@': API_VERSION_FIX, - '@TMPL_PKG_MAJOR@': major, - '@TMPL_PKG_MINOR@': minor, - '@TMPL_PKG_FIX@': fix, - '@Template for @': '', - } + subst_dict = {'@TMPL_MAJOR@': API_VERSION_MAJOR, + '@TMPL_MINOR@': API_VERSION_MINOR, + '@TMPL_FIX@': API_VERSION_FIX, + '@TMPL_PKG_MAJOR@': major, + '@TMPL_PKG_MINOR@': minor, + '@TMPL_PKG_FIX@': fix, + '@Template for @': ''} out = env.Substfile(tmpl_hdr_in, SUBST_DICT=subst_dict) if not GetOption('silent'): From a8161c73ebca7835bc7ee0fd91e72153e560a213 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Mon, 13 Nov 2023 14:49:40 +0000 Subject: [PATCH 13/26] Required-githooks: true change scons file Skip-func-hw-test: true Skip-func-test: true Quick-Functional: true Test-tag: dfuse --- src/SConscript | 63 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/src/SConscript b/src/SConscript index c4cb419e047..f194a1c56a0 100644 --- a/src/SConscript +++ b/src/SConscript @@ -2,16 +2,45 @@ import os -HEADERS = ['daos_api.h', 'daos_types.h', 'daos_errno.h', 'daos_kv.h', - 'daos_event.h', 'daos_mgmt.h', 'daos_types.h', 'daos_array.h', - 'daos_task.h', 'daos_fs.h', 'daos_uns.h', 'daos_security.h', - 'daos_prop.h', 'daos_obj_class.h', 'daos_obj.h', 'daos_pool.h', - 'daos_cont.h', 'daos_version.h', 'daos_fs_sys.h', 'daos_s3.h', 'daos_pipeline.h'] +HEADERS = [ + 'daos_api.h', + 'daos_types.h', + 'daos_errno.h', + 'daos_kv.h', + 'daos_event.h', + 'daos_mgmt.h', + 'daos_types.h', + 'daos_array.h', + 'daos_task.h', + 'daos_fs.h', + 'daos_uns.h', + 'daos_security.h', + 'daos_prop.h', + 'daos_obj_class.h', + 'daos_obj.h', + 'daos_pool.h', + 'daos_cont.h', + 'daos_version.h', + 'daos_fs_sys.h', + 'daos_s3.h', + 'daos_pipeline.h', +] 
HEADERS_SRV = ['vos.h', 'vos_types.h'] -HEADERS_GURT = ['dlog.h', 'debug.h', 'common.h', 'hash.h', 'list.h', - 'heap.h', 'fault_inject.h', 'debug_setup.h', - 'types.h', 'atomic.h', 'slab.h', - 'telemetry_consumer.h', 'telemetry_common.h'] +HEADERS_GURT = [ + 'dlog.h', + 'debug.h', + 'common.h', + 'hash.h', + 'list.h', + 'heap.h', + 'fault_inject.h', + 'debug_setup.h', + 'types.h', + 'atomic.h', + 'slab.h', + 'telemetry_consumer.h', + 'telemetry_common.h', +] HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] @@ -40,13 +69,15 @@ def read_and_save_version(env): return version tmpl_hdr_in = os.path.join('include', 'daos_version.h.in') - subst_dict = {'@TMPL_MAJOR@': API_VERSION_MAJOR, - '@TMPL_MINOR@': API_VERSION_MINOR, - '@TMPL_FIX@': API_VERSION_FIX, - '@TMPL_PKG_MAJOR@': major, - '@TMPL_PKG_MINOR@': minor, - '@TMPL_PKG_FIX@': fix, - '@Template for @': ''} + subst_dict = { + '@TMPL_MAJOR@': API_VERSION_MAJOR, + '@TMPL_MINOR@': API_VERSION_MINOR, + '@TMPL_FIX@': API_VERSION_FIX, + '@TMPL_PKG_MAJOR@': major, + '@TMPL_PKG_MINOR@': minor, + '@TMPL_PKG_FIX@': fix, + '@Template for @': '', + } out = env.Substfile(tmpl_hdr_in, SUBST_DICT=subst_dict) if not GetOption('silent'): From b6613c1183b82ed4252736ae5b5479413b58dc12 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 17 Nov 2023 10:23:45 +0000 Subject: [PATCH 14/26] Resolve conflicts. Required-githooks: true Signed-off-by: Ashley Pittman --- site_scons/site_tools/doneapi.py | 30 +++++------- src/SConscript | 63 +++++++----------------- src/client/dfuse/SConscript | 82 +++++++++++++++----------------- src/gurt/tests/SConscript | 10 ++-- 4 files changed, 72 insertions(+), 113 deletions(-) diff --git a/site_scons/site_tools/doneapi.py b/site_scons/site_tools/doneapi.py index 3569d02a38e..24042439969 100644 --- a/site_scons/site_tools/doneapi.py +++ b/site_scons/site_tools/doneapi.py @@ -13,7 +13,7 @@ # pylint: disable=too-few-public-methods -class DetectCompiler: +class DetectCompiler(): """Find oneapi compiler""" def __init__(self): @@ -29,15 +29,13 @@ def __init__(self): for path in [root, binp, libp, binarch, libarch, include, icx]: if not os.path.exists(path): return - self.map = { - 'root': root, - 'bin': binp, - 'lib': libp, - 'binarch': binarch, - 'libarch': libarch, - 'include': include, - 'icx': icx, - } + self.map = {'root': root, + 'bin': binp, + 'lib': libp, + 'binarch': binarch, + 'libarch': libarch, + 'include': include, + 'icx': icx} def __getitem__(self, key): """Return key""" @@ -53,13 +51,11 @@ def generate(env): raise SCons.Errors.InternalError("No oneapi compiler found") env['INTEL_C_COMPILER_TOP'] = detector['root'] - paths = { - 'INCLUDE': 'include', - 'LIB': 'libarch', - 'PATH': 'binarch', - 'LD_LIBRARY_PATH': 'libarch', - } - for key, value in paths.items(): + paths = {'INCLUDE': 'include', + 'LIB': 'libarch', + 'PATH': 'binarch', + 'LD_LIBRARY_PATH': 'libarch'} + for (key, value) in paths.items(): env.PrependENVPath(key, detector[value]) env.PrependENVPath("PATH", detector["bin"]) env.PrependENVPath("LIB", detector["lib"]) diff --git a/src/SConscript b/src/SConscript index f194a1c56a0..c4cb419e047 100644 --- a/src/SConscript +++ b/src/SConscript @@ -2,45 +2,16 @@ import os -HEADERS = [ - 'daos_api.h', - 'daos_types.h', - 'daos_errno.h', - 'daos_kv.h', - 'daos_event.h', - 'daos_mgmt.h', - 'daos_types.h', - 'daos_array.h', - 'daos_task.h', - 'daos_fs.h', - 'daos_uns.h', - 'daos_security.h', - 'daos_prop.h', - 'daos_obj_class.h', - 'daos_obj.h', - 'daos_pool.h', - 'daos_cont.h', - 'daos_version.h', - 
'daos_fs_sys.h', - 'daos_s3.h', - 'daos_pipeline.h', -] +HEADERS = ['daos_api.h', 'daos_types.h', 'daos_errno.h', 'daos_kv.h', + 'daos_event.h', 'daos_mgmt.h', 'daos_types.h', 'daos_array.h', + 'daos_task.h', 'daos_fs.h', 'daos_uns.h', 'daos_security.h', + 'daos_prop.h', 'daos_obj_class.h', 'daos_obj.h', 'daos_pool.h', + 'daos_cont.h', 'daos_version.h', 'daos_fs_sys.h', 'daos_s3.h', 'daos_pipeline.h'] HEADERS_SRV = ['vos.h', 'vos_types.h'] -HEADERS_GURT = [ - 'dlog.h', - 'debug.h', - 'common.h', - 'hash.h', - 'list.h', - 'heap.h', - 'fault_inject.h', - 'debug_setup.h', - 'types.h', - 'atomic.h', - 'slab.h', - 'telemetry_consumer.h', - 'telemetry_common.h', -] +HEADERS_GURT = ['dlog.h', 'debug.h', 'common.h', 'hash.h', 'list.h', + 'heap.h', 'fault_inject.h', 'debug_setup.h', + 'types.h', 'atomic.h', 'slab.h', + 'telemetry_consumer.h', 'telemetry_common.h'] HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] @@ -69,15 +40,13 @@ def read_and_save_version(env): return version tmpl_hdr_in = os.path.join('include', 'daos_version.h.in') - subst_dict = { - '@TMPL_MAJOR@': API_VERSION_MAJOR, - '@TMPL_MINOR@': API_VERSION_MINOR, - '@TMPL_FIX@': API_VERSION_FIX, - '@TMPL_PKG_MAJOR@': major, - '@TMPL_PKG_MINOR@': minor, - '@TMPL_PKG_FIX@': fix, - '@Template for @': '', - } + subst_dict = {'@TMPL_MAJOR@': API_VERSION_MAJOR, + '@TMPL_MINOR@': API_VERSION_MINOR, + '@TMPL_FIX@': API_VERSION_FIX, + '@TMPL_PKG_MAJOR@': major, + '@TMPL_PKG_MINOR@': minor, + '@TMPL_PKG_FIX@': fix, + '@Template for @': ''} out = env.Substfile(tmpl_hdr_in, SUBST_DICT=subst_dict) if not GetOption('silent'): diff --git a/src/client/dfuse/SConscript b/src/client/dfuse/SConscript index cb90ae980f0..5de8992a246 100644 --- a/src/client/dfuse/SConscript +++ b/src/client/dfuse/SConscript @@ -3,37 +3,33 @@ import os HEADERS = ['ioil_io.h', 'ioil_defines.h', 'ioil_api.h', 'ioil.h'] COMMON_SRC = ['dfuse_obj_da.c', 'dfuse_vector.c'] -DFUSE_SRC = [ - 'dfuse_core.c', - 'dfuse_main.c', - 'dfuse_fuseops.c', - 'dfuse_cont.c', - 'dfuse_thread.c', - 'dfuse_pool.c', -] -OPS_SRC = [ - 'create', - 'fgetattr', - 'forget', - 'getxattr', - 'listxattr', - 'ioctl', - 'lookup', - 'mknod', - 'open', - 'opendir', - 'read', - 'rename', - 'readdir', - 'readlink', - 'removexattr', - 'setxattr', - 'setattr', - 'symlink', - 'unlink', - 'write', - 'statfs', -] +DFUSE_SRC = ['dfuse_core.c', + 'dfuse_main.c', + 'dfuse_fuseops.c', + 'dfuse_cont.c', + 'dfuse_thread.c', + 'dfuse_pool.c'] +OPS_SRC = ['create', + 'fgetattr', + 'forget', + 'getxattr', + 'listxattr', + 'ioctl', + 'lookup', + 'mknod', + 'open', + 'opendir', + 'read', + 'rename', + 'readdir', + 'readlink', + 'removexattr', + 'setxattr', + 'setattr', + 'symlink', + 'unlink', + 'write', + 'statfs'] IOIL_SRC = ['int_posix.c', 'int_read.c', 'int_write.c'] PIL4DFS_SRC = ['int_dfs.c', 'hook.c'] @@ -92,9 +88,11 @@ def build_client_libs_shared(env, prereqs): gen_script = ilenv.d_program('il/gen_script', ['il/gen_script.c'], LIBS=[]) if prereqs.test_requested(): - script = ilenv.Command('il/check_ioil_syms', gen_script, "$SOURCE -s $TARGET") + script = ilenv.Command('il/check_ioil_syms', gen_script, + "$SOURCE -s $TARGET") env.Install('$PREFIX/lib/daos/TESTING/scripts', script) - script = ilenv.Command('il/ioil-ld-opts', gen_script, '$SOURCE -l $TARGET') + script = ilenv.Command('il/ioil-ld-opts', gen_script, + '$SOURCE -l $TARGET') env.Install('$PREFIX/share/daos', script) env.InstallVersionedLib(os.path.join("$PREFIX", 'lib64'), dfuse_lib) @@ -139,8 +137,7 @@ def check_ioctl_def(context, ctype): 
context.Message(f'Checking if fuse ioctl is type {ctype} ') # pylint: disable-next=consider-using-f-string - src = ( - """#include + src = """#include extern void my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, @@ -149,9 +146,7 @@ my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, struct fuse_lowlevel_ops ops = {.ioctl = my_ioctl}; -""" - % ctype - ) # pylint: disable=consider-using-f-string +""" % ctype # pylint: disable=consider-using-f-string rc = context.TryCompile(src, '.c') context.Result(rc) @@ -163,12 +158,13 @@ def configure_fuse(cenv): if GetOption('help') or GetOption('clean'): return - check = Configure( - cenv, - custom_tests={'CheckStructMember': check_struct_member, 'CheckFuseIoctl': check_ioctl_def}, - ) + check = Configure(cenv, + custom_tests={'CheckStructMember': check_struct_member, + 'CheckFuseIoctl': check_ioctl_def}) - if check.CheckStructMember('#include ', 'struct fuse_file_info', 'cache_readdir'): + if check.CheckStructMember('#include ', + 'struct fuse_file_info', + 'cache_readdir'): cenv.AppendUnique(CPPDEFINES={'HAVE_CACHE_READDIR': '1'}) if check.CheckFuseIoctl('unsigned int'): diff --git a/src/gurt/tests/SConscript b/src/gurt/tests/SConscript index 03a1b245407..6ba63fe61df 100644 --- a/src/gurt/tests/SConscript +++ b/src/gurt/tests/SConscript @@ -26,12 +26,10 @@ def scons(): flags = [] testobj = test_env.Object(test) testname = os.path.splitext(test)[0] - testprog = test_env.d_test_program( - target=testname, - source=testobj + gurt_targets, - LIBS=test_env["LIBS"] + ['yaml'], - LINKFLAGS=flags, - ) + testprog = test_env.d_test_program(target=testname, + source=testobj + gurt_targets, + LIBS=test_env["LIBS"] + ['yaml'], + LINKFLAGS=flags) tests.append(testprog) Default(tests) From 1160bfb6a13966105a98d700b312c77cef525ddd Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 17 Nov 2023 10:27:32 +0000 Subject: [PATCH 15/26] make check it's own chek. Required-githooks: true Signed-off-by: Ashley Pittman --- .github/workflows/linting.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index f23b232119a..63d18ba1f5d 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -40,6 +40,11 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - name: Check DAOS logging macro use. run: ./utils/cq/d_logging_check.py --github src + + python-black: + name: Logging macro checking + runs-on: ubuntu-22.04 + steps: - name: Black uses: psf/black@stable with: From f8527f0e96a5111d5a00891767ab5375e193c5c1 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 17 Nov 2023 10:28:22 +0000 Subject: [PATCH 16/26] Revise code. 
Required-githooks: true Signed-off-by: Ashley Pittman --- site_scons/site_tools/doneapi.py | 15 ++----- src/SConscript | 63 ++++++++++++++++++++------- src/client/dfuse/SConscript | 74 +++++++++++++++++--------------- src/gurt/tests/SConscript | 6 +-- 4 files changed, 93 insertions(+), 65 deletions(-) diff --git a/site_scons/site_tools/doneapi.py b/site_scons/site_tools/doneapi.py index b9938b5a197..b419b10c456 100644 --- a/site_scons/site_tools/doneapi.py +++ b/site_scons/site_tools/doneapi.py @@ -13,7 +13,7 @@ # pylint: disable=too-few-public-methods -class DetectCompiler(): +class DetectCompiler: """Find oneapi compiler""" def __init__(self): @@ -28,11 +28,7 @@ def __init__(self): if not os.path.exists(path): print(f"oneapi compiler: {path} doesn't exist") return - self.map = {'root': root, - 'bin': binp, - 'lib': libp, - 'include': include, - 'icx': icx} + self.map = {'root': root, 'bin': binp, 'lib': libp, 'include': include, 'icx': icx} def __getitem__(self, key): """Return key""" @@ -48,11 +44,8 @@ def generate(env): raise SCons.Errors.InternalError("No oneapi compiler found") env['INTEL_C_COMPILER_TOP'] = detector['root'] - paths = {'INCLUDE': 'include', - 'LIB': 'libarch', - 'PATH': 'bin', - 'LD_LIBRARY_PATH': 'lib'} - for (key, value) in paths.items(): + paths = {'INCLUDE': 'include', 'LIB': 'libarch', 'PATH': 'bin', 'LD_LIBRARY_PATH': 'lib'} + for key, value in paths.items(): env.PrependENVPath(key, detector[value]) env.PrependENVPath("PATH", detector["bin"]) env.PrependENVPath("LIB", detector["lib"]) diff --git a/src/SConscript b/src/SConscript index c20adfafaff..b13d924920e 100644 --- a/src/SConscript +++ b/src/SConscript @@ -2,15 +2,44 @@ import os -HEADERS = ['daos_api.h', 'daos_types.h', 'daos_errno.h', 'daos_kv.h', - 'daos_event.h', 'daos_mgmt.h', 'daos_types.h', 'daos_array.h', - 'daos_task.h', 'daos_fs.h', 'daos_uns.h', 'daos_security.h', - 'daos_prop.h', 'daos_obj_class.h', 'daos_obj.h', 'daos_pool.h', - 'daos_cont.h', 'daos_version.h', 'daos_fs_sys.h', 'daos_s3.h', 'daos_pipeline.h'] -HEADERS_GURT = ['dlog.h', 'debug.h', 'common.h', 'hash.h', 'list.h', - 'heap.h', 'fault_inject.h', 'debug_setup.h', - 'types.h', 'atomic.h', 'slab.h', - 'telemetry_consumer.h', 'telemetry_common.h'] +HEADERS = [ + 'daos_api.h', + 'daos_types.h', + 'daos_errno.h', + 'daos_kv.h', + 'daos_event.h', + 'daos_mgmt.h', + 'daos_types.h', + 'daos_array.h', + 'daos_task.h', + 'daos_fs.h', + 'daos_uns.h', + 'daos_security.h', + 'daos_prop.h', + 'daos_obj_class.h', + 'daos_obj.h', + 'daos_pool.h', + 'daos_cont.h', + 'daos_version.h', + 'daos_fs_sys.h', + 'daos_s3.h', + 'daos_pipeline.h', +] +HEADERS_GURT = [ + 'dlog.h', + 'debug.h', + 'common.h', + 'hash.h', + 'list.h', + 'heap.h', + 'fault_inject.h', + 'debug_setup.h', + 'types.h', + 'atomic.h', + 'slab.h', + 'telemetry_consumer.h', + 'telemetry_common.h', +] HEADERS_CART = ['api.h', 'iv.h', 'types.h', 'swim.h'] @@ -39,13 +68,15 @@ def read_and_save_version(env): return version tmpl_hdr_in = os.path.join('include', 'daos_version.h.in') - subst_dict = {'@TMPL_MAJOR@': API_VERSION_MAJOR, - '@TMPL_MINOR@': API_VERSION_MINOR, - '@TMPL_FIX@': API_VERSION_FIX, - '@TMPL_PKG_MAJOR@': major, - '@TMPL_PKG_MINOR@': minor, - '@TMPL_PKG_FIX@': fix, - '@Template for @': ''} + subst_dict = { + '@TMPL_MAJOR@': API_VERSION_MAJOR, + '@TMPL_MINOR@': API_VERSION_MINOR, + '@TMPL_FIX@': API_VERSION_FIX, + '@TMPL_PKG_MAJOR@': major, + '@TMPL_PKG_MINOR@': minor, + '@TMPL_PKG_FIX@': fix, + '@Template for @': '', + } out = env.Substfile(tmpl_hdr_in, SUBST_DICT=subst_dict) if 
not GetOption('silent'): diff --git a/src/client/dfuse/SConscript b/src/client/dfuse/SConscript index bafe87431f0..c6498a6b9cb 100644 --- a/src/client/dfuse/SConscript +++ b/src/client/dfuse/SConscript @@ -3,33 +3,37 @@ import os HEADERS = ['ioil_io.h', 'ioil_defines.h', 'ioil_api.h', 'ioil.h'] COMMON_SRC = ['dfuse_obj_da.c', 'dfuse_vector.c'] -DFUSE_SRC = ['dfuse_core.c', - 'dfuse_main.c', - 'dfuse_fuseops.c', - 'dfuse_cont.c', - 'dfuse_thread.c', - 'dfuse_pool.c'] -OPS_SRC = ['create', - 'fgetattr', - 'forget', - 'getxattr', - 'listxattr', - 'ioctl', - 'lookup', - 'mknod', - 'open', - 'opendir', - 'read', - 'rename', - 'readdir', - 'readlink', - 'removexattr', - 'setxattr', - 'setattr', - 'symlink', - 'unlink', - 'write', - 'statfs'] +DFUSE_SRC = [ + 'dfuse_core.c', + 'dfuse_main.c', + 'dfuse_fuseops.c', + 'dfuse_cont.c', + 'dfuse_thread.c', + 'dfuse_pool.c', +] +OPS_SRC = [ + 'create', + 'fgetattr', + 'forget', + 'getxattr', + 'listxattr', + 'ioctl', + 'lookup', + 'mknod', + 'open', + 'opendir', + 'read', + 'rename', + 'readdir', + 'readlink', + 'removexattr', + 'setxattr', + 'setattr', + 'symlink', + 'unlink', + 'write', + 'statfs', +] IOIL_SRC = ['int_posix.c', 'int_read.c', 'int_write.c'] PIL4DFS_SRC = ['int_dfs.c', 'hook.c'] @@ -88,11 +92,9 @@ def build_client_libs_shared(env, prereqs): gen_script = ilenv.d_program('il/gen_script', ['il/gen_script.c'], LIBS=[]) if prereqs.test_requested(): - script = ilenv.Command('il/check_ioil_syms', gen_script, - "$SOURCE -s $TARGET") + script = ilenv.Command('il/check_ioil_syms', gen_script, "$SOURCE -s $TARGET") env.Install('$PREFIX/lib/daos/TESTING/scripts', script) - script = ilenv.Command('il/ioil-ld-opts', gen_script, - '$SOURCE -l $TARGET') + script = ilenv.Command('il/ioil-ld-opts', gen_script, '$SOURCE -l $TARGET') env.Install('$PREFIX/share/daos', script) env.InstallVersionedLib(os.path.join("$PREFIX", 'lib64'), dfuse_lib) @@ -137,7 +139,8 @@ def check_ioctl_def(context, ctype): context.Message(f'Checking if fuse ioctl is type {ctype} ') # pylint: disable-next=consider-using-f-string - src = """#include + src = ( + """#include extern void my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, @@ -146,7 +149,9 @@ my_ioctl (fuse_req_t req, fuse_ino_t ino, %s cmd, struct fuse_lowlevel_ops ops = {.ioctl = my_ioctl}; -""" % ctype # pylint: disable=consider-using-f-string +""" + % ctype + ) # pylint: disable=consider-using-f-string rc = context.TryCompile(src, '.c') context.Result(rc) @@ -158,8 +163,7 @@ def configure_fuse(cenv): if GetOption('help') or GetOption('clean'): return - check = Configure(cenv, - custom_tests={'CheckFuseIoctl': check_ioctl_def}) + check = Configure(cenv, custom_tests={'CheckFuseIoctl': check_ioctl_def}) if check.CheckFuseIoctl('unsigned int'): pass diff --git a/src/gurt/tests/SConscript b/src/gurt/tests/SConscript index e5ca3dc6951..e1b2d573eef 100644 --- a/src/gurt/tests/SConscript +++ b/src/gurt/tests/SConscript @@ -25,9 +25,9 @@ def scons(): for test in TEST_SRC: testobj = test_env.Object(test) testname = os.path.splitext(test)[0] - testprog = test_env.d_test_program(target=testname, - source=testobj + gurt_targets, - LIBS=test_env["LIBS"] + ['yaml']) + testprog = test_env.d_test_program( + target=testname, source=testobj + gurt_targets, LIBS=test_env["LIBS"] + ['yaml'] + ) tests.append(testprog) Default(tests) From 53c207ce53f4afb90da6c8bca1f9c8486e6b8251 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 17 Nov 2023 10:35:39 +0000 Subject: [PATCH 17/26] Fix black check. 
Required-githooks: true Signed-off-by: Ashley Pittman --- .github/workflows/linting.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 63d18ba1f5d..963f3e6674d 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -42,9 +42,13 @@ jobs: run: ./utils/cq/d_logging_check.py --github src python-black: - name: Logging macro checking + name: Python black formatting runs-on: ubuntu-22.04 steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Black uses: psf/black@stable with: From 1f9bd95670a3c15c94140858c154d0b6ba0504f9 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 24 Nov 2023 08:51:37 +0000 Subject: [PATCH 18/26] Fix file. skip-build: true Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/pydaos/raw/conversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/pydaos/raw/conversion.py b/src/client/pydaos/raw/conversion.py index 5c4fb56db9c..c9f68efc82e 100644 --- a/src/client/pydaos/raw/conversion.py +++ b/src/client/pydaos/raw/conversion.py @@ -36,7 +36,7 @@ def str_to_c_uuid(uuidstr): def c_err_to_str(err_num): - """ Utility function to convert C error code to its string id. + """Utility function to convert C error code to its string id. Args: err_num (int): C error code. From da1bb8d9fae441f49dda9e0aeb1c817b72e7cc1e Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 24 Nov 2023 09:00:45 +0000 Subject: [PATCH 19/26] Add --diff option. skip-build: true Required-githooks: true Signed-off-by: Ashley Pittman --- .github/workflows/linting.yml | 2 +- src/client/pydaos/raw/conversion.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 963f3e6674d..dbc1224f75b 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -52,4 +52,4 @@ jobs: - name: Black uses: psf/black@stable with: - options: "--check --verbose --extend-exclude (ftest|vendor|utils)" + options: "--check --diff --verbose --extend-exclude (ftest|vendor|utils)" diff --git a/src/client/pydaos/raw/conversion.py b/src/client/pydaos/raw/conversion.py index c9f68efc82e..5c4fb56db9c 100644 --- a/src/client/pydaos/raw/conversion.py +++ b/src/client/pydaos/raw/conversion.py @@ -36,7 +36,7 @@ def str_to_c_uuid(uuidstr): def c_err_to_str(err_num): - """Utility function to convert C error code to its string id. + """ Utility function to convert C error code to its string id. Args: err_num (int): C error code. From a41d09321fbe11d74b12efebee3071f6586d9a83 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 24 Nov 2023 09:14:42 +0000 Subject: [PATCH 20/26] Fix conversion file. skip-build: true Required-githooks: true Signed-off-by: Ashley Pittman --- src/client/pydaos/raw/conversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/pydaos/raw/conversion.py b/src/client/pydaos/raw/conversion.py index 5c4fb56db9c..c9f68efc82e 100644 --- a/src/client/pydaos/raw/conversion.py +++ b/src/client/pydaos/raw/conversion.py @@ -36,7 +36,7 @@ def str_to_c_uuid(uuidstr): def c_err_to_str(err_num): - """ Utility function to convert C error code to its string id. + """Utility function to convert C error code to its string id. Args: err_num (int): C error code. 
From aa688701c0aef600cc453d10d4f1ec07e483b2e6 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 8 Dec 2023 16:52:41 +0000 Subject: [PATCH 21/26] Back out conflict. skip-build: true Required-githooks: true Signed-off-by: Ashley Pittman --- site_scons/components/__init__.py | 360 ++++++++++++---------------- site_scons/site_tools/go_builder.py | 21 +- 2 files changed, 160 insertions(+), 221 deletions(-) diff --git a/site_scons/components/__init__.py b/site_scons/components/__init__.py index beb6e49a966..ede55290b73 100644 --- a/site_scons/components/__init__.py +++ b/site_scons/components/__init__.py @@ -34,7 +34,7 @@ ARM_PLATFORM = True -class InstalledComps: +class InstalledComps(): """Checks for installed components and keeps track of prior checks""" installed = [] @@ -113,113 +113,92 @@ def define_mercury(reqs): # pylint: disable-next=wrong-spelling-in-comment,fixme # TODO: change to --enable-opx once upgraded to libfabric 1.17+ - ofi_build = [ - './configure', - '--prefix=$OFI_PREFIX', - '--disable-efa', - '--disable-psm2', - '--disable-psm3', - '--disable-opx', - '--without-gdrcopy', - ] + ofi_build = ['./configure', + '--prefix=$OFI_PREFIX', + '--disable-efa', + '--disable-psm2', + '--disable-psm3', + '--disable-opx', + '--without-gdrcopy'] if reqs.target_type == 'debug': ofi_build.append('--enable-debug') else: ofi_build.append('--disable-debug') - reqs.define( - 'ofi', - retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), - commands=[['./autogen.sh'], ofi_build, ['make'], ['make', 'install']], - libs=['fabric'], - config_cb=ofi_config, - headers=['rdma/fabric.h'], - pkgconfig='libfabric', - package='libfabric-devel' if inst(reqs, 'ofi') else None, - patch_rpath=['lib'], - build_env={'CFLAGS': "-fstack-usage"}, - ) - - ucx_configure = [ - './configure', - '--disable-assertions', - '--disable-params-check', - '--enable-mt', - '--without-go', - '--without-java', - '--prefix=$UCX_PREFIX', - '--libdir=$UCX_PREFIX/lib64', - '--enable-cma', - '--without-cuda', - '--without-gdrcopy', - '--with-verbs', - '--without-knem', - '--without-rocm', - '--without-xpmem', - '--without-fuse3', - '--without-ugni', - ] + reqs.define('ofi', + retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), + commands=[['./autogen.sh'], + ofi_build, + ['make'], + ['make', 'install']], + libs=['fabric'], + config_cb=ofi_config, + headers=['rdma/fabric.h'], + pkgconfig='libfabric', + package='libfabric-devel' if inst(reqs, 'ofi') else None, + patch_rpath=['lib'], + build_env={'CFLAGS': "-fstack-usage"}) + + ucx_configure = ['./configure', '--disable-assertions', '--disable-params-check', '--enable-mt', + '--without-go', '--without-java', '--prefix=$UCX_PREFIX', + '--libdir=$UCX_PREFIX/lib64', '--enable-cma', '--without-cuda', + '--without-gdrcopy', '--with-verbs', '--without-knem', '--without-rocm', + '--without-xpmem', '--without-fuse3', '--without-ugni'] if reqs.target_type == 'debug': ucx_configure.extend(['--enable-debug']) else: ucx_configure.extend(['--disable-debug', '--disable-logging']) - reqs.define( - 'ucx', - retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), - libs=['ucs', 'ucp', 'uct'], - functions={'ucs': ['ucs_debug_disable_signal']}, - headers=['uct/api/uct.h'], - pkgconfig='ucx', - commands=[ - ['./autogen.sh'], - ucx_configure, - ['make'], - ['make', 'install'], - ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], - ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig'], - ], - build_env={'CFLAGS': '-Wno-error'}, - package='ucx-devel' if inst(reqs, 'ucx') else 
None, - ) - - mercury_build = [ - 'cmake', - '-DBUILD_SHARED_LIBS:BOOL=ON', - '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', - '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', - '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', - '-DBUILD_DOCUMENTATION:BOOL=OFF', - '-DBUILD_EXAMPLES:BOOL=OFF', - '-DBUILD_TESTING:BOOL=ON', - '-DBUILD_TESTING_PERF:BOOL=ON', - '-DBUILD_TESTING_UNIT:BOOL=OFF', - '-DMERCURY_USE_BOOST_PP:BOOL=ON', - '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', - '-DNA_USE_SM:BOOL=ON', - '-DNA_USE_OFI:BOOL=ON', - '-DNA_USE_UCX:BOOL=ON', - '../mercury', - ] + reqs.define('ucx', + retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), + libs=['ucs', 'ucp', 'uct'], + functions={'ucs': ['ucs_debug_disable_signal']}, + headers=['uct/api/uct.h'], + pkgconfig='ucx', + commands=[['./autogen.sh'], + ucx_configure, + ['make'], + ['make', 'install'], + ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], + ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig']], + build_env={'CFLAGS': '-Wno-error'}, + package='ucx-devel' if inst(reqs, 'ucx') else None) + + mercury_build = ['cmake', + '-DBUILD_SHARED_LIBS:BOOL=ON', + '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', + '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', + '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', + '-DBUILD_DOCUMENTATION:BOOL=OFF', + '-DBUILD_EXAMPLES:BOOL=OFF', + '-DBUILD_TESTING:BOOL=ON', + '-DBUILD_TESTING_PERF:BOOL=ON', + '-DBUILD_TESTING_UNIT:BOOL=OFF', + '-DMERCURY_USE_BOOST_PP:BOOL=ON', + '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', + '-DNA_USE_SM:BOOL=ON', + '-DNA_USE_OFI:BOOL=ON', + '-DNA_USE_UCX:BOOL=ON', + '../mercury'] if reqs.target_type == 'debug': mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=ON') else: mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=OFF') - reqs.define( - 'mercury', - retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), - commands=[mercury_build, ['make'], ['make', 'install']], - libs=['mercury'], - pkgconfig='mercury', - requires=['boost', 'ofi', 'ucx'] + libs, - out_of_src_build=True, - package='mercury-devel' if inst(reqs, 'mercury') else None, - build_env={'CFLAGS': '-fstack-usage'}, - ) + reqs.define('mercury', + retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), + commands=[mercury_build, + ['make'], + ['make', 'install']], + libs=['mercury'], + pkgconfig='mercury', + requires=['boost', 'ofi', 'ucx'] + libs, + out_of_src_build=True, + package='mercury-devel' if inst(reqs, 'mercury') else None, + build_env={'CFLAGS': '-fstack-usage'}) def define_common(reqs): @@ -266,48 +245,39 @@ def define_components(reqs): define_mercury(reqs) define_ompi(reqs) - reqs.define( - 'isal', - retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), - commands=[ - ['./autogen.sh'], - ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], - ['make'], - ['make', 'install'], - ], - libs=['isal'], - ) - reqs.define( - 'isal_crypto', - retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), - commands=[ - ['./autogen.sh'], - ['./configure', '--prefix=$ISAL_CRYPTO_PREFIX', '--libdir=$ISAL_CRYPTO_PREFIX/lib'], - ['make'], - ['make', 'install'], - ], - libs=['isal_crypto'], - ) - - reqs.define( - 'pmdk', - retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), - commands=[ - [ - 'make', - 'all', - 'BUILD_RPMEM=n', - 'NDCTL_ENABLE=n', - 'NDCTL_DISABLE=y', - 'DOC=n', - 'EXTRA_CFLAGS="-Wno-error"', - 'install', - 'prefix=$PMDK_PREFIX', - ] - ], - libs=['pmemobj'], - ) - abt_build = ['./configure', '--prefix=$ARGOBOTS_PREFIX', 'CC=gcc', 
'--enable-stack-unwind'] + reqs.define('isal', + retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), + commands=[['./autogen.sh'], + ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], + ['make'], + ['make', 'install']], + libs=['isal']) + reqs.define('isal_crypto', + retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), + commands=[['./autogen.sh'], + ['./configure', + '--prefix=$ISAL_CRYPTO_PREFIX', + '--libdir=$ISAL_CRYPTO_PREFIX/lib'], + ['make'], + ['make', 'install']], + libs=['isal_crypto']) + + reqs.define('pmdk', + retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), + commands=[['make', + 'all', + 'BUILD_RPMEM=n', + 'NDCTL_ENABLE=n', + 'NDCTL_DISABLE=y', + 'DOC=n', + 'EXTRA_CFLAGS="-Wno-error"', + 'install', + 'prefix=$PMDK_PREFIX']], + libs=['pmemobj']) + abt_build = ['./configure', + '--prefix=$ARGOBOTS_PREFIX', + 'CC=gcc', + '--enable-stack-unwind'] if reqs.target_type == 'debug': abt_build.append('--enable-debug=most') @@ -317,28 +287,19 @@ def define_components(reqs): if inst(reqs, 'valgrind_devel'): abt_build.append('--enable-valgrind') - reqs.define( - 'argobots', - retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), - commands=[ - ['git', 'clean', '-dxf'], - ['./autogen.sh'], - abt_build, - ['make'], - ['make', 'install'], - ], - requires=['libunwind'], - libs=['abt'], - headers=['abt.h'], - ) - - reqs.define( - 'fuse', - libs=['fuse3'], - defines=['FUSE_USE_VERSION=35'], - headers=['fuse3/fuse.h'], - package='fuse3-devel', - ) + reqs.define('argobots', + retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), + commands=[['git', 'clean', '-dxf'], + ['./autogen.sh'], + abt_build, + ['make'], + ['make', 'install']], + requires=['libunwind'], + libs=['abt'], + headers=['abt.h']) + + reqs.define('fuse', libs=['fuse3'], defines=['FUSE_USE_VERSION=35'], + headers=['fuse3/fuse.h'], package='fuse3-devel') # Tell SPDK which CPU to optimize for, by default this is native which works well unless you # are relocating binaries across systems, for example in CI under GitHub actions etc. 
There @@ -359,54 +320,44 @@ def define_components(reqs): else: spdk_arch = 'haswell' - reqs.define( - 'spdk', - retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), - commands=[ - [ - './configure', - '--prefix=$SPDK_PREFIX', - '--disable-tests', - '--disable-unit-tests', - '--disable-apps', - '--without-vhost', - '--without-crypto', - '--without-pmdk', - '--without-rbd', - '--without-iscsi-initiator', - '--without-isal', - '--without-vtune', - '--with-shared', - f'--target-arch={spdk_arch}', - ], - ['make', f'CONFIG_ARCH={spdk_arch}'], - ['make', 'install'], - ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], - ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], - ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], - ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], - ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], - ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], - ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], - ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf'], - ], - headers=['spdk/nvme.h'], - patch_rpath=['lib', 'bin'], - ) - - reqs.define( - 'protobufc', - retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), - commands=[ - ['./autogen.sh'], - ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], - ['make'], - ['make', 'install'], - ], - libs=['protobuf-c'], - headers=['protobuf-c/protobuf-c.h'], - package='protobuf-c-devel', - ) + reqs.define('spdk', + retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), + commands=[['./configure', + '--prefix=$SPDK_PREFIX', + '--disable-tests', + '--disable-unit-tests', + '--disable-apps', + '--without-vhost', + '--without-crypto', + '--without-pmdk', + '--without-rbd', + '--without-iscsi-initiator', + '--without-isal', + '--without-vtune', + '--with-shared', + f'--target-arch={spdk_arch}'], + ['make', f'CONFIG_ARCH={spdk_arch}'], + ['make', 'install'], + ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], + ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], + ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], + ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], + ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], + ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], + ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], + ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf']], + headers=['spdk/nvme.h'], + patch_rpath=['lib', 'bin']) + + reqs.define('protobufc', + retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), + commands=[['./autogen.sh'], + ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], + ['make'], + ['make', 'install']], + libs=['protobuf-c'], + headers=['protobuf-c/protobuf-c.h'], + package='protobuf-c-devel') os_name = dist[0].split()[0] if os_name == 'Ubuntu': @@ -415,9 +366,8 @@ def define_components(reqs): capstone_pkg = 'libcapstone-devel' else: capstone_pkg = 'capstone-devel' - reqs.define( - 'capstone', libs=['capstone'], headers=['capstone/capstone.h'], package=capstone_pkg - ) + reqs.define('capstone', libs=['capstone'], headers=['capstone/capstone.h'], + package=capstone_pkg) __all__ = ['define_components'] diff --git a/site_scons/site_tools/go_builder.py b/site_scons/site_tools/go_builder.py index aff663ef47f..b3706a8976d 100644 --- a/site_scons/site_tools/go_builder.py +++ 
b/site_scons/site_tools/go_builder.py @@ -17,12 +17,8 @@ def _scan_go_file(node, env, _path): src_dir = os.path.dirname(str(node)) includes = [] path_name = str(node)[12:] - rc = subprocess.run( - [env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], - cwd='src/control', - stdout=subprocess.PIPE, - check=True, - ) + rc = subprocess.run([env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], + cwd='src/control', stdout=subprocess.PIPE, check=True) data = json.loads(rc.stdout.decode('utf-8')) for dep in data['Deps']: if not dep.startswith('github.com/daos-stack/daos'): @@ -74,16 +70,9 @@ def _check_go_version(context): # go version go1.2.3 Linux/amd64 go_version = out.split(' ')[2].replace('go', '') - if ( - len( - [ - x - for x, y in zip(go_version.split('.'), MIN_GO_VERSION.split('.')) - if int(x) < int(y) - ] - ) - > 0 - ): + if len([x for x, y in + zip(go_version.split('.'), MIN_GO_VERSION.split('.')) + if int(x) < int(y)]) > 0: context.Result(f'{go_version} is too old (min supported: {MIN_GO_VERSION}) ') return 0 context.Result(str(go_version)) From 639ffe28b952bc19ed55906f8c48aa2bd188ea27 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 8 Dec 2023 16:54:44 +0000 Subject: [PATCH 22/26] Fix the two files. Required-githooks: true --- site_scons/components/__init__.py | 360 ++++++++++++++++------------ site_scons/site_tools/go_builder.py | 21 +- src/rdb/raft | 2 +- 3 files changed, 222 insertions(+), 161 deletions(-) diff --git a/site_scons/components/__init__.py b/site_scons/components/__init__.py index a338cd3e6fe..d4427b0a9fa 100644 --- a/site_scons/components/__init__.py +++ b/site_scons/components/__init__.py @@ -34,7 +34,7 @@ ARM_PLATFORM = True -class InstalledComps(): +class InstalledComps: """Checks for installed components and keeps track of prior checks""" installed = [] @@ -113,92 +113,113 @@ def define_mercury(reqs): # pylint: disable-next=wrong-spelling-in-comment,fixme # TODO: change to --enable-opx once upgraded to libfabric 1.17+ - ofi_build = ['./configure', - '--prefix=$OFI_PREFIX', - '--disable-efa', - '--disable-psm2', - '--disable-psm3', - '--disable-opx', - '--without-gdrcopy'] + ofi_build = [ + './configure', + '--prefix=$OFI_PREFIX', + '--disable-efa', + '--disable-psm2', + '--disable-psm3', + '--disable-opx', + '--without-gdrcopy', + ] if reqs.target_type == 'debug': ofi_build.append('--enable-debug') else: ofi_build.append('--disable-debug') - reqs.define('ofi', - retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), - commands=[['./autogen.sh'], - ofi_build, - ['make'], - ['make', 'install']], - libs=['fabric'], - config_cb=ofi_config, - headers=['rdma/fabric.h'], - pkgconfig='libfabric', - package='libfabric-devel' if inst(reqs, 'ofi') else None, - patch_rpath=['lib'], - build_env={'CFLAGS': "-fstack-usage"}) - - ucx_configure = ['./configure', '--disable-assertions', '--disable-params-check', '--enable-mt', - '--without-go', '--without-java', '--prefix=$UCX_PREFIX', - '--libdir=$UCX_PREFIX/lib64', '--enable-cma', '--without-cuda', - '--without-gdrcopy', '--with-verbs', '--without-knem', '--without-rocm', - '--without-xpmem', '--without-fuse3', '--without-ugni'] + reqs.define( + 'ofi', + retriever=GitRepoRetriever('https://github.com/ofiwg/libfabric'), + commands=[['./autogen.sh'], ofi_build, ['make'], ['make', 'install']], + libs=['fabric'], + config_cb=ofi_config, + headers=['rdma/fabric.h'], + pkgconfig='libfabric', + package='libfabric-devel' if inst(reqs, 'ofi') else None, + patch_rpath=['lib'], + build_env={'CFLAGS': 
"-fstack-usage"}, + ) + + ucx_configure = [ + './configure', + '--disable-assertions', + '--disable-params-check', + '--enable-mt', + '--without-go', + '--without-java', + '--prefix=$UCX_PREFIX', + '--libdir=$UCX_PREFIX/lib64', + '--enable-cma', + '--without-cuda', + '--without-gdrcopy', + '--with-verbs', + '--without-knem', + '--without-rocm', + '--without-xpmem', + '--without-fuse3', + '--without-ugni', + ] if reqs.target_type == 'debug': ucx_configure.extend(['--enable-debug']) else: ucx_configure.extend(['--disable-debug', '--disable-logging']) - reqs.define('ucx', - retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), - libs=['ucs', 'ucp', 'uct'], - functions={'ucs': ['ucs_debug_disable_signal']}, - headers=['uct/api/uct.h'], - pkgconfig='ucx', - commands=[['./autogen.sh'], - ucx_configure, - ['make'], - ['make', 'install'], - ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], - ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig']], - build_env={'CFLAGS': '-Wno-error'}, - package='ucx-devel' if inst(reqs, 'ucx') else None) - - mercury_build = ['cmake', - '-DBUILD_SHARED_LIBS:BOOL=ON', - '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', - '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', - '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', - '-DBUILD_DOCUMENTATION:BOOL=OFF', - '-DBUILD_EXAMPLES:BOOL=OFF', - '-DBUILD_TESTING:BOOL=ON', - '-DBUILD_TESTING_PERF:BOOL=ON', - '-DBUILD_TESTING_UNIT:BOOL=OFF', - '-DMERCURY_USE_BOOST_PP:BOOL=ON', - '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', - '-DNA_USE_SM:BOOL=ON', - '-DNA_USE_OFI:BOOL=ON', - '-DNA_USE_UCX:BOOL=ON', - '../mercury'] + reqs.define( + 'ucx', + retriever=GitRepoRetriever('https://github.com/openucx/ucx.git'), + libs=['ucs', 'ucp', 'uct'], + functions={'ucs': ['ucs_debug_disable_signal']}, + headers=['uct/api/uct.h'], + pkgconfig='ucx', + commands=[ + ['./autogen.sh'], + ucx_configure, + ['make'], + ['make', 'install'], + ['mkdir', '-p', '$UCX_PREFIX/lib64/pkgconfig'], + ['cp', 'ucx.pc', '$UCX_PREFIX/lib64/pkgconfig'], + ], + build_env={'CFLAGS': '-Wno-error'}, + package='ucx-devel' if inst(reqs, 'ucx') else None, + ) + + mercury_build = [ + 'cmake', + '-DBUILD_SHARED_LIBS:BOOL=ON', + '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo', + '-DCMAKE_CXX_FLAGS:STRING="-std=c++11"', + '-DCMAKE_INSTALL_PREFIX:PATH=$MERCURY_PREFIX', + '-DBUILD_DOCUMENTATION:BOOL=OFF', + '-DBUILD_EXAMPLES:BOOL=OFF', + '-DBUILD_TESTING:BOOL=ON', + '-DBUILD_TESTING_PERF:BOOL=ON', + '-DBUILD_TESTING_UNIT:BOOL=OFF', + '-DMERCURY_USE_BOOST_PP:BOOL=ON', + '-DMERCURY_USE_CHECKSUMS:BOOL=OFF', + '-DNA_USE_SM:BOOL=ON', + '-DNA_USE_OFI:BOOL=ON', + '-DNA_USE_UCX:BOOL=ON', + '../mercury', + ] if reqs.target_type == 'debug': mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=ON') else: mercury_build.append('-DMERCURY_ENABLE_DEBUG:BOOL=OFF') - reqs.define('mercury', - retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), - commands=[mercury_build, - ['make'], - ['make', 'install']], - libs=['mercury'], - pkgconfig='mercury', - requires=['boost', 'ofi', 'ucx'] + libs, - out_of_src_build=True, - package='mercury-devel' if inst(reqs, 'mercury') else None, - build_env={'CFLAGS': '-fstack-usage'}) + reqs.define( + 'mercury', + retriever=GitRepoRetriever('https://github.com/mercury-hpc/mercury.git', True), + commands=[mercury_build, ['make'], ['make', 'install']], + libs=['mercury'], + pkgconfig='mercury', + requires=['boost', 'ofi', 'ucx'] + libs, + out_of_src_build=True, + package='mercury-devel' if inst(reqs, 'mercury') else None, + build_env={'CFLAGS': '-fstack-usage'}, + ) def 
define_common(reqs): @@ -245,39 +266,48 @@ def define_components(reqs): define_mercury(reqs) define_ompi(reqs) - reqs.define('isal', - retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), - commands=[['./autogen.sh'], - ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], - ['make'], - ['make', 'install']], - libs=['isal']) - reqs.define('isal_crypto', - retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), - commands=[['./autogen.sh'], - ['./configure', - '--prefix=$ISAL_CRYPTO_PREFIX', - '--libdir=$ISAL_CRYPTO_PREFIX/lib'], - ['make'], - ['make', 'install']], - libs=['isal_crypto']) - - reqs.define('pmdk', - retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), - commands=[['make', - 'all', - 'NDCTL_ENABLE=n', - 'BUILD_EXAMPLES=n', - 'BUILD_BENCHMARKS=n', - 'DOC=n', - 'EXTRA_CFLAGS="-Wno-error"', - 'install', - 'prefix=$PMDK_PREFIX']], - libs=['pmemobj']) - abt_build = ['./configure', - '--prefix=$ARGOBOTS_PREFIX', - 'CC=gcc', - '--enable-stack-unwind'] + reqs.define( + 'isal', + retriever=GitRepoRetriever('https://github.com/intel/isa-l.git'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$ISAL_PREFIX', '--libdir=$ISAL_PREFIX/lib'], + ['make'], + ['make', 'install'], + ], + libs=['isal'], + ) + reqs.define( + 'isal_crypto', + retriever=GitRepoRetriever('https://github.com/intel/isa-l_crypto'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$ISAL_CRYPTO_PREFIX', '--libdir=$ISAL_CRYPTO_PREFIX/lib'], + ['make'], + ['make', 'install'], + ], + libs=['isal_crypto'], + ) + + reqs.define( + 'pmdk', + retriever=GitRepoRetriever('https://github.com/pmem/pmdk.git'), + commands=[ + [ + 'make', + 'all', + 'NDCTL_ENABLE=n', + 'BUILD_EXAMPLES=n', + 'BUILD_BENCHMARKS=n', + 'DOC=n', + 'EXTRA_CFLAGS="-Wno-error"', + 'install', + 'prefix=$PMDK_PREFIX', + ] + ], + libs=['pmemobj'], + ) + abt_build = ['./configure', '--prefix=$ARGOBOTS_PREFIX', 'CC=gcc', '--enable-stack-unwind'] if reqs.target_type == 'debug': abt_build.append('--enable-debug=most') @@ -287,19 +317,28 @@ def define_components(reqs): if inst(reqs, 'valgrind_devel'): abt_build.append('--enable-valgrind') - reqs.define('argobots', - retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), - commands=[['git', 'clean', '-dxf'], - ['./autogen.sh'], - abt_build, - ['make'], - ['make', 'install']], - requires=['libunwind'], - libs=['abt'], - headers=['abt.h']) - - reqs.define('fuse', libs=['fuse3'], defines=['FUSE_USE_VERSION=35'], - headers=['fuse3/fuse.h'], package='fuse3-devel') + reqs.define( + 'argobots', + retriever=GitRepoRetriever('https://github.com/pmodels/argobots.git', True), + commands=[ + ['git', 'clean', '-dxf'], + ['./autogen.sh'], + abt_build, + ['make'], + ['make', 'install'], + ], + requires=['libunwind'], + libs=['abt'], + headers=['abt.h'], + ) + + reqs.define( + 'fuse', + libs=['fuse3'], + defines=['FUSE_USE_VERSION=35'], + headers=['fuse3/fuse.h'], + package='fuse3-devel', + ) # Tell SPDK which CPU to optimize for, by default this is native which works well unless you # are relocating binaries across systems, for example in CI under GitHub actions etc. 
There @@ -320,44 +359,54 @@ def define_components(reqs): else: spdk_arch = 'haswell' - reqs.define('spdk', - retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), - commands=[['./configure', - '--prefix=$SPDK_PREFIX', - '--disable-tests', - '--disable-unit-tests', - '--disable-apps', - '--without-vhost', - '--without-crypto', - '--without-pmdk', - '--without-rbd', - '--without-iscsi-initiator', - '--without-isal', - '--without-vtune', - '--with-shared', - f'--target-arch={spdk_arch}'], - ['make', f'CONFIG_ARCH={spdk_arch}'], - ['make', 'install'], - ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], - ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], - ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], - ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], - ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], - ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], - ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], - ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf']], - headers=['spdk/nvme.h'], - patch_rpath=['lib', 'bin']) - - reqs.define('protobufc', - retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), - commands=[['./autogen.sh'], - ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], - ['make'], - ['make', 'install']], - libs=['protobuf-c'], - headers=['protobuf-c/protobuf-c.h'], - package='protobuf-c-devel') + reqs.define( + 'spdk', + retriever=GitRepoRetriever('https://github.com/spdk/spdk.git', True), + commands=[ + [ + './configure', + '--prefix=$SPDK_PREFIX', + '--disable-tests', + '--disable-unit-tests', + '--disable-apps', + '--without-vhost', + '--without-crypto', + '--without-pmdk', + '--without-rbd', + '--without-iscsi-initiator', + '--without-isal', + '--without-vtune', + '--with-shared', + f'--target-arch={spdk_arch}', + ], + ['make', f'CONFIG_ARCH={spdk_arch}'], + ['make', 'install'], + ['cp', '-r', '-P', 'dpdk/build/lib/', '$SPDK_PREFIX'], + ['cp', '-r', '-P', 'dpdk/build/include/', '$SPDK_PREFIX/include/dpdk'], + ['mkdir', '-p', '$SPDK_PREFIX/share/spdk'], + ['cp', '-r', 'include', 'scripts', '$SPDK_PREFIX/share/spdk'], + ['cp', 'build/examples/lsvmd', '$SPDK_PREFIX/bin/spdk_nvme_lsvmd'], + ['cp', 'build/examples/nvme_manage', '$SPDK_PREFIX/bin/spdk_nvme_manage'], + ['cp', 'build/examples/identify', '$SPDK_PREFIX/bin/spdk_nvme_identify'], + ['cp', 'build/examples/perf', '$SPDK_PREFIX/bin/spdk_nvme_perf'], + ], + headers=['spdk/nvme.h'], + patch_rpath=['lib', 'bin'], + ) + + reqs.define( + 'protobufc', + retriever=GitRepoRetriever('https://github.com/protobuf-c/protobuf-c.git'), + commands=[ + ['./autogen.sh'], + ['./configure', '--prefix=$PROTOBUFC_PREFIX', '--disable-protoc'], + ['make'], + ['make', 'install'], + ], + libs=['protobuf-c'], + headers=['protobuf-c/protobuf-c.h'], + package='protobuf-c-devel', + ) os_name = dist[0].split()[0] if os_name == 'Ubuntu': @@ -366,8 +415,9 @@ def define_components(reqs): capstone_pkg = 'libcapstone-devel' else: capstone_pkg = 'capstone-devel' - reqs.define('capstone', libs=['capstone'], headers=['capstone/capstone.h'], - package=capstone_pkg) + reqs.define( + 'capstone', libs=['capstone'], headers=['capstone/capstone.h'], package=capstone_pkg + ) __all__ = ['define_components'] diff --git a/site_scons/site_tools/go_builder.py b/site_scons/site_tools/go_builder.py index 51fedbe88cf..af6758632d5 100644 --- a/site_scons/site_tools/go_builder.py +++ 
b/site_scons/site_tools/go_builder.py @@ -17,8 +17,12 @@ def _scan_go_file(node, env, _path): src_dir = os.path.dirname(str(node)) includes = [] path_name = str(node)[12:] - rc = subprocess.run([env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], - cwd='src/control', stdout=subprocess.PIPE, check=True) + rc = subprocess.run( + [env.d_go_bin, 'list', '--json', '-mod=vendor', path_name], + cwd='src/control', + stdout=subprocess.PIPE, + check=True, + ) data = json.loads(rc.stdout.decode('utf-8')) for dep in data['Deps']: if not dep.startswith('github.com/daos-stack/daos'): @@ -72,9 +76,16 @@ def _check_go_version(context): go_version = out.split(' ')[2].replace('go', '') if '-' in go_version: go_version = go_version.split('-')[0] - if len([x for x, y in - zip(go_version.split('.'), MIN_GO_VERSION.split('.')) - if int(x) < int(y)]) > 0: + if ( + len( + [ + x + for x, y in zip(go_version.split('.'), MIN_GO_VERSION.split('.')) + if int(x) < int(y) + ] + ) + > 0 + ): context.Result(f'{go_version} is too old (min supported: {MIN_GO_VERSION}) ') return 0 context.Result(str(go_version)) diff --git a/src/rdb/raft b/src/rdb/raft index efa15f46360..c354cd7fb1e 160000 --- a/src/rdb/raft +++ b/src/rdb/raft @@ -1 +1 @@ -Subproject commit efa15f46360078ff427562c53d23ed6f0e4a06ac +Subproject commit c354cd7fb1e1dbf3fbc9b3d24a6b05d0c5c9d5af From 6d954ee8238986c51d1f577ffe70673e9c61c79e Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Fri, 8 Dec 2023 16:56:10 +0000 Subject: [PATCH 23/26] Back out raft cahnge Required-githooks: true --- src/rdb/raft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rdb/raft b/src/rdb/raft index c354cd7fb1e..efa15f46360 160000 --- a/src/rdb/raft +++ b/src/rdb/raft @@ -1 +1 @@ -Subproject commit c354cd7fb1e1dbf3fbc9b3d24a6b05d0c5c9d5af +Subproject commit efa15f46360078ff427562c53d23ed6f0e4a06ac From a3d21e301fb7d3d5069d7db23c67185345ef4e32 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 30 Jan 2024 09:59:37 +0000 Subject: [PATCH 24/26] Back out conflict. 
Required-githooks: true Signed-off-by: Ashley Pittman --- src/control/SConscript | 73 ++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 45 deletions(-) diff --git a/src/control/SConscript b/src/control/SConscript index 19760040c82..933b090b442 100644 --- a/src/control/SConscript +++ b/src/control/SConscript @@ -38,7 +38,7 @@ def get_build_flags(benv): def gen_build_id(): """generate a unique build id per binary for use by RPM - https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" + https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" buildid = b2a_hex(urandom(20)) return '0x' + buildid.decode() @@ -48,13 +48,9 @@ def go_ldflags(): Import('daos_version', 'conf_dir') path = 'github.com/daos-stack/daos/src/control/build' - return ' '.join( - [ - f'-X {path}.DaosVersion={daos_version}', - f'-X {path}.ConfigDir={conf_dir}', - f'-B $({gen_build_id()}$)', - ] - ) + return ' '.join([f'-X {path}.DaosVersion={daos_version}', + f'-X {path}.ConfigDir={conf_dir}', + f'-B $({gen_build_id()}$)']) def install_go_bin(env, name, libs=None, install_man=False): @@ -74,16 +70,12 @@ def install_go_bin(env, name, libs=None, install_man=False): libs = [] libs.extend(['daos_common', 'cart', 'gurt']) - target = env.d_run_command( - name, - sources, - libs, - f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' - + f'-ldflags "{go_ldflags()}" ' - + f'{get_build_flags(env)} ' - + f'{get_build_tags(env)} ' - + f'-o {build_bin} {install_src}', - ) + target = env.d_run_command(name, sources, libs, + f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' + + f'-ldflags "{go_ldflags()}" ' + + f'{get_build_flags(env)} ' + + f'{get_build_tags(env)} ' + + f'-o {build_bin} {install_src}') env.Install('$PREFIX/bin', target) if install_man: gen_bin = join('$BUILD_DIR/src/control', name) @@ -106,7 +98,6 @@ def scons(): prefix = denv.subst("$PREFIX") sprefix = denv.subst("$SPDK_PREFIX") if sprefix not in ["", prefix]: - def install_dir(srcdir): """walk a directory and install targets""" for root, _dirs, files in os.walk(srcdir): @@ -130,17 +121,14 @@ def scons(): install_go_bin(denv, "hello_drpc") dbenv = denv.Clone() - dblibs = dbenv.subst( - "-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$BUILD_DIR/src/client/dfs " - "-L$BUILD_DIR/src/utils $_RPATH" - ) + dblibs = dbenv.subst("-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$BUILD_DIR/src/client/dfs " + "-L$BUILD_DIR/src/utils $_RPATH") dbenv.AppendENVPath("CGO_LDFLAGS", dblibs, sep=" ") - install_go_bin( - dbenv, 'daos', libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], install_man=True - ) + install_go_bin(dbenv, 'daos', libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], + install_man=True) if not prereqs.server_requested(): return @@ -163,25 +151,20 @@ def scons(): aenv.AppendUnique(LINKFLAGS=["-Wl,--no-as-needed"]) aenv.Replace(RPATH=[]) - cgolibdirs = aenv.subst( - "-L$BUILD_DIR/src/control/lib/spdk " - "-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$SPDK_PREFIX/lib " - "-L$OFI_PREFIX/lib $_RPATH" - ) + cgolibdirs = aenv.subst("-L$BUILD_DIR/src/control/lib/spdk " + "-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$SPDK_PREFIX/lib " + "-L$OFI_PREFIX/lib $_RPATH") # Explicitly link RTE & SPDK libs for CGO access - ldopts = ( - cgolibdirs - + " -lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" - + " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" - ) + ldopts = cgolibdirs + " 
-lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" + \ + " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" aenv.AppendENVPath("CGO_LDFLAGS", ldopts, sep=" ") - aenv.AppendENVPath( - "CGO_CFLAGS", senv.subst("-I$SPDK_PREFIX/include -I$OFI_PREFIX/include"), sep=" " - ) + aenv.AppendENVPath("CGO_CFLAGS", + senv.subst("-I$SPDK_PREFIX/include -I$OFI_PREFIX/include"), + sep=" ") # Sets CGO_LDFLAGS for rpath aenv.d_add_rpaths(None, True, True) From 631de8648df20dba3858068a607fa4c337c2a463 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 30 Jan 2024 10:00:22 +0000 Subject: [PATCH 25/26] Patch build file. Required-githooks: true Signed-off-by: Ashley Pittman --- src/control/SConscript | 77 ++++++++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 30 deletions(-) diff --git a/src/control/SConscript b/src/control/SConscript index 490ca2a94cd..13762f214f5 100644 --- a/src/control/SConscript +++ b/src/control/SConscript @@ -40,7 +40,7 @@ def get_build_flags(benv): def gen_build_id(): """generate a unique build id per binary for use by RPM - https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" + https://fedoraproject.org/wiki/PackagingDrafts/Go#Build_ID""" buildid = b2a_hex(urandom(20)) return '0x' + buildid.decode() @@ -54,11 +54,15 @@ def go_ldflags(benv): build_time = datetime.now(timezone.utc).astimezone().isoformat() Import('daos_version', 'conf_dir') path = 'github.com/daos-stack/daos/src/control/build' - return ' '.join([f'-X {path}.DaosVersion={daos_version}', - f'-X {path}.ConfigDir={conf_dir}', - f'-X {path}.BuildTime={build_time}', - f'-X {path}.BuildHost={build_host}', - f'-B $({gen_build_id()}$)']) + return ' '.join( + [ + f'-X {path}.DaosVersion={daos_version}', + f'-X {path}.ConfigDir={conf_dir}', + f'-X {path}.BuildTime={build_time}', + f'-X {path}.BuildHost={build_host}', + f'-B $({gen_build_id()}$)', + ] + ) def install_go_bin(env, name, libs=None, install_man=False): @@ -78,12 +82,16 @@ def install_go_bin(env, name, libs=None, install_man=False): libs = [] libs.extend(['daos_common', 'cart', 'gurt']) - target = env.d_run_command(name, sources, libs, - f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' - + f'-ldflags "{go_ldflags(env)}" ' - + f'{get_build_flags(env)} ' - + f'{get_build_tags(env)} ' - + f'-o {build_bin} {install_src}') + target = env.d_run_command( + name, + sources, + libs, + f'cd {gosrc}; {env.d_go_bin} build -mod vendor ' + + f'-ldflags "{go_ldflags(env)}" ' + + f'{get_build_flags(env)} ' + + f'{get_build_tags(env)} ' + + f'-o {build_bin} {install_src}', + ) env.Install('$PREFIX/bin', target) if install_man: gen_bin = join('$BUILD_DIR/src/control', name) @@ -106,6 +114,7 @@ def scons(): prefix = denv.subst("$PREFIX") sprefix = denv.subst("$SPDK_PREFIX") if sprefix not in ["", prefix]: + def install_dir(srcdir): """walk a directory and install targets""" for root, _dirs, files in os.walk(srcdir): @@ -129,14 +138,17 @@ def scons(): install_go_bin(denv, "hello_drpc") dbenv = denv.Clone() - dblibs = dbenv.subst("-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$BUILD_DIR/src/client/dfs " - "-L$BUILD_DIR/src/utils $_RPATH") + dblibs = dbenv.subst( + "-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$BUILD_DIR/src/client/dfs " + "-L$BUILD_DIR/src/utils $_RPATH" + ) dbenv.AppendENVPath("CGO_LDFLAGS", dblibs, sep=" ") - install_go_bin(dbenv, 'daos', libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], - install_man=True) + install_go_bin( + dbenv, 'daos', 
libs=['daos_cmd_hdlrs', 'dfs', 'duns', 'daos'], install_man=True + ) if not prereqs.server_requested(): return @@ -159,20 +171,25 @@ def scons(): aenv.AppendUnique(LINKFLAGS=["-Wl,--no-as-needed"]) aenv.Replace(RPATH=[]) - cgolibdirs = aenv.subst("-L$BUILD_DIR/src/control/lib/spdk " - "-L$BUILD_DIR/src/gurt " - "-L$BUILD_DIR/src/cart " - "-L$BUILD_DIR/src/common " - "-L$SPDK_PREFIX/lib " - "-L$OFI_PREFIX/lib $_RPATH") + cgolibdirs = aenv.subst( + "-L$BUILD_DIR/src/control/lib/spdk " + "-L$BUILD_DIR/src/gurt " + "-L$BUILD_DIR/src/cart " + "-L$BUILD_DIR/src/common " + "-L$SPDK_PREFIX/lib " + "-L$OFI_PREFIX/lib $_RPATH" + ) # Explicitly link RTE & SPDK libs for CGO access - ldopts = cgolibdirs + " -lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" + \ - " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" + ldopts = ( + cgolibdirs + + " -lspdk_env_dpdk -lspdk_nvme -lspdk_vmd -lrte_mempool" + + " -lrte_mempool_ring -lrte_bus_pci -lnvme_control -lnuma -ldl" + ) aenv.AppendENVPath("CGO_LDFLAGS", ldopts, sep=" ") - aenv.AppendENVPath("CGO_CFLAGS", - senv.subst("-I$SPDK_PREFIX/include -I$OFI_PREFIX/include"), - sep=" ") + aenv.AppendENVPath( + "CGO_CFLAGS", senv.subst("-I$SPDK_PREFIX/include -I$OFI_PREFIX/include"), sep=" " + ) # Sets CGO_LDFLAGS for rpath aenv.d_add_rpaths(None, True, True) From c02152fe849897f54f8b4f812c8d44dcde64fd96 Mon Sep 17 00:00:00 2001 From: Ashley Pittman Date: Tue, 30 Jan 2024 12:53:37 +0000 Subject: [PATCH 26/26] Update to 2024 stlye. Required-githooks: true Signed-off-by: Ashley Pittman --- site_scons/site_tools/doneapi.py | 1 + src/client/dfuse/SConscript | 1 + src/client/pydaos/SConscript | 1 + src/client/pydaos/raw/conversion.py | 3 ++- src/client/pydaos/raw/daos_api.py | 3 ++- src/client/pydaos/raw/daos_cref.py | 3 ++- src/client/setup.py | 1 + src/common/SConscript | 1 - src/control/SConscript | 1 + src/control/lib/spdk/SConscript | 1 + src/control/lib/spdk/ctests/SConscript | 1 + src/utils/ctl/SConscript | 1 - src/utils/self_test/SConscript | 1 - src/vos/storage_estimator/SConscript | 1 + src/vos/storage_estimator/common/__init__.py | 3 ++- src/vos/storage_estimator/common/tests/__init__.py | 1 + .../storage_estimator/common/tests/storage_estimator_test.py | 3 ++- src/vos/storage_estimator/common/tests/util.py | 3 ++- src/vos/storage_estimator/common/util.py | 3 +-- src/vos/storage_estimator/common/vos_size.py | 3 ++- utils/cq/requirements.txt | 1 + 21 files changed, 25 insertions(+), 12 deletions(-) diff --git a/site_scons/site_tools/doneapi.py b/site_scons/site_tools/doneapi.py index b419b10c456..aa90d598e93 100644 --- a/site_scons/site_tools/doneapi.py +++ b/site_scons/site_tools/doneapi.py @@ -3,6 +3,7 @@ Hack to support oneapi version of Intel compilers """ + import os import sys diff --git a/src/client/dfuse/SConscript b/src/client/dfuse/SConscript index c6498a6b9cb..db964a0b52c 100644 --- a/src/client/dfuse/SConscript +++ b/src/client/dfuse/SConscript @@ -1,4 +1,5 @@ """Build DFuse""" + import os HEADERS = ['ioil_io.h', 'ioil_defines.h', 'ioil_api.h', 'ioil.h'] diff --git a/src/client/pydaos/SConscript b/src/client/pydaos/SConscript index adb929f342f..76ffacc1e15 100644 --- a/src/client/pydaos/SConscript +++ b/src/client/pydaos/SConscript @@ -1,4 +1,5 @@ """Build pydaos client""" + import sys diff --git a/src/client/pydaos/raw/conversion.py b/src/client/pydaos/raw/conversion.py index c9f68efc82e..318e001205e 100644 --- a/src/client/pydaos/raw/conversion.py +++ b/src/client/pydaos/raw/conversion.py @@ -1,8 +1,9 @@ """ - (C) Copyright 2018-2023 
Intel Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ + # pylint: disable=consider-using-f-string import ctypes diff --git a/src/client/pydaos/raw/daos_api.py b/src/client/pydaos/raw/daos_api.py index f31ed7b510d..123eceb636a 100644 --- a/src/client/pydaos/raw/daos_api.py +++ b/src/client/pydaos/raw/daos_api.py @@ -1,8 +1,9 @@ """ - (C) Copyright 2018-2023 Intel Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ + # pylint: disable=too-many-lines # pylint: disable=raise-missing-from # pylint: disable=consider-using-f-string diff --git a/src/client/pydaos/raw/daos_cref.py b/src/client/pydaos/raw/daos_cref.py index cf85325f315..9ae35083128 100644 --- a/src/client/pydaos/raw/daos_cref.py +++ b/src/client/pydaos/raw/daos_cref.py @@ -1,8 +1,9 @@ """ - (C) Copyright 2018-2023 Intel Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ + # pylint: disable=too-few-public-methods # pylint: disable=pylint-missing-docstring import ctypes diff --git a/src/client/setup.py b/src/client/setup.py index 0f4a016dc28..48d23a7f0a8 100644 --- a/src/client/setup.py +++ b/src/client/setup.py @@ -8,6 +8,7 @@ If run from within a compiled DAOS source tree this it will detect the install path automatically, otherwise it'll use the defaults. """ + import json import os diff --git a/src/common/SConscript b/src/common/SConscript index c3cce4a2482..46aa63fcdac 100644 --- a/src/common/SConscript +++ b/src/common/SConscript @@ -1,6 +1,5 @@ """Build common libraries""" - COMMON_FILES = [ 'debug.c', 'mem.c', diff --git a/src/control/SConscript b/src/control/SConscript index 13762f214f5..08c0a254e21 100644 --- a/src/control/SConscript +++ b/src/control/SConscript @@ -1,4 +1,5 @@ """Build DAOS Control Plane""" + # pylint: disable=too-many-locals import os import socket diff --git a/src/control/lib/spdk/SConscript b/src/control/lib/spdk/SConscript index bfa92ae0909..ed86eb0ed42 100644 --- a/src/control/lib/spdk/SConscript +++ b/src/control/lib/spdk/SConscript @@ -1,4 +1,5 @@ """Build DAOS SPDK Go bindings""" + from os.path import join diff --git a/src/control/lib/spdk/ctests/SConscript b/src/control/lib/spdk/ctests/SConscript index 16a9bf21cb3..4e904ea3955 100644 --- a/src/control/lib/spdk/ctests/SConscript +++ b/src/control/lib/spdk/ctests/SConscript @@ -1,4 +1,5 @@ """Build go-spdk bindings C tests""" + import os diff --git a/src/utils/ctl/SConscript b/src/utils/ctl/SConscript index 4d71af3a406..78936cba883 100644 --- a/src/utils/ctl/SConscript +++ b/src/utils/ctl/SConscript @@ -1,6 +1,5 @@ """Build cart_ctl test""" - SRC = ['cart_ctl.c'] diff --git a/src/utils/self_test/SConscript b/src/utils/self_test/SConscript index 50ce71545f0..c22712a1c6f 100644 --- a/src/utils/self_test/SConscript +++ b/src/utils/self_test/SConscript @@ -1,6 +1,5 @@ """Build self test""" - SELF_TEST = 'self_test.c' diff --git a/src/vos/storage_estimator/SConscript b/src/vos/storage_estimator/SConscript index 88f6d38f418..27e4cad0e20 100644 --- a/src/vos/storage_estimator/SConscript +++ b/src/vos/storage_estimator/SConscript @@ -1,4 +1,5 @@ """Install DAOS Storage Estimator""" + import sys diff --git a/src/vos/storage_estimator/common/__init__.py b/src/vos/storage_estimator/common/__init__.py index da88d621ae3..1cee4e530a6 100644 --- a/src/vos/storage_estimator/common/__init__.py +++ b/src/vos/storage_estimator/common/__init__.py @@ -1,6 +1,7 @@ ''' - (C) Copyright 2018-2021 Intel 
Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent ''' + __all__ = ['dfs_sb', 'explorer', 'parse_csv', 'vos_size', 'vos_structures', 'util'] diff --git a/src/vos/storage_estimator/common/tests/__init__.py b/src/vos/storage_estimator/common/tests/__init__.py index 3e90533aad0..06864af03f9 100644 --- a/src/vos/storage_estimator/common/tests/__init__.py +++ b/src/vos/storage_estimator/common/tests/__init__.py @@ -3,4 +3,5 @@ SPDX-License-Identifier: BSD-2-Clause-Patent ''' + __all__ = ['util'] diff --git a/src/vos/storage_estimator/common/tests/storage_estimator_test.py b/src/vos/storage_estimator/common/tests/storage_estimator_test.py index 4375daad37b..07f45b1dd6a 100644 --- a/src/vos/storage_estimator/common/tests/storage_estimator_test.py +++ b/src/vos/storage_estimator/common/tests/storage_estimator_test.py @@ -1,8 +1,9 @@ ''' - (C) Copyright 2018-2023 Intel Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent ''' + import os import unittest diff --git a/src/vos/storage_estimator/common/tests/util.py b/src/vos/storage_estimator/common/tests/util.py index bce472f97c6..fd6a6676aa6 100644 --- a/src/vos/storage_estimator/common/tests/util.py +++ b/src/vos/storage_estimator/common/tests/util.py @@ -1,8 +1,9 @@ ''' - (C) Copyright 2018-2023 Intel Corporation. + (C) Copyright 2018-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent ''' + import os import shutil import tempfile diff --git a/src/vos/storage_estimator/common/util.py b/src/vos/storage_estimator/common/util.py index 448ab1c9b30..4fe5505db17 100644 --- a/src/vos/storage_estimator/common/util.py +++ b/src/vos/storage_estimator/common/util.py @@ -1,10 +1,9 @@ ''' - (C) Copyright 2020-2023 Intel Corporation. + (C) Copyright 2020-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent ''' - import os import yaml diff --git a/src/vos/storage_estimator/common/vos_size.py b/src/vos/storage_estimator/common/vos_size.py index c8cad3630fd..b569cf500c7 100644 --- a/src/vos/storage_estimator/common/vos_size.py +++ b/src/vos/storage_estimator/common/vos_size.py @@ -1,8 +1,9 @@ ''' - (C) Copyright 2019-2023 Intel Corporation. + (C) Copyright 2019-2024 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent ''' + import math import random diff --git a/utils/cq/requirements.txt b/utils/cq/requirements.txt index 008c6c31651..c238531946f 100644 --- a/utils/cq/requirements.txt +++ b/utils/cq/requirements.txt @@ -14,3 +14,4 @@ isort==5.13.2 pylint==3.0.3 yamllint==1.33.0 codespell==2.2.6 +black
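Editor's note (not part of the patch series): the `python-black` workflow job introduced above can be reproduced locally before pushing. The sketch below is illustrative only — the `run_black_check` helper name and argument handling are invented for this example — and it assumes `black` is installed (it is added to utils/cq/requirements.txt in the final patch). It simply shells out to black with the same option string the final linting.yml uses; any remaining settings such as line length are picked up from the project's pyproject.toml in the usual way when run from the repository root. It is also worth noting that Black's "magic trailing comma" is why several lists in these diffs flip between packed and one-element-per-line layouts: a trailing comma left inside a collection keeps it exploded on reformatting, while removing it lets Black collapse the collection if it fits within the line length.

#!/usr/bin/env python3
"""Run the same black check locally that the python-black CI job performs (illustrative sketch)."""
import subprocess
import sys


def run_black_check(paths=('.',)):
    """Invoke black in check mode with the options used by the CI job."""
    cmd = [
        sys.executable, '-m', 'black',
        '--check', '--diff', '--verbose',
        '--extend-exclude', '(ftest|vendor|utils)',
        *paths,
    ]
    # black exits 0 when no file would be reformatted and 1 when changes are needed.
    return subprocess.run(cmd, check=False).returncode


if __name__ == '__main__':
    sys.exit(run_black_check(tuple(sys.argv[1:]) or ('.',)))

Run from the repository root, this exits non-zero and prints a diff whenever a file still needs reformatting, which matches what the CI job reports on a pull request.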